diff --git a/.changelog/2909.txt b/.changelog/2909.txt deleted file mode 100644 index cae7f44d45..0000000000 --- a/.changelog/2909.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -helm: Add readOnlyRootFilesystem to the default restricted security context when runnning `consul-k8s` in a restricted namespaces. -``` \ No newline at end of file diff --git a/.changelog/3635.txt b/.changelog/3635.txt deleted file mode 100644 index c5c505c808..0000000000 --- a/.changelog/3635.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -helm: (datadog integration) updated `server-statefulset.yaml` templating to handle custom Unix Domain Socket paths. -``` \ No newline at end of file diff --git a/.changelog/3685.txt b/.changelog/3685.txt deleted file mode 100644 index 05241d820d..0000000000 --- a/.changelog/3685.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:bug -helm: corrected datadog openmetrics and consul-checks consul server URLs set during automation to use full consul deployment release name -``` -```release-note:bug -helm: bug fix for `prometheus.io` annotation omission while using datadog integration with openmetrics/prometheus and consul integration checks -``` \ No newline at end of file diff --git a/.changelog/3718.txt b/.changelog/3718.txt deleted file mode 100644 index 9e7cd4f59a..0000000000 --- a/.changelog/3718.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:breaking-change -api-gateway: The api-gateway stanza located under .Values.api-gateway was deprecated in -1.16.0 of Consul and is being removed as of 1.19.0 in favor of connectInject.apiGateway. 
-``` \ No newline at end of file diff --git a/.changelog/3767.txt b/.changelog/3767.txt deleted file mode 100644 index 1801c88739..0000000000 --- a/.changelog/3767.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -gateways: api-gateway now uses the Consul file-system-certificate by default for TLS -``` \ No newline at end of file diff --git a/.changelog/3943.txt b/.changelog/3943.txt deleted file mode 100644 index 3be45fc453..0000000000 --- a/.changelog/3943.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -control-plane: Add the ability to register services via CRD. -``` diff --git a/.changelog/3989.txt b/.changelog/3989.txt deleted file mode 100644 index e19b311d05..0000000000 --- a/.changelog/3989.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -helm: Fix ArgoCD hooks related annotations on server-acl-init Job, they must be added at Job definition and not template level. -``` \ No newline at end of file diff --git a/.changelog/3991.txt b/.changelog/3991.txt deleted file mode 100644 index 45ff3c90dd..0000000000 --- a/.changelog/3991.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -helm: adds ability to set the Image Pull Policy for all Consul images (consul, consul-k8s, consul-dataplane, consul-telemetry-collector) -``` diff --git a/.changelog/4003.txt b/.changelog/4000.txt similarity index 100% rename from .changelog/4003.txt rename to .changelog/4000.txt diff --git a/.changelog/4153.txt b/.changelog/4153.txt deleted file mode 100644 index 3a42a23e4b..0000000000 --- a/.changelog/4153.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -terminating-gateway: Fix generated acl policy for external services to include the namespace and partition block if they are enabled. 
-``` diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index aedbb51464..6901dfc5fd 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -26,7 +26,7 @@ jobs: run: echo "GOROOT=$(go env GOROOT)" >> "${GITHUB_ENV}" - name: golangci-lint-helm-gen - uses: golangci/golangci-lint-action@38e1018663fa5173f3968ea0777460d3de38f256 # v5.3.0 + uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 with: version: "v1.55.2" working-directory: hack/helm-reference-gen @@ -34,21 +34,21 @@ jobs: args: "--no-config --disable-all --enable gofmt,govet" - name: golangci-lint-control-plane - uses: golangci/golangci-lint-action@38e1018663fa5173f3968ea0777460d3de38f256 # v5.3.0 + uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 with: version: "v1.55.2" working-directory: control-plane skip-cache: true # We have seen sticky timeout bugs crop up with caching enabled, so disabling for now - name: golangci-lint-acceptance - uses: golangci/golangci-lint-action@38e1018663fa5173f3968ea0777460d3de38f256 # v5.3.0 + uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 with: version: "v1.55.2" working-directory: acceptance skip-cache: true # We have seen sticky timeout bugs crop up with caching enabled, so disabling for now - name: golangci-lint-cli - uses: golangci/golangci-lint-action@38e1018663fa5173f3968ea0777460d3de38f256 # v5.3.0 + uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 with: version: "v1.55.2" working-directory: acceptance diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 22c7614eca..aa7f00cd88 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -27,4 +27,5 @@ jobs: repo: hashicorp/consul-k8s-workflows ref: main token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - inputs: '{ "context":"${{ env.CONTEXT }}", "actor":"${{ github.actor }}", "repository":"${{ 
github.repository }}", "branch":"${{ env.BRANCH }}", "sha":"${{ env.SHA }}", "token":"${{ secrets.ELEVATED_GITHUB_TOKEN }}" }' + # set "test-ce" to false when a new minor version is released + inputs: '{ "test-ce": false, "context":"${{ env.CONTEXT }}", "actor":"${{ github.actor }}", "repository":"${{ github.repository }}", "branch":"${{ env.BRANCH }}", "sha":"${{ env.SHA }}", "token":"${{ secrets.ELEVATED_GITHUB_TOKEN }}" }' diff --git a/.github/workflows/nightly-acceptance.yml b/.github/workflows/nightly-acceptance.yml index c8ac870410..c213024498 100644 --- a/.github/workflows/nightly-acceptance.yml +++ b/.github/workflows/nightly-acceptance.yml @@ -23,4 +23,5 @@ jobs: repo: hashicorp/consul-k8s-workflows ref: main token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - inputs: '{ "context":"${{ env.CONTEXT }}", "repository":"${{ github.repository }}", "branch":"${{ env.BRANCH }}", "sha":"${{ github.sha }}", "token":"${{ secrets.ELEVATED_GITHUB_TOKEN }}" }' + # set "test-ce" to false when a new minor version is released + inputs: '{ "test-ce": true, "context":"${{ env.CONTEXT }}", "actor":"${{ github.actor }}", "repository":"${{ github.repository }}", "branch":"${{ env.BRANCH }}", "sha":"${{ env.SHA }}", "token":"${{ secrets.ELEVATED_GITHUB_TOKEN }}" }' diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index d0d72cb6b3..ea7e63cee9 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -26,8 +26,8 @@ jobs: repo: hashicorp/consul-k8s-workflows ref: main token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - inputs: '{ "context":"${{ env.CONTEXT }}", "actor":"${{ github.actor }}", "repository":"${{ github.repository }}", "branch":"${{ env.BRANCH }}", "sha":"${{ env.SHA }}", "token":"${{ secrets.ELEVATED_GITHUB_TOKEN }}" }' - + # set "test-ce" to false when a new minor version is released + inputs: '{ "test-ce": false, "context":"${{ env.CONTEXT }}", "actor":"${{ github.actor }}", "repository":"${{ github.repository }}", "branch":"${{ env.BRANCH }}", 
"sha":"${{ env.SHA }}", "token":"${{ secrets.ELEVATED_GITHUB_TOKEN }}" }' pass-required-checks-on-skip: needs: [ conditional-skip ] if: needs.conditional-skip.outputs.skip-ci == 'true' diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml index 120b564301..c2ae3f0a2b 100644 --- a/.github/workflows/security-scan.yml +++ b/.github/workflows/security-scan.yml @@ -46,7 +46,8 @@ jobs: uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: repository: hashicorp/security-scanner - token: ${{ secrets.HASHIBOT_PRODSEC_GITHUB_TOKEN }} + #TODO: replace w/ HASHIBOT_PRODSEC_GITHUB_TOKEN once provisioned + token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} path: security-scanner ref: main @@ -63,6 +64,6 @@ jobs: cat results.sarif | jq - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@c4fb451437765abf5018c6fbf22cce1a7da1e5cc # codeql-bundle-v2.17.1 + uses: github/codeql-action/upload-sarif@8fcfedf57053e09257688fce7a0beeb18b1b9ae3 # codeql-bundle-v2.17.2 with: - sarif_file: results.sarif + sarif_file: results.sarif \ No newline at end of file diff --git a/.github/workflows/weekly-acceptance-1-1-x.yml b/.github/workflows/weekly-acceptance-1-1-x.yml index 5aea9e3f2c..9bd2f295ab 100644 --- a/.github/workflows/weekly-acceptance-1-1-x.yml +++ b/.github/workflows/weekly-acceptance-1-1-x.yml @@ -26,4 +26,5 @@ jobs: repo: hashicorp/consul-k8s-workflows ref: main token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - inputs: '{ "context":"${{ env.CONTEXT }}", "repository":"${{ github.repository }}", "branch":"${{ env.BRANCH }}", "sha":"${{ github.sha }}", "token":"${{ secrets.ELEVATED_GITHUB_TOKEN }}" }' + # set "test-ce" to false when a new minor version is released + inputs: '{ "test-ce": true, "context":"${{ env.CONTEXT }}", "actor":"${{ github.actor }}", "repository":"${{ github.repository }}", "branch":"${{ env.BRANCH }}", "sha":"${{ env.SHA }}", "token":"${{ secrets.ELEVATED_GITHUB_TOKEN }}" }' diff --git 
a/.github/workflows/weekly-acceptance-1-4-x.yml b/.github/workflows/weekly-acceptance-1-2-x.yml similarity index 86% rename from .github/workflows/weekly-acceptance-1-4-x.yml rename to .github/workflows/weekly-acceptance-1-2-x.yml index eb8d50bc73..efe61e1249 100644 --- a/.github/workflows/weekly-acceptance-1-4-x.yml +++ b/.github/workflows/weekly-acceptance-1-2-x.yml @@ -1,16 +1,17 @@ # Dispatch to the consul-k8s-workflows with a weekly cron # # A separate file is needed for each release because the cron schedules are different for each release. -name: weekly-acceptance-1-4-x +name: weekly-acceptance-1-2-x on: schedule: # * is a special character in YAML so you have to quote this string - # Run weekly on Thursday at 3AM UTC/11PM EST/8PM PST - - cron: '0 3 * * 4' + # Run weekly on Tuesday at 3AM UTC/11PM EST/8PM PST + - cron: '0 3 * * 2' + # these should be the only settings that you will ever need to change env: - BRANCH: "release/1.4.x" + BRANCH: "release/1.2.x" CONTEXT: "weekly" jobs: diff --git a/.github/workflows/weekly-acceptance-1-5-x.yml b/.github/workflows/weekly-acceptance-1-5-x.yml deleted file mode 100644 index ea245dc1d9..0000000000 --- a/.github/workflows/weekly-acceptance-1-5-x.yml +++ /dev/null @@ -1,28 +0,0 @@ -# Dispatch to the consul-k8s-workflows with a weekly cron -# -# A separate file is needed for each release because the cron schedules are different for each release. 
-name: weekly-acceptance-1-5-x -on: - schedule: - # * is a special character in YAML so you have to quote this string - # Run weekly on Friday at 3AM UTC/11PM EST/8PM PST - - cron: '0 3 * * 5' - -# these should be the only settings that you will ever need to change -env: - BRANCH: "release/1.5.x" - CONTEXT: "weekly" - -jobs: - cloud: - name: cloud - runs-on: ubuntu-latest - steps: - - uses: benc-uk/workflow-dispatch@25b02cc069be46d637e8fe2f1e8484008e9e9609 # v1.2.3 - name: cloud - with: - workflow: cloud.yml - repo: hashicorp/consul-k8s-workflows - ref: main - token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - inputs: '{ "context":"${{ env.CONTEXT }}", "repository":"${{ github.repository }}", "branch":"${{ env.BRANCH }}", "sha":"${{ github.sha }}", "token":"${{ secrets.ELEVATED_GITHUB_TOKEN }}" }' diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f857f8fa3..540a922ff4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,23 +1,3 @@ -## 1.5.1 (July 16, 2024) - -SECURITY: - -* Upgrade go version to 1.22.5 to address [CVE-2024-24791](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-24791) [[GH-4154](https://github.com/hashicorp/consul-k8s/issues/4154)] -* Upgrade go-retryablehttp to v0.7.7 to address [GHSA-v6v8-xj6m-xwqh](https://github.com/advisories/GHSA-v6v8-xj6m-xwqh) [[GH-4169](https://github.com/hashicorp/consul-k8s/issues/4169)] - -IMPROVEMENTS: - -* api-gateways: Change security settings to make root file system read only and to not allow privilage escalation. [[GH-3959](https://github.com/hashicorp/consul-k8s/issues/3959)] -* control-plane: Remove anyuid Security Context Constraints (SCC) requirement in OpenShift. [[GH-4152](https://github.com/hashicorp/consul-k8s/issues/4152)] -* partition-init: Role no longer includes unnecessary access to Secrets resource. 
[[GH-4053](https://github.com/hashicorp/consul-k8s/issues/4053)] - -BUG FIXES: - -* api-gateway: fix issue where API Gateway specific acl roles/policy were not being cleaned up on deletion of an api-gateway [[GH-4060](https://github.com/hashicorp/consul-k8s/issues/4060)] -* connect-inject: add NET_BIND_SERVICE capability when injecting consul-dataplane sidecar [[GH-4152](https://github.com/hashicorp/consul-k8s/issues/4152)] -* endpoints-controller: graceful shutdown logic should not run on a new pod with the same name. Fixes a case where statefulset rollouts could get stuck in graceful shutdown when the new pods come up. [[GH-4059](https://github.com/hashicorp/consul-k8s/issues/4059)] -* terminating-gateway: Fix generated acl policy for external services to include the namespace and partition block if they are enabled. [[GH-4153](https://github.com/hashicorp/consul-k8s/issues/4153)] - ## 1.4.4 (July 15, 2024) SECURITY: @@ -40,112 +20,20 @@ BUG FIXES: * connect-inject: add NET_BIND_SERVICE capability when injecting consul-dataplane sidecar [[GH-4152](https://github.com/hashicorp/consul-k8s/issues/4152)] * endpoints-controller: graceful shutdown logic should not run on a new pod with the same name. Fixes a case where statefulset rollouts could get stuck in graceful shutdown when the new pods come up. [[GH-4059](https://github.com/hashicorp/consul-k8s/issues/4059)] -## 1.3.7 (July 16, 2024) - -SECURITY: - -* Upgrade go version to 1.22.5 to address [CVE-2024-24791](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-24791) [[GH-4154](https://github.com/hashicorp/consul-k8s/issues/4154)] -* Upgrade go-retryablehttp to v0.7.7 to address [GHSA-v6v8-xj6m-xwqh](https://github.com/advisories/GHSA-v6v8-xj6m-xwqh) [[GH-4169](https://github.com/hashicorp/consul-k8s/issues/4169)] - -IMPROVEMENTS: - -* upgrade go version to v1.22.4. 
[[GH-4085](https://github.com/hashicorp/consul-k8s/issues/4085)] -* partition-init: Role no longer includes unnecessary access to Secrets resource. [[GH-4053](https://github.com/hashicorp/consul-k8s/issues/4053)] - -BUG FIXES: - -* api-gateway: fix issue where API Gateway specific acl roles/policy were not being cleaned up on deletion of an api-gateway [[GH-4060](https://github.com/hashicorp/consul-k8s/issues/4060)] -* cni: fix incorrect release version due to unstable submodule pinning [[GH-4091](https://github.com/hashicorp/consul-k8s/issues/4091)] -* endpoints-controller: graceful shutdown logic should not run on a new pod with the same name. Fixes a case where statefulset rollouts could get stuck in graceful shutdown when the new pods come up. [[GH-4059](https://github.com/hashicorp/consul-k8s/issues/4059)] - -## 1.1.14 (July 16, 2024) - -SECURITY: - -* Upgrade go version to 1.22.5 to address [CVE-2024-24791](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-24791) [[GH-4154](https://github.com/hashicorp/consul-k8s/issues/4154)] -* Upgrade go-retryablehttp to v0.7.7 to address [GHSA-v6v8-xj6m-xwqh](https://github.com/advisories/GHSA-v6v8-xj6m-xwqh) [[GH-4169](https://github.com/hashicorp/consul-k8s/issues/4169)] - -IMPROVEMENTS: - -* upgrade go version to v1.22.4. [[GH-4085](https://github.com/hashicorp/consul-k8s/issues/4085)] -* partition-init: Role no longer includes unnecessary access to Secrets resource. [[GH-4053](https://github.com/hashicorp/consul-k8s/issues/4053)] - -BUG FIXES: - -* cni: fix incorrect release version due to unstable submodule pinning [[GH-4091](https://github.com/hashicorp/consul-k8s/issues/4091)] - -## 1.5.0 (June 13, 2024) - -> NOTE: Consul K8s 1.5.x is compatible with Consul 1.19.x and Consul Dataplane 1.5.x. Refer to our [compatibility matrix](https://developer.hashicorp.com/consul/docs/k8s/compatibility) for more info. 
- -BREAKING CHANGES: - -* api-gateway: The api-gateway stanza located under .Values.api-gateway was deprecated in -1.16.0 of Consul and is being removed as of 1.19.0 in favor of connectInject.apiGateway. [[GH-3718](https://github.com/hashicorp/consul-k8s/issues/3718)] - -FEATURES: - -* control-plane: Add the ability to register services via CRD. [[GH-3943](https://github.com/hashicorp/consul-k8s/issues/3943)] -* gateways: api-gateway now uses the Consul file-system-certificate by default for TLS [[GH-3767](https://github.com/hashicorp/consul-k8s/issues/3767)] -* helm: adds ability to set the Image Pull Policy for all Consul images (consul, consul-k8s, consul-dataplane, consul-telemetry-collector) [[GH-3991](https://github.com/hashicorp/consul-k8s/issues/3991)] - -IMPROVEMENTS: - -* upgrade go version to v1.22.4. [[GH-4085](https://github.com/hashicorp/consul-k8s/issues/4085)] -* cni: package `consul-cni` as .deb and .rpm files [[GH-4040](https://github.com/hashicorp/consul-k8s/issues/4040)] -* helm: Add readOnlyRootFilesystem to the default restricted security context when runnning `consul-k8s` in a restricted namespaces. [[GH-2909](https://github.com/hashicorp/consul-k8s/issues/2909)] - -BUG FIXES: - -* api-gateway: fix bug where multiple logical APIGateways would share the same ACL policy. [[GH-4003](https://github.com/hashicorp/consul-k8s/issues/4003)] -* cni: fix incorrect release version due to unstable submodule pinning [[GH-4091](https://github.com/hashicorp/consul-k8s/issues/4091)] -* helm: (datadog integration) updated `server-statefulset.yaml` templating to handle custom Unix Domain Socket paths. 
[[GH-3635](https://github.com/hashicorp/consul-k8s/issues/3635)] -* helm: bug fix for `prometheus.io` annotation omission while using datadog integration with openmetrics/prometheus and consul integration checks [[GH-3685](https://github.com/hashicorp/consul-k8s/issues/3685)] -* helm: corrected datadog openmetrics and consul-checks consul server URLs set during automation to use full consul deployment release name [[GH-3685](https://github.com/hashicorp/consul-k8s/issues/3685)] - -## 1.4.3 (May 24, 2024) - -IMPROVEMENTS: - -* upgrade go version to v1.22.3. [[GH-3994](https://github.com/hashicorp/consul-k8s/issues/3994)] -* Bump Dockerfile base image for `consul-k8s-control-plane` to `alpine:3.19`. [[GH-4016](https://github.com/hashicorp/consul-k8s/issues/4016)] - -## 1.3.6 (May 24, 2024) - -IMPROVEMENTS: - -* upgrade go version to v1.22.3. [[GH-3994](https://github.com/hashicorp/consul-k8s/issues/3994)] -* Bump Dockerfile base image for `consul-k8s-control-plane` to `alpine:3.19`. [[GH-4016](https://github.com/hashicorp/consul-k8s/issues/4016)] - -## 1.2.9 (May 24, 2024) - -IMPROVEMENTS: - -* upgrade go version to v1.22.3. [[GH-3994](https://github.com/hashicorp/consul-k8s/issues/3994)] -* Bump Dockerfile base image for `consul-k8s-control-plane` to `alpine:3.19`. [[GH-4016](https://github.com/hashicorp/consul-k8s/issues/4016)] - -## 1.1.13 (May 24, 2024) - -IMPROVEMENTS: - -* upgrade go version to v1.22.3. [[GH-3994](https://github.com/hashicorp/consul-k8s/issues/3994)] -* Bump Dockerfile base image for `consul-k8s-control-plane` to `alpine:3.19`. [[GH-4016](https://github.com/hashicorp/consul-k8s/issues/4016)] - - ## 1.4.2 (May 20, 2024) SECURITY: -* Upgrade Go to use 1.21.10. This addresses CVEs - [CVE-2024-24787](https://nvd.nist.gov/vuln/detail/CVE-2024-24787) and - [CVE-2024-24788](https://nvd.nist.gov/vuln/detail/CVE-2024-24788) [[GH-3980](https://github.com/hashicorp/consul-k8s/issues/3980)] +* Upgrade Go to use 1.21.10. 
This addresses CVEs +[CVE-2024-24787](https://nvd.nist.gov/vuln/detail/CVE-2024-24787) and +[CVE-2024-24788](https://nvd.nist.gov/vuln/detail/CVE-2024-24788) [[GH-3980](https://github.com/hashicorp/consul-k8s/issues/3980)] * Upgrade `helm/v3` to 3.14.4. This resolves the following security vulnerabilities: - [CVE-2024-25620](https://osv.dev/vulnerability/CVE-2024-25620) - [CVE-2024-26147](https://osv.dev/vulnerability/CVE-2024-26147) [[GH-3935](https://github.com/hashicorp/consul-k8s/issues/3935)] +[CVE-2024-25620](https://osv.dev/vulnerability/CVE-2024-25620) +[CVE-2024-26147](https://osv.dev/vulnerability/CVE-2024-26147) [[GH-3935](https://github.com/hashicorp/consul-k8s/issues/3935)] * Upgrade to use Go `1.21.9`. This resolves CVE - [CVE-2023-45288](https://nvd.nist.gov/vuln/detail/CVE-2023-45288) (`http2`). [[GH-3893](https://github.com/hashicorp/consul-k8s/issues/3893)] +[CVE-2023-45288](https://nvd.nist.gov/vuln/detail/CVE-2023-45288) (`http2`). [[GH-3893](https://github.com/hashicorp/consul-k8s/issues/3893)] * Upgrade to use golang.org/x/net `v0.24.0`. This resolves CVE - [CVE-2023-45288](https://nvd.nist.gov/vuln/detail/CVE-2023-45288) (`x/net`). [[GH-3893](https://github.com/hashicorp/consul-k8s/issues/3893)] +[CVE-2023-45288](https://nvd.nist.gov/vuln/detail/CVE-2023-45288) (`x/net`). [[GH-3893](https://github.com/hashicorp/consul-k8s/issues/3893)] FEATURES: @@ -158,8 +46,8 @@ IMPROVEMENTS: * control-plane: Add support for receiving iptables configuration via CNI arguments, to support Nomad transparent proxy [[GH-3795](https://github.com/hashicorp/consul-k8s/issues/3795)] * control-plane: Remove anyuid Security Context Constraints (SCC) requirement in OpenShift. [[GH-3813](https://github.com/hashicorp/consul-k8s/issues/3813)] * helm: only create the default Prometheus path annotation when it's not already specified within the component-specific - annotations. 
For example if the `client.annotations` value sets prometheus.io/path annotation, don't overwrite it with - the default value. [[GH-3846](https://github.com/hashicorp/consul-k8s/issues/3846)] +annotations. For example if the `client.annotations` value sets prometheus.io/path annotation, don't overwrite it with +the default value. [[GH-3846](https://github.com/hashicorp/consul-k8s/issues/3846)] * helm: support sync-lb-services-endpoints flag for syncCatalog [[GH-3905](https://github.com/hashicorp/consul-k8s/issues/3905)] * terminating-gateways: Remove unnecessary permissions from terminating gateways role [[GH-3928](https://github.com/hashicorp/consul-k8s/issues/3928)] @@ -172,93 +60,6 @@ BUG FIXES: * control-plane: fix a panic when an upstream annotation is malformed. [[GH-3956](https://github.com/hashicorp/consul-k8s/issues/3956)] * connect-inject: Fixed issue where on restart, if a managed-gateway-acl-role already existed the container would error [[GH-3978](https://github.com/hashicorp/consul-k8s/issues/3978)] -## 1.3.5 (May 20, 2024) - -SECURITY: - -* Upgrade Go to use 1.21.10. This addresses CVEs - [CVE-2024-24787](https://nvd.nist.gov/vuln/detail/CVE-2024-24787) and - [CVE-2024-24788](https://nvd.nist.gov/vuln/detail/CVE-2024-24788) [[GH-3980](https://github.com/hashicorp/consul-k8s/issues/3980)] -* Upgrade `helm/v3` to 3.14.4. This resolves the following security vulnerabilities: - [CVE-2024-25620](https://osv.dev/vulnerability/CVE-2024-25620) - [CVE-2024-26147](https://osv.dev/vulnerability/CVE-2024-26147) [[GH-3935](https://github.com/hashicorp/consul-k8s/issues/3935)] -* Upgrade to use Go `1.21.9`. This resolves CVE - [CVE-2023-45288](https://nvd.nist.gov/vuln/detail/CVE-2023-45288) (`http2`). [[GH-3902](https://github.com/hashicorp/consul-k8s/issues/3902)] -* Upgrade to use golang.org/x/net `v0.24.0`. This resolves CVE - [CVE-2023-45288](https://nvd.nist.gov/vuln/detail/CVE-2023-45288) (`x/net`). 
[[GH-3902](https://github.com/hashicorp/consul-k8s/issues/3902)] - -FEATURES: - -* Add support for configuring graceful startup proxy lifecycle management settings. [[GH-3878](https://github.com/hashicorp/consul-k8s/issues/3878)] - -IMPROVEMENTS: - -* control-plane: support , and <\n> as upstream separators. [[GH-3956](https://github.com/hashicorp/consul-k8s/issues/3956)] -* ConfigEntries controller: Only error for config entries from different datacenters when the config entries are different [[GH-3873](https://github.com/hashicorp/consul-k8s/issues/3873)] -* control-plane: Remove anyuid Security Context Constraints (SCC) requirement in OpenShift. [[GH-3813](https://github.com/hashicorp/consul-k8s/issues/3813)] -* helm: only create the default Prometheus path annotation when it's not already specified within the component-specific - annotations. For example if the `client.annotations` value sets prometheus.io/path annotation, don't overwrite it with - the default value. [[GH-3846](https://github.com/hashicorp/consul-k8s/issues/3846)] -* helm: support sync-lb-services-endpoints flag for syncCatalog [[GH-3905](https://github.com/hashicorp/consul-k8s/issues/3905)] -* terminating-gateways: Remove unnecessary permissions from terminating gateways role [[GH-3928](https://github.com/hashicorp/consul-k8s/issues/3928)] - -BUG FIXES: - -* Create Consul service with mode transparent-proxy even when a cluster IP is not assigned to the service.. [[GH-3974](https://github.com/hashicorp/consul-k8s/issues/3974)] -* api-gateway: Fix order of initialization for creating ACL role/policy to avoid error logs in consul when upgrading between versions. [[GH-3918](https://github.com/hashicorp/consul-k8s/issues/3918)] -* api-gateway: fix bug where multiple logical APIGateways would share the same ACL policy. [[GH-4001](https://github.com/hashicorp/consul-k8s/issues/4001)] -* control-plane: fix a panic when an upstream annotation is malformed. 
[[GH-3956](https://github.com/hashicorp/consul-k8s/issues/3956)] -* connect-inject: Fixed issue where on restart, if a managed-gateway-acl-role already existed the container would error [[GH-3978](https://github.com/hashicorp/consul-k8s/issues/3978)] - -## 1.2.8 (May 20, 2024) - -SECURITY: - -* Upgrade Go to use 1.21.10. This addresses CVEs - [CVE-2024-24787](https://nvd.nist.gov/vuln/detail/CVE-2024-24787) and - [CVE-2024-24788](https://nvd.nist.gov/vuln/detail/CVE-2024-24788) [[GH-3980](https://github.com/hashicorp/consul-k8s/issues/3980)] -* Upgrade `helm/v3` to 3.14.4. This resolves the following security vulnerabilities: - [CVE-2024-25620](https://osv.dev/vulnerability/CVE-2024-25620) - [CVE-2024-26147](https://osv.dev/vulnerability/CVE-2024-26147) [[GH-3935](https://github.com/hashicorp/consul-k8s/issues/3935)] -* Upgrade to use Go `1.21.9`. This resolves CVE - [CVE-2023-45288](https://nvd.nist.gov/vuln/detail/CVE-2023-45288) (`http2`). [[GH-3901](https://github.com/hashicorp/consul-k8s/issues/3901)] -* Upgrade to use golang.org/x/net `v0.24.0`. This resolves CVE - [CVE-2023-45288](https://nvd.nist.gov/vuln/detail/CVE-2023-45288) (`x/net`). [[GH-3901](https://github.com/hashicorp/consul-k8s/issues/3901)] - -IMPROVEMENTS: - -* ConfigEntries controller: Only error for config entries from different datacenters when the config entries are different [[GH-3873](https://github.com/hashicorp/consul-k8s/issues/3873)] -* control-plane: Remove anyuid Security Context Constraints (SCC) requirement in OpenShift. [[GH-3813](https://github.com/hashicorp/consul-k8s/issues/3813)] -* helm: only create the default Prometheus path annotation when it's not already specified within the component-specific - annotations. For example if the `client.annotations` value sets prometheus.io/path annotation, don't overwrite it with - the default value. 
[[GH-3846](https://github.com/hashicorp/consul-k8s/issues/3846)] -* helm: support sync-lb-services-endpoints flag for syncCatalog [[GH-3905](https://github.com/hashicorp/consul-k8s/issues/3905)] - -BUG FIXES: - -* api-gateway: Fix order of initialization for creating ACL role/policy to avoid error logs in consul when upgrading between versions. [[GH-3918](https://github.com/hashicorp/consul-k8s/issues/3918)] -* api-gateway: fix bug where multiple logical APIGateways would share the same ACL policy. [[GH-4002](https://github.com/hashicorp/consul-k8s/issues/4002)] -* connect-inject: Fixed issue where on restart, if a managed-gateway-acl-role already existed the container would error [[GH-3978](https://github.com/hashicorp/consul-k8s/issues/3978)] - -## 1.1.12 (May 20, 2024) - -SECURITY: - -* Upgrade Go to use 1.21.10. This addresses CVEs - [CVE-2024-24787](https://nvd.nist.gov/vuln/detail/CVE-2024-24787) and - [CVE-2024-24788](https://nvd.nist.gov/vuln/detail/CVE-2024-24788) [[GH-3980](https://github.com/hashicorp/consul-k8s/issues/3980)] -* Upgrade `helm/v3` to 3.14.4. This resolves the following security vulnerabilities: - [CVE-2024-25620](https://osv.dev/vulnerability/CVE-2024-25620) - [CVE-2024-26147](https://osv.dev/vulnerability/CVE-2024-26147) [[GH-3935](https://github.com/hashicorp/consul-k8s/issues/3935)] -* Upgrade to use Go `1.21.9`. This resolves CVE - [CVE-2023-45288](https://nvd.nist.gov/vuln/detail/CVE-2023-45288) (`http2`). [[GH-3900](https://github.com/hashicorp/consul-k8s/issues/3900)] -* Upgrade to use golang.org/x/net `v0.24.0`. This resolves CVE - [CVE-2023-45288](https://nvd.nist.gov/vuln/detail/CVE-2023-45288) (`x/net`). 
[[GH-3900](https://github.com/hashicorp/consul-k8s/issues/3900)] - -IMPROVEMENTS: - -* ConfigEntries controller: Only error for config entries from different datacenters when the config entries are different [[GH-3873](https://github.com/hashicorp/consul-k8s/issues/3873)] - ## 1.4.1 (March 28, 2024) SECURITY: @@ -287,125 +88,6 @@ tokens were invalidated immediately on pod entering Terminating state. [[GH-3736 * control-plane: fix an issue where ACL tokens would prematurely be deleted and services would be deregistered if there was a K8s API error fetching the pod. [[GH-3758](https://github.com/hashicorp/consul-k8s/issues/3758)] -## 1.3.4 (March 28, 2024) - -SECURITY: - -* Update `google.golang.org/protobuf` to v1.33.0 to address [CVE-2024-24786](https://nvd.nist.gov/vuln/detail/CVE-2024-24786). [[GH-3719](https://github.com/hashicorp/consul-k8s/issues/3719)] -* Update the Consul Build Go base image to `alpine3.19`. This resolves CVEs -[CVE-2023-52425](https://nvd.nist.gov/vuln/detail/CVE-2023-52425) -[CVE-2023-52426⁠](https://nvd.nist.gov/vuln/detail/CVE-2023-52426) [[GH-3741](https://github.com/hashicorp/consul-k8s/issues/3741)] -* Upgrade `helm/v3` to 3.11.3. This resolves the following security vulnerabilities: -[CVE-2023-25165](https://osv.dev/vulnerability/CVE-2023-25165) -[CVE-2022-23524](https://osv.dev/vulnerability/CVE-2022-23524) -[CVE-2022-23526](https://osv.dev/vulnerability/CVE-2022-23526) -[CVE-2022-23525](https://osv.dev/vulnerability/CVE-2022-23525) [[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] -* Upgrade docker/distribution to 2.8.3+incompatible (latest) to resolve [CVE-2023-2253](https://osv.dev/vulnerability/CVE-2023-2253). [[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] -* Upgrade docker/docker to 25.0.3+incompatible (latest) to resolve [GHSA-jq35-85cj-fj4p](https://osv.dev/vulnerability/GHSA-jq35-85cj-fj4p). 
[[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] -* Upgrade filepath-securejoin to 0.2.4 (latest) to resolve [GO-2023-2048](https://osv.dev/vulnerability/GO-2023-2048). [[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] -* Upgrade to use Go `1.21.8`. This resolves CVEs -[CVE-2024-24783](https://nvd.nist.gov/vuln/detail/CVE-2024-24783) (`crypto/x509`). -[CVE-2023-45290](https://nvd.nist.gov/vuln/detail/CVE-2023-45290) (`net/http`). -[CVE-2023-45289](https://nvd.nist.gov/vuln/detail/CVE-2023-45289) (`net/http`, `net/http/cookiejar`). -[CVE-2024-24785](https://nvd.nist.gov/vuln/detail/CVE-2024-24785) (`html/template`). -[CVE-2024-24784](https://nvd.nist.gov/vuln/detail/CVE-2024-24784) (`net/mail`). [[GH-3741](https://github.com/hashicorp/consul-k8s/issues/3741)] -* security: upgrade containerd to 1.7.13 (latest) to resolve [GHSA-7ww5-4wqc-m92c](https://osv.dev/vulnerability/GO-2023-2412). [[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] - -IMPROVEMENTS: - -* catalog: Topology zone and region information is now read from the Kubernetes endpoints and associated node and added to registered consul services under Metadata. [[GH-3693](https://github.com/hashicorp/consul-k8s/issues/3693)] -* control-plane: publish `consul-k8s-control-plane` and `consul-k8s-control-plane-fips` images to official HashiCorp AWS ECR. [[GH-3668](https://github.com/hashicorp/consul-k8s/issues/3668)] - -BUG FIXES: - -* api-gateway: Fix order of initialization for creating ACL role/policy to avoid error logs in consul. [[GH-3779](https://github.com/hashicorp/consul-k8s/issues/3779)] -* control-plane: fix an issue where ACL token cleanup did not respect a pod's GracefulShutdownPeriodSeconds and -tokens were invalidated immediately on pod entering Terminating state. 
[[GH-3736](https://github.com/hashicorp/consul-k8s/issues/3736)] -* control-plane: fix an issue where ACL tokens would prematurely be deleted and services would be deregistered if there -was a K8s API error fetching the pod. [[GH-3758](https://github.com/hashicorp/consul-k8s/issues/3758)] - -NOTES: - -* build: Releases will now also be available as Debian and RPM packages for the arm64 architecture, refer to the -[Official Packaging Guide](https://www.hashicorp.com/official-packaging-guide) for more information. [[GH-3428](https://github.com/hashicorp/consul-k8s/issues/3428)] - -## 1.2.7 (March 28, 2024) - -SECURITY: - -* Update `google.golang.org/protobuf` to v1.33.0 to address [CVE-2024-24786](https://nvd.nist.gov/vuln/detail/CVE-2024-24786). [[GH-3719](https://github.com/hashicorp/consul-k8s/issues/3719)] -* Update the Consul Build Go base image to `alpine3.19`. This resolves CVEs -[CVE-2023-52425](https://nvd.nist.gov/vuln/detail/CVE-2023-52425) -[CVE-2023-52426⁠](https://nvd.nist.gov/vuln/detail/CVE-2023-52426) [[GH-3741](https://github.com/hashicorp/consul-k8s/issues/3741)] -* Upgrade `helm/v3` to 3.11.3. This resolves the following security vulnerabilities: -[CVE-2023-25165](https://osv.dev/vulnerability/CVE-2023-25165) -[CVE-2022-23524](https://osv.dev/vulnerability/CVE-2022-23524) -[CVE-2022-23526](https://osv.dev/vulnerability/CVE-2022-23526) -[CVE-2022-23525](https://osv.dev/vulnerability/CVE-2022-23525) [[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] -* Upgrade docker/distribution to 2.8.3+incompatible (latest) to resolve [CVE-2023-2253](https://osv.dev/vulnerability/CVE-2023-2253). [[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] -* Upgrade docker/docker to 25.0.3+incompatible (latest) to resolve [GHSA-jq35-85cj-fj4p](https://osv.dev/vulnerability/GHSA-jq35-85cj-fj4p). 
[[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] -* Upgrade filepath-securejoin to 0.2.4 (latest) to resolve [GO-2023-2048](https://osv.dev/vulnerability/GO-2023-2048). [[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] -* Upgrade to use Go `1.21.8`. This resolves CVEs -[CVE-2024-24783](https://nvd.nist.gov/vuln/detail/CVE-2024-24783) (`crypto/x509`). -[CVE-2023-45290](https://nvd.nist.gov/vuln/detail/CVE-2023-45290) (`net/http`). -[CVE-2023-45289](https://nvd.nist.gov/vuln/detail/CVE-2023-45289) (`net/http`, `net/http/cookiejar`). -[CVE-2024-24785](https://nvd.nist.gov/vuln/detail/CVE-2024-24785) (`html/template`). -[CVE-2024-24784](https://nvd.nist.gov/vuln/detail/CVE-2024-24784) (`net/mail`). [[GH-3741](https://github.com/hashicorp/consul-k8s/issues/3741)] -* security: upgrade containerd to 1.7.13 (latest) to resolve [GHSA-7ww5-4wqc-m92c](https://osv.dev/vulnerability/GO-2023-2412). [[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] - -IMPROVEMENTS: - -* catalog: Topology zone and region information is now read from the Kubernetes endpoints and associated node and added to registered consul services under Metadata. [[GH-3693](https://github.com/hashicorp/consul-k8s/issues/3693)] -* control-plane: publish `consul-k8s-control-plane` and `consul-k8s-control-plane-fips` images to official HashiCorp AWS ECR. [[GH-3668](https://github.com/hashicorp/consul-k8s/issues/3668)] - -BUG FIXES: - -* api-gateway: Fix order of initialization for creating ACL role/policy to avoid error logs in consul. [[GH-3779](https://github.com/hashicorp/consul-k8s/issues/3779)] -* control-plane: fix an issue where ACL token cleanup did not respect a pod's GracefulShutdownPeriodSeconds and -tokens were invalidated immediately on pod entering Terminating state. 
[[GH-3736](https://github.com/hashicorp/consul-k8s/issues/3736)] -* control-plane: fix an issue where ACL tokens would prematurely be deleted and services would be deregistered if there -was a K8s API error fetching the pod. [[GH-3758](https://github.com/hashicorp/consul-k8s/issues/3758)] - -NOTES: - -* build: Releases will now also be available as Debian and RPM packages for the arm64 architecture, refer to the -[Official Packaging Guide](https://www.hashicorp.com/official-packaging-guide) for more information. [[GH-3428](https://github.com/hashicorp/consul-k8s/issues/3428)] - -## 1.1.11 (March 28, 2024) - -SECURITY: - -* Update `google.golang.org/protobuf` to v1.33.0 to address [CVE-2024-24786](https://nvd.nist.gov/vuln/detail/CVE-2024-24786). [[GH-3719](https://github.com/hashicorp/consul-k8s/issues/3719)] -* Update the Consul Build Go base image to `alpine3.19`. This resolves CVEs -[CVE-2023-52425](https://nvd.nist.gov/vuln/detail/CVE-2023-52425) -[CVE-2023-52426⁠](https://nvd.nist.gov/vuln/detail/CVE-2023-52426) [[GH-3741](https://github.com/hashicorp/consul-k8s/issues/3741)] -* Upgrade `helm/v3` to 3.11.3. This resolves the following security vulnerabilities: -[CVE-2023-25165](https://osv.dev/vulnerability/CVE-2023-25165) -[CVE-2022-23524](https://osv.dev/vulnerability/CVE-2022-23524) -[CVE-2022-23526](https://osv.dev/vulnerability/CVE-2022-23526) -[CVE-2022-23525](https://osv.dev/vulnerability/CVE-2022-23525) [[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] -* Upgrade docker/distribution to 2.8.3+incompatible (latest) to resolve [CVE-2023-2253](https://osv.dev/vulnerability/CVE-2023-2253). [[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] -* Upgrade docker/docker to 25.0.3+incompatible (latest) to resolve [GHSA-jq35-85cj-fj4p](https://osv.dev/vulnerability/GHSA-jq35-85cj-fj4p). 
[[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] -* Upgrade filepath-securejoin to 0.2.4 (latest) to resolve [GO-2023-2048](https://osv.dev/vulnerability/GO-2023-2048). [[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] -* Upgrade to use Go `1.21.8`. This resolves CVEs -[CVE-2024-24783](https://nvd.nist.gov/vuln/detail/CVE-2024-24783) (`crypto/x509`). -[CVE-2023-45290](https://nvd.nist.gov/vuln/detail/CVE-2023-45290) (`net/http`). -[CVE-2023-45289](https://nvd.nist.gov/vuln/detail/CVE-2023-45289) (`net/http`, `net/http/cookiejar`). -[CVE-2024-24785](https://nvd.nist.gov/vuln/detail/CVE-2024-24785) (`html/template`). -[CVE-2024-24784](https://nvd.nist.gov/vuln/detail/CVE-2024-24784) (`net/mail`). [[GH-3741](https://github.com/hashicorp/consul-k8s/issues/3741)] -* security: upgrade containerd to 1.7.13 (latest) to resolve [GHSA-7ww5-4wqc-m92c](https://osv.dev/vulnerability/GO-2023-2412). [[GH-3625](https://github.com/hashicorp/consul-k8s/issues/3625)] - -IMPROVEMENTS: - -* control-plane: publish `consul-k8s-control-plane` and `consul-k8s-control-plane-fips` images to official HashiCorp AWS ECR. [[GH-3668](https://github.com/hashicorp/consul-k8s/issues/3668)] - -BUG FIXES: - -* control-plane: fix an issue where ACL token cleanup did not respect a pod's GracefulShutdownPeriodSeconds and -tokens were invalidated immediately on pod entering Terminating state. [[GH-3736](https://github.com/hashicorp/consul-k8s/issues/3736)] -* control-plane: fix an issue where ACL tokens would prematurely be deleted and services would be deregistered if there -was a K8s API error fetching the pod. [[GH-3758](https://github.com/hashicorp/consul-k8s/issues/3758)] - ## 1.4.0 (February 29, 2024) > NOTE: Consul K8s 1.4.x is compatible with Consul 1.18.x and Consul Dataplane 1.4.x. Refer to our [compatibility matrix](https://developer.hashicorp.com/consul/docs/k8s/compatibility) for more info. 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cca5d28116..5b06c27d8a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,7 +14,7 @@ 1. [Webhook](#webhook) 1. [Update command.go](#update-commandgo) 1. [Generating YAML](#generating-yaml) - 1. [Updating consul-helm](#updating-helm-chart) + 1. [Updating consul-helm](#updating-consul-helm) 1. [Testing a new CRD](#testing-a-new-crd) 1. [Update Consul K8s acceptance tests](#update-consul-k8s-acceptance-tests) 1. [Adding a new ACL Token](#adding-a-new-acl-token) @@ -340,14 +340,7 @@ rebase the branch on main, fixing any conflicts along the way before the code ca 1. Replace the names 1. Ensure you've correctly replaced the names in the kubebuilder annotation, ensure the plurality is correct ```go - // +kubebuilder:webhook:verbs=create;update,path=/mutate-v1alpha1-ingressgateway,mutating=true,failurePolicy=fail,groups=consul.hashicorp.com,resources=ingressgateways,versions=v1alpha1,name=mutate-ingressgateway.consul.hashicorp.com,sideEffects=None,admissionReviewVersions=v1beta1;v1 - ``` -1. Ensure you update the path to match the annotation in the `SetupWithManager` method: - ```go - func (v *IngressGatewayWebhook) SetupWithManager(mgr ctrl.Manager) { - v.decoder = admission.NewDecoder(mgr.GetScheme()) - mgr.GetWebhookServer().Register("/mutate-v1alpha1-ingressgateway", &admission.Webhook{Handler: v}) -} + // +kubebuilder:webhook:verbs=create;update,path=/mutate-v1alpha1-ingressgateway,mutating=true,failurePolicy=fail,groups=consul.hashicorp.com,resources=ingressgateways,versions=v1alpha1,name=mutate-ingressgateway.consul.hashicorp.com,webhookVersions=v1beta1,sideEffects=None ``` ### Update command.go @@ -369,13 +362,16 @@ rebase the branch on main, fixing any conflicts along the way before the code ca return 1 } ``` -1. Update `control-plane/subcommand/inject-connect/command.go` and add your webhook +1. 
Update `control-plane/subcommand/inject-connect/command.go` and add your webhook (the path should match the kubebuilder annotation): ```go - (&v1alpha1.IngressGatewayWebhook - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName(common.IngressGateway), - ConsulMeta: consulMeta, - }).SetupWithManager(mgr) + mgr.GetWebhookServer().Register("/mutate-v1alpha1-ingressgateway", + &webhook.Admission{Handler: &v1alpha1.IngressGatewayWebhook{ + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.IngressGateway), + EnableConsulNamespaces: c.flagEnableNamespaces, + EnableNSMirroring: c.flagEnableNSMirroring, + }}) ``` ### Generating YAML @@ -961,23 +957,17 @@ The tests are organized like this : ```shell demo $ tree -L 1 -d acceptance/tests acceptance/tests -├── api-gateway ├── basic ├── cli -├── cloud ├── config-entries ├── connect ├── consul-dns -├── datadog ├── example ├── fixtures ├── ingress-gateway ├── metrics ├── partitions ├── peering -├── sameness -├── segments -├── server ├── snapshot-agent ├── sync ├── terminating-gateway @@ -1015,9 +1005,7 @@ $ kind create cluster --name=dc1 && kind create cluster --name=dc2 `-consul-k8s-image=` && `-consul-image=` * You can set custom helm flags by modifying the test file directly in the respective directory. -Finally, you have two options on how you can run your test: -1. Take the following steps, this will run the test through to completion but not teardown any resources created by the test so you can inspect the state of the cluster -at that point. You will be responsible for cleaning up the resources or deleting the cluster entirely when you're done. +Finally, run the test like shown above: ```shell $ cd acceptance/tests $ go test -run Vault_WANFederationViaGateways ./vault/... 
-p 1 -timeout 2h -failfast -use-kind -no-cleanup-on-failure -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2 -enable-multi-cluster -debug-directory=/tmp/debug @@ -1026,36 +1014,6 @@ You can interact with the running kubernetes clusters now using `kubectl [COMMAN * `kind delete clusters --all` is helpful for cleanup! -2. The other option is to use the helper method in the framework: `helpers.WaitForInput(t)` at the spot in your acceptance test where you would like to pause execution to inspect the cluster. This will pause the test execution until you execute a request to `localhost:38501` which tells the test to continue running, you can override the port value used by setting the `CONSUL_K8S_TEST_PAUSE_PORT` environment variable to a port of your choosing. When running the tests with the `-v` flag you will see a log output of the endpoint that the test is waiting on. - -First you'll want to add the helper method to your test file: - -```go -import "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" - -func TestSomeTest(t *testing.T) { - // stuff to setup - - // test execution will pause here until the endpoint is hit - helpers.WaitForInput(t) - - // rest of test -} -``` - -Then run the tests (note the removal of the `-no-cleanup-on-failure` flag): -```shell -$ cd acceptance/tests -$ go test -run Vault_WANFederationViaGateways ./vault/... 
-p 1 -timeout 2h -failfast -use-kind -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2 -enable-multi-cluster -debug-directory=/tmp/debug -``` - -You can interact with the running kubernetes clusters now using `kubectl [COMMAND] --context=` - -When you're done interacting you can tell the test to continue by issuing a curl command to the endpoint (if you are using a non-default port for this test then replace the `38501` port value with the value you have set): -```shell -curl localhost:38501 -``` - ### Example Debugging session using the acceptance test framework to bootstrap and debug a Vault backed federated Consul installation: This test utilizes the `consul-k8s` acceptance test framework, with a custom consul-k8s branch which: * Modifies the acceptance test to use custom consul+consul-k8s images and sleeps at the end of the test to allow analysis. diff --git a/Makefile b/Makefile index d6ed9f815d..5458bffd53 100644 --- a/Makefile +++ b/Makefile @@ -9,8 +9,6 @@ KUBECTL_VERSION= $(shell ./control-plane/build-support/scripts/read-yaml-config. GO_MODULES := $(shell find . -name go.mod -exec dirname {} \; | sort) -GOTESTSUM_PATH?=$(shell command -v gotestsum) - ##@ Helm Targets .PHONY: gen-helm-docs @@ -99,32 +97,11 @@ control-plane-fips-dev-docker: ## Build consul-k8s-control-plane FIPS dev Docker .PHONY: control-plane-test control-plane-test: ## Run go test for the control plane. -ifeq ("$(GOTESTSUM_PATH)","") - cd control-plane && go test ./... -else - cd control-plane && \ - gotestsum \ - --format=short-verbose \ - --debug \ - --rerun-fails=3 \ - --packages="./..." -endif - + cd control-plane; go test ./... .PHONY: control-plane-ent-test control-plane-ent-test: ## Run go test with Consul enterprise tests. The consul binary in your PATH must be Consul Enterprise. -ifeq ("$(GOTESTSUM_PATH)","") - cd control-plane && go test ./... 
-tags=enterprise -else - cd control-plane && \ - gotestsum \ - --format=short-verbose \ - --debug \ - --rerun-fails=3 \ - --packages="./..." \ - -- \ - --tags enterprise -endif + cd control-plane; go test ./... -tags=enterprise .PHONY: control-plane-cov control-plane-cov: ## Run go test with code coverage. diff --git a/acceptance/framework/config/config.go b/acceptance/framework/config/config.go index 370e276bc7..4f9a8648c2 100644 --- a/acceptance/framework/config/config.go +++ b/acceptance/framework/config/config.go @@ -70,9 +70,6 @@ type TestConfig struct { EnableEnterprise bool EnterpriseLicense string - SkipDataDogTests bool - DatadogHelmChartVersion string - EnableOpenshift bool EnablePodSecurityPolicies bool diff --git a/acceptance/framework/connhelper/connect_helper.go b/acceptance/framework/connhelper/connect_helper.go index 2746b43348..bbcaf7aff9 100644 --- a/acceptance/framework/connhelper/connect_helper.go +++ b/acceptance/framework/connhelper/connect_helper.go @@ -294,7 +294,7 @@ func (c *ConnectHelper) SetupAppNamespace(t *testing.T) { } // CreateResolverRedirect creates a resolver that redirects to a static-server, a corresponding k8s service, -// and intentions. This helper is primarily used to ensure that the virtual-ips are persisted to consul properly. +// and intentions. This helper is primarly used to ensure that the virtual-ips are persisted to consul properly. 
func (c *ConnectHelper) CreateResolverRedirect(t *testing.T) { logger.Log(t, "creating resolver redirect") opts := c.KubectlOptsForApp(t) diff --git a/acceptance/framework/consul/helm_cluster.go b/acceptance/framework/consul/helm_cluster.go index 54032b2978..72d22441ff 100644 --- a/acceptance/framework/consul/helm_cluster.go +++ b/acceptance/framework/consul/helm_cluster.go @@ -13,6 +13,8 @@ import ( "github.com/gruntwork-io/terratest/modules/helm" terratestLogger "github.com/gruntwork-io/terratest/modules/logger" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" corev1 "k8s.io/api/core/v1" policyv1beta "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" @@ -26,6 +28,7 @@ import ( "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul-k8s/acceptance/framework/config" @@ -155,6 +158,7 @@ func (h *HelmCluster) Create(t *testing.T) { if h.ChartPath != "" { chartName = h.ChartPath } + // Retry the install in case previous tests have not finished cleaning up. retry.RunWith(&retry.Counter{Wait: 2 * time.Second, Count: 30}, t, func(r *retry.R) { err := helm.InstallE(r, h.helmOptions, chartName, h.releaseName) @@ -484,6 +488,26 @@ func (h *HelmCluster) CreatePortForwardTunnel(t *testing.T, remotePort int, rele return portforward.CreateTunnelToResourcePort(t, serverPod, remotePort, h.helmOptions.KubectlOptions, h.logger) } +// ResourceClient returns a resource service grpc client for the given helm release. 
+func (h *HelmCluster) ResourceClient(t *testing.T, secure bool, release ...string) (client pbresource.ResourceServiceClient) { + if secure { + panic("TODO: add support for secure resource client") + } + releaseName := h.releaseName + if len(release) > 0 { + releaseName = release[0] + } + + // TODO: get grpc port from somewhere + localTunnelAddr := h.CreatePortForwardTunnel(t, 8502, releaseName) + + // Create a grpc connection to the server pod. + grpcConn, err := grpc.Dial(localTunnelAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + resourceClient := pbresource.NewResourceServiceClient(grpcConn) + return resourceClient +} + func (h *HelmCluster) SetupConsulClient(t *testing.T, secure bool, release ...string) (client *api.Client, configAddress string) { t.Helper() diff --git a/acceptance/framework/datadog/datadog.go b/acceptance/framework/datadog/datadog.go deleted file mode 100644 index 5ffefae4a7..0000000000 --- a/acceptance/framework/datadog/datadog.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package datadog - -import ( - "context" - "fmt" - "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" - "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "testing" - "time" - - "github.com/hashicorp/consul-k8s/acceptance/framework/config" - "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" - "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - - "github.com/gruntwork-io/terratest/modules/helm" - terratestk8s "github.com/gruntwork-io/terratest/modules/k8s" - terratestLogger "github.com/gruntwork-io/terratest/modules/logger" - "github.com/hashicorp/consul-k8s/acceptance/framework/environment" - "k8s.io/client-go/kubernetes" -) - -const ( - releaseLabel = "app.kubernetes.io/name" - OperatorReleaseName = "datadog-operator" - DefaultHelmChartVersion = "1.4.0" - datadogSecretName = "datadog-secret" - datadogAPIKey = "api-key" - datadogAppKey = "app-key" - datadogFakeAPIKey = "DD_FAKEAPIKEY" - datadogFakeAPPKey = "DD_FAKEAPPKEY" -) - -type DatadogCluster struct { - ctx environment.TestContext - - helmOptions *helm.Options - releaseName string - - kubectlOptions *terratestk8s.KubectlOptions - - kubernetesClient kubernetes.Interface - - noCleanupOnFailure bool - noCleanup bool - debugDirectory string - logger terratestLogger.TestLogger -} - -// releaseLabelSelector returns label selector that selects all pods -// from a Datadog installation. 
-func (d *DatadogCluster) releaseLabelSelector() string { - return fmt.Sprintf("%s=%s", releaseLabel, d.releaseName) -} - -func NewDatadogCluster(t *testing.T, ctx environment.TestContext, cfg *config.TestConfig, releaseName string, releaseNamespace string, helmValues map[string]string) *DatadogCluster { - logger := terratestLogger.New(logger.TestLogger{}) - - configureNamespace(t, ctx.KubernetesClient(t), cfg, releaseNamespace) - - createOrUpdateDatadogSecret(t, ctx.KubernetesClient(t), cfg, releaseNamespace) - - kopts := ctx.KubectlOptionsForNamespace(releaseNamespace) - - values := defaultHelmValues() - - ddHelmChartVersion := DefaultHelmChartVersion - if cfg.DatadogHelmChartVersion != "" { - ddHelmChartVersion = cfg.DatadogHelmChartVersion - } - - helpers.MergeMaps(values, helmValues) - datadogHelmOpts := &helm.Options{ - SetValues: values, - KubectlOptions: kopts, - Logger: logger, - Version: ddHelmChartVersion, - } - - helm.AddRepo(t, datadogHelmOpts, "datadog", "https://helm.datadoghq.com") - // Ignoring the error from `helm repo update` as it could fail due to stale cache or unreachable servers and we're - // asserting a chart version on Install which would fail in an obvious way should this not succeed. 
- _, err := helm.RunHelmCommandAndGetOutputE(t, &helm.Options{}, "repo", "update") - if err != nil { - logger.Logf(t, "Unable to update helm repository, proceeding anyway: %s.", err) - } - - return &DatadogCluster{ - ctx: ctx, - helmOptions: datadogHelmOpts, - kubectlOptions: kopts, - kubernetesClient: ctx.KubernetesClient(t), - noCleanupOnFailure: cfg.NoCleanupOnFailure, - noCleanup: cfg.NoCleanup, - debugDirectory: cfg.DebugDirectory, - logger: logger, - releaseName: releaseName, - } -} - -func (d *DatadogCluster) Create(t *testing.T) { - t.Helper() - - helpers.Cleanup(t, d.noCleanupOnFailure, d.noCleanup, func() { - d.Destroy(t) - }) - - helm.Install(t, d.helmOptions, "datadog/datadog-operator", d.releaseName) - // Wait for the datadog-operator to become ready - k8s.WaitForAllPodsToBeReady(t, d.kubernetesClient, d.helmOptions.KubectlOptions.Namespace, d.releaseLabelSelector()) -} - -func (d *DatadogCluster) Destroy(t *testing.T) { - t.Helper() - - k8s.WritePodsDebugInfoIfFailed(t, d.kubectlOptions, d.debugDirectory, d.releaseLabelSelector()) - // Ignore the error returned by the helm delete here so that we can - // always idempotent clean up resources in the cluster. 
- _ = helm.DeleteE(t, d.helmOptions, d.releaseName, true) -} - -func defaultHelmValues() map[string]string { - return map[string]string{ - "replicaCount": "1", - "image.tag": DefaultHelmChartVersion, - "image.repository": "gcr.io/datadoghq/operator", - } -} - -func configureNamespace(t *testing.T, client kubernetes.Interface, cfg *config.TestConfig, namespace string) { - ctx := context.Background() - - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Labels: map[string]string{}, - }, - } - if cfg.EnableRestrictedPSAEnforcement { - ns.ObjectMeta.Labels["pod-security.kubernetes.io/enforce"] = "restricted" - ns.ObjectMeta.Labels["pod-security.kubernetes.io/enforce-version"] = "latest" - } - - _, createErr := client.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - if createErr == nil { - logger.Logf(t, "Created namespace %s", namespace) - return - } - - _, updateErr := client.CoreV1().Namespaces().Update(ctx, ns, metav1.UpdateOptions{}) - if updateErr == nil { - logger.Logf(t, "Updated namespace %s", namespace) - return - } - - require.Failf(t, "Failed to create or update namespace", "Namespace=%s, CreateError=%s, UpdateError=%s", namespace, createErr, updateErr) -} - -func createOrUpdateDatadogSecret(t *testing.T, client kubernetes.Interface, cfg *config.TestConfig, namespace string) { - secretMap := map[string]string{ - datadogAPIKey: datadogFakeAPIKey, - datadogAppKey: datadogFakeAPPKey, - } - createMultiKeyK8sSecret(t, client, cfg, namespace, datadogSecretName, secretMap) -} - -func createMultiKeyK8sSecret(t *testing.T, client kubernetes.Interface, cfg *config.TestConfig, namespace, secretName string, secretMap map[string]string) { - retry.RunWith(&retry.Counter{Wait: 2 * time.Second, Count: 15}, t, func(r *retry.R) { - _, err := client.CoreV1().Secrets(namespace).Get(context.Background(), secretName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - _, err := 
client.CoreV1().Secrets(namespace).Create(context.Background(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - }, - StringData: secretMap, - Type: corev1.SecretTypeOpaque, - }, metav1.CreateOptions{}) - require.NoError(r, err) - } else { - require.NoError(r, err) - } - }) - - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - _ = client.CoreV1().Secrets(namespace).Delete(context.Background(), secretName, metav1.DeleteOptions{}) - }) -} diff --git a/acceptance/framework/flags/flags.go b/acceptance/framework/flags/flags.go index c956c3f7e3..c68983fe8c 100644 --- a/acceptance/framework/flags/flags.go +++ b/acceptance/framework/flags/flags.go @@ -25,8 +25,6 @@ type TestFlags struct { flagEnableOpenshift bool - flagSkipDatadogTests bool - flagEnablePodSecurityPolicies bool flagEnableCNI bool @@ -157,9 +155,6 @@ func (t *TestFlags) init() { flag.BoolVar(&t.flagDisablePeering, "disable-peering", false, "If true, the peering tests will not run.") - flag.BoolVar(&t.flagSkipDatadogTests, "skip-datadog", false, - "If true, datadog acceptance tests will not run.") - if t.flagEnterpriseLicense == "" { t.flagEnterpriseLicense = os.Getenv("CONSUL_ENT_LICENSE") } @@ -203,9 +198,11 @@ func (t *TestFlags) TestConfigFromFlags() *config.TestConfig { // if the Version is empty consulVersion will be nil consulVersion, _ := version.NewVersion(t.flagConsulVersion) consulDataplaneVersion, _ := version.NewVersion(t.flagConsulDataplaneVersion) + //vaultserverVersion, _ := version.NewVersion(t.flagVaultServerVersion) kubeEnvs := config.NewKubeTestConfigList(t.flagKubeconfigs, t.flagKubecontexts, t.flagKubeNamespaces) c := &config.TestConfig{ + EnableEnterprise: t.flagEnableEnterprise, EnterpriseLicense: t.flagEnterpriseLicense, @@ -214,8 +211,6 @@ func (t *TestFlags) TestConfigFromFlags() *config.TestConfig { EnableOpenshift: t.flagEnableOpenshift, - SkipDataDogTests: t.flagSkipDatadogTests, - EnablePodSecurityPolicies: 
t.flagEnablePodSecurityPolicies, EnableCNI: t.flagEnableCNI, diff --git a/acceptance/framework/helpers/helpers.go b/acceptance/framework/helpers/helpers.go index e00e2926e3..0871532426 100644 --- a/acceptance/framework/helpers/helpers.go +++ b/acceptance/framework/helpers/helpers.go @@ -6,9 +6,7 @@ package helpers import ( "context" "encoding/json" - "errors" "fmt" - "net/http" "os" "os/exec" "os/signal" @@ -156,51 +154,13 @@ func MergeMaps(a, b map[string]string) { } } -type K8sOptions struct { - Options *k8s.KubectlOptions - NoCleanupOnFailure bool - NoCleanup bool - ConfigPath string -} - -type ConsulOptions struct { - ConsulClient *api.Client - Namespace string -} - -func RegisterExternalServiceCRD(t *testing.T, k8sOptions K8sOptions, consulOptions ConsulOptions) { - t.Helper() - t.Logf("Registering external service %s", k8sOptions.ConfigPath) - - if consulOptions.Namespace != "" { - logger.Logf(t, "creating the %s namespace in Consul", consulOptions.Namespace) - _, _, err := consulOptions.ConsulClient.Namespaces().Create(&api.Namespace{ - Name: consulOptions.Namespace, - }, nil) - require.NoError(t, err) - } - - // Register the external service - k8s.KubectlApply(t, k8sOptions.Options, k8sOptions.ConfigPath) - - Cleanup(t, k8sOptions.NoCleanupOnFailure, k8sOptions.NoCleanup, func() { - // Note: this delete command won't wait for pods to be fully terminated. - // This shouldn't cause any test pollution because the underlying - // objects are deployments, and so when other tests create these - // they should have different pod names. - k8s.KubectlDelete(t, k8sOptions.Options, k8sOptions.ConfigPath) - }) -} - // RegisterExternalService registers an external service to a virtual node in Consul for testing purposes. // This function takes a testing.T object, a Consul client, service namespace, service name, address, and port as // parameters. It registers the service with Consul, and if a namespace is provided, it also creates the namespace // in Consul. 
It uses the provided testing.T object to log registration details and verify the registration process. // If the registration fails, the test calling the function will fail. -// DEPRECATED: Use RegisterExternalServiceCRD instead. func RegisterExternalService(t *testing.T, consulClient *api.Client, namespace, name, address string, port int) { t.Helper() - t.Log("RegisterExternalService is DEPRECATED, use RegisterExternalServiceCRD instead") service := &api.AgentService{ ID: name, @@ -219,7 +179,7 @@ func RegisterExternalService(t *testing.T, consulClient *api.Client, namespace, require.NoError(t, err) } - logger.Log(t, fmt.Sprintf("registering the external service %s", name)) + logger.Log(t, "registering the external service %s", name) _, err := consulClient.Catalog().Register(&api.CatalogRegistration{ Node: "external", Address: address, @@ -360,52 +320,3 @@ func createCmdArgs(options *k8s.KubectlOptions) []string { } return cmdArgs } - -const DEFAULT_PAUSE_PORT = "38501" - -// WaitForInput starts a http server on a random port (which is output in the logs) and waits until you -// issue a request to that endpoint to continue the tests. This is useful for debugging tests that require -// inspecting the current state of a running cluster and you don't need to use long sleeps. 
-func WaitForInput(t *testing.T) { - t.Helper() - - listenerPort := os.Getenv("CONSUL_K8S_TEST_PAUSE_PORT") - - if listenerPort == "" { - listenerPort = DEFAULT_PAUSE_PORT - } - - mux := http.NewServeMux() - srv := &http.Server{ - Addr: fmt.Sprintf(":%s", listenerPort), - Handler: mux, - } - - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - defer func() { - err := r.Body.Close() - if err != nil { - t.Logf("error closing request body: %v", err) - } - }() - - w.WriteHeader(http.StatusOK) - - _, err := w.Write([]byte("input received\n")) - if err != nil { - t.Logf("writing body: %v", err) - } - - err = srv.Shutdown(context.Background()) - if err != nil { - t.Logf("error closing listener: %v", err) - } - - t.Log("input received, continuing test") - }) - - t.Logf("Waiting for input on http://localhost:%s", listenerPort) - if err := srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { - t.Fatal(err) - } -} diff --git a/acceptance/go.mod b/acceptance/go.mod index c5d6bf2aa8..e40fa6f8f6 100644 --- a/acceptance/go.mod +++ b/acceptance/go.mod @@ -9,8 +9,9 @@ require ( github.com/google/uuid v1.3.0 github.com/gruntwork-io/terratest v0.46.7 github.com/hashicorp/consul-k8s/control-plane v0.0.0-20240226161840-f3842c41cb2b - github.com/hashicorp/consul/api v1.29.1 - github.com/hashicorp/consul/sdk v0.16.1 + github.com/hashicorp/consul/api v1.28.2 + github.com/hashicorp/consul/proto-public v0.6.0 + github.com/hashicorp/consul/sdk v0.16.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-version v1.6.0 @@ -19,6 +20,7 @@ require ( github.com/hashicorp/vault/api v1.12.2 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/proto/otlp v1.0.0 + google.golang.org/grpc v1.58.3 google.golang.org/protobuf v1.33.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.28.9 @@ -68,7 +70,6 @@ require ( github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // 
indirect github.com/gruntwork-io/go-commons v0.8.0 // indirect - github.com/hashicorp/consul/proto-public v0.6.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-bexpr v0.1.11 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -107,7 +108,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pquerna/otp v1.2.0 // indirect github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect @@ -134,7 +135,6 @@ require ( google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect - google.golang.org/grpc v1.58.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.100.1 // indirect diff --git a/acceptance/go.sum b/acceptance/go.sum index 88daac648d..9eb3f1c037 100644 --- a/acceptance/go.sum +++ b/acceptance/go.sum @@ -185,12 +185,12 @@ github.com/gruntwork-io/terratest v0.46.7 h1:oqGPBBO87SEsvBYaA0R5xOq+Lm2Xc5dmFVf github.com/gruntwork-io/terratest v0.46.7/go.mod h1:6gI5MlLeyF+SLwqocA5GBzcTix+XiuxCy1BPwKuT+WM= github.com/hashicorp/consul-k8s/control-plane v0.0.0-20240226161840-f3842c41cb2b h1:AdeWjUb+rxrRryC5ZHaL32oOZuxubOzV2q6oJ97UMT0= github.com/hashicorp/consul-k8s/control-plane v0.0.0-20240226161840-f3842c41cb2b/go.mod h1:TVaSJM7vYM/mtKGpVc/Lch53lrqLI9XAXJgy/gY8v4A= -github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= -github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= -github.com/hashicorp/consul/proto-public v0.6.1 
h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg= -github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= -github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= -github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= +github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8= +github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE= +github.com/hashicorp/consul/proto-public v0.6.0 h1:9qrBujmoTB5gQQ84kQO+YWvhjgYoYBNrOoHdo4cpHHM= +github.com/hashicorp/consul/proto-public v0.6.0/go.mod h1:JF6983XNCzvw4wDNOLEwLqOq2IPw7iyT+pkswHSz08U= +github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8= +github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -369,8 +369,8 @@ github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lF github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= 
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= diff --git a/acceptance/tests/api-gateway/api_gateway_external_servers_test.go b/acceptance/tests/api-gateway/api_gateway_external_servers_test.go index 43755f5b40..aa0934dc65 100644 --- a/acceptance/tests/api-gateway/api_gateway_external_servers_test.go +++ b/acceptance/tests/api-gateway/api_gateway_external_servers_test.go @@ -76,19 +76,8 @@ func TestAPIGateway_ExternalServers(t *testing.T) { require.NoError(t, err) logger.Log(t, "set consul config entry") - // Create certificate secret, we do this separately since - // applying the secret will make an invalid certificate that breaks other tests - logger.Log(t, "creating certificate secret") - out, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptions(t), "apply", "-f", "../fixtures/bases/api-gateway/certificate.yaml") - require.NoError(t, err, out) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - // Ignore errors here because if the test ran as expected - // the custom resources will have been deleted. 
- k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptions(t), "delete", "-f", "../fixtures/bases/api-gateway/certificate.yaml") - }) - logger.Log(t, "creating api-gateway resources") - out, err = k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptions(t), "apply", "-k", "../fixtures/bases/api-gateway") + out, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptions(t), "apply", "-k", "../fixtures/bases/api-gateway") require.NoError(t, err, out) helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { // Ignore errors here because if the test ran as expected diff --git a/acceptance/tests/api-gateway/api_gateway_lifecycle_test.go b/acceptance/tests/api-gateway/api_gateway_lifecycle_test.go index 59be62595b..e8fbc945f1 100644 --- a/acceptance/tests/api-gateway/api_gateway_lifecycle_test.go +++ b/acceptance/tests/api-gateway/api_gateway_lifecycle_test.go @@ -295,7 +295,7 @@ func TestAPIGateway_Lifecycle(t *testing.T) { // make sure our certificate exists logger.Log(t, "checking that the certificate is synchronized to Consul") - checkConsulExists(t, consulClient, api.FileSystemCertificate, certificateName) + checkConsulExists(t, consulClient, api.InlineCertificate, certificateName) // delete the certificate in Kubernetes logger.Log(t, "deleting the certificate in Kubernetes") @@ -304,7 +304,7 @@ func TestAPIGateway_Lifecycle(t *testing.T) { // make sure the certificate no longer exists in Consul logger.Log(t, "checking that the certificate is deleted from Consul") - checkConsulNotExists(t, consulClient, api.FileSystemCertificate, certificateName) + checkConsulNotExists(t, consulClient, api.InlineCertificate, certificateName) } func checkConsulNotExists(t *testing.T, client *api.Client, kind, name string, namespace ...string) { diff --git a/acceptance/tests/api-gateway/api_gateway_tenancy_test.go b/acceptance/tests/api-gateway/api_gateway_tenancy_test.go index 461fde1ecf..f7b0ac6d79 100644 --- a/acceptance/tests/api-gateway/api_gateway_tenancy_test.go +++ 
b/acceptance/tests/api-gateway/api_gateway_tenancy_test.go @@ -168,7 +168,7 @@ func TestAPIGateway_Tenancy(t *testing.T) { }) // we only sync validly referenced certificates over, so check to make sure it is not created. - checkConsulNotExists(t, consulClient, api.FileSystemCertificate, "certificate", namespaceForConsul(c.namespaceMirroring, certificateNamespace)) + checkConsulNotExists(t, consulClient, api.InlineCertificate, "certificate", namespaceForConsul(c.namespaceMirroring, certificateNamespace)) // now create reference grants createReferenceGrant(t, k8sClient, "gateway-certificate", gatewayNamespace, certificateNamespace) @@ -237,11 +237,11 @@ func TestAPIGateway_Tenancy(t *testing.T) { // and check to make sure that the certificate exists retryCheck(t, 30, func(r *retry.R) { - entry, _, err := consulClient.ConfigEntries().Get(api.FileSystemCertificate, "certificate", &api.QueryOptions{ + entry, _, err := consulClient.ConfigEntries().Get(api.InlineCertificate, "certificate", &api.QueryOptions{ Namespace: namespaceForConsul(c.namespaceMirroring, certificateNamespace), }) require.NoError(r, err) - certificate := entry.(*api.FileSystemCertificateConfigEntry) + certificate := entry.(*api.InlineCertificateConfigEntry) require.EqualValues(r, "certificate", certificate.Meta["k8s-name"]) require.EqualValues(r, certificateNamespace, certificate.Meta["k8s-namespace"]) diff --git a/acceptance/tests/consul-dns/consul_dns_partitions_test.go b/acceptance/tests/consul-dns/consul_dns_partitions_test.go deleted file mode 100644 index c35668e695..0000000000 --- a/acceptance/tests/consul-dns/consul_dns_partitions_test.go +++ /dev/null @@ -1,462 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package consuldns - -import ( - "fmt" - "strconv" - "testing" - "time" - - terratestk8s "github.com/gruntwork-io/terratest/modules/k8s" - "github.com/hashicorp/consul-k8s/acceptance/framework/config" - "github.com/hashicorp/consul-k8s/acceptance/framework/consul" - "github.com/hashicorp/consul-k8s/acceptance/framework/environment" - "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" - "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" - "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/stretchr/testify/require" -) - -const staticServerName = "static-server" -const staticServerNamespace = "ns1" - -type dnsWithPartitionsTestCase struct { - name string - secure bool -} - -type dnsVerification struct { - name string - requestingCtx environment.TestContext - svcContext environment.TestContext - svcName string - shouldResolveDNS bool - preProcessingFunc func(t *testing.T) -} - -const defaultPartition = "default" -const secondaryPartition = "secondary" -const defaultNamespace = "default" - -// TestConsulDNSProxy_WithPartitionsAndCatalogSync verifies DNS queries for services across partitions -// when DNS proxy is enabled. It configures CoreDNS to use configure consul domain queries to -// be forwarded to the Consul DNS Proxy. The test validates: -// - returning the local partition's service when tenancy is not included in the DNS question. -// - properly not resolving DNS for unexported services when ACLs are enabled. -// - properly resolving DNS for exported services when ACLs are enabled. 
-func TestConsulDNSProxy_WithPartitionsAndCatalogSync(t *testing.T) { - env := suite.Environment() - cfg := suite.Config() - - if cfg.EnableCNI { - t.Skipf("skipping because -enable-cni is set") - } - if !cfg.EnableEnterprise { - t.Skipf("skipping this test because -enable-enterprise is not set") - } - - cases := []dnsWithPartitionsTestCase{ - { - name: "not secure - ACLs and auto-encrypt not enabled", - secure: false, - }, - { - name: "secure - ACLs and auto-encrypt enabled", - secure: true, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - defaultClusterContext := env.DefaultContext(t) - secondaryClusterContext := env.Context(t, 1) - - // Setup the clusters and the static service. - releaseName, consulClient, defaultPartitionOpts, secondaryPartitionQueryOpts, defaultConsulCluster := setupClustersAndStaticService(t, cfg, - defaultClusterContext, secondaryClusterContext, c, secondaryPartition, - defaultPartition) - - // Update CoreDNS to use the Consul domain and forward queries to the Consul DNS Service or Proxy. - updateCoreDNSWithConsulDomain(t, defaultClusterContext, releaseName, true) - updateCoreDNSWithConsulDomain(t, secondaryClusterContext, releaseName, true) - - podLabelSelector := "app=static-server" - // The index of the dnsUtils pod to use for the DNS queries so that the pod name can be unique. - dnsUtilsPodIndex := 0 - - // When ACLs are enabled, the unexported service should not resolve. - shouldResolveUnexportedCrossPartitionDNSRecord := true - if c.secure { - shouldResolveUnexportedCrossPartitionDNSRecord = false - } - - // Verify that the service is in the catalog under each partition. 
- verifyServiceInCatalog(t, consulClient, defaultPartitionOpts) - verifyServiceInCatalog(t, consulClient, secondaryPartitionQueryOpts) - - logger.Log(t, "verify the service via DNS in the default partition of the Consul catalog.") - for _, v := range getVerifications(defaultClusterContext, secondaryClusterContext, - shouldResolveUnexportedCrossPartitionDNSRecord, cfg, releaseName, defaultConsulCluster) { - t.Run(v.name, func(t *testing.T) { - if v.preProcessingFunc != nil { - v.preProcessingFunc(t) - } - verifyDNS(t, releaseName, staticServerNamespace, v.requestingCtx, v.svcContext, - podLabelSelector, v.svcName, v.shouldResolveDNS, dnsUtilsPodIndex) - dnsUtilsPodIndex++ - }) - } - }) - } -} - -func getVerifications(defaultClusterContext environment.TestContext, secondaryClusterContext environment.TestContext, - shouldResolveUnexportedCrossPartitionDNSRecord bool, cfg *config.TestConfig, releaseName string, defaultConsulCluster *consul.HelmCluster) []dnsVerification { - serviceRequestWithNoPartition := fmt.Sprintf("%s.service.consul", staticServerName) - serviceRequestInDefaultPartition := fmt.Sprintf("%s.service.%s.ap.consul", staticServerName, defaultPartition) - serviceRequestInSecondaryPartition := fmt.Sprintf("%s.service.%s.ap.consul", staticServerName, secondaryPartition) - verifications := []dnsVerification{ - { - name: "verify static-server.service.consul from default partition resolves the default partition ip address.", - requestingCtx: defaultClusterContext, - svcContext: defaultClusterContext, - svcName: serviceRequestWithNoPartition, - shouldResolveDNS: true, - }, - { - name: "verify static-server.service.default.ap.consul resolves the default partition ip address.", - requestingCtx: defaultClusterContext, - svcContext: defaultClusterContext, - svcName: serviceRequestInDefaultPartition, - shouldResolveDNS: true, - }, - { - name: "verify the unexported static-server.service.secondary.ap.consul from the default partition. 
With ACLs turned on, this should not resolve. Otherwise, it will resolve.", - requestingCtx: defaultClusterContext, - svcContext: secondaryClusterContext, - svcName: serviceRequestInSecondaryPartition, - shouldResolveDNS: shouldResolveUnexportedCrossPartitionDNSRecord, - }, - { - name: "verify static-server.service.secondary.ap.consul from the secondary partition.", - requestingCtx: secondaryClusterContext, - svcContext: secondaryClusterContext, - svcName: serviceRequestInSecondaryPartition, - shouldResolveDNS: true, - }, - { - name: "verify static-server.service.consul from the secondary partition should return the ip in the secondary.", - requestingCtx: secondaryClusterContext, - svcContext: secondaryClusterContext, - svcName: serviceRequestWithNoPartition, - shouldResolveDNS: true, - }, - { - name: "verify static-server.service.default.ap.consul from the secondary partition. With ACLs turned on, this should not resolve. Otherwise, it will resolve.", - requestingCtx: secondaryClusterContext, - svcContext: defaultClusterContext, - svcName: serviceRequestInDefaultPartition, - shouldResolveDNS: shouldResolveUnexportedCrossPartitionDNSRecord, - }, - { - name: "verify static-server.service.secondary.ap.consul from the default partition once the service is exported.", - requestingCtx: defaultClusterContext, - svcContext: secondaryClusterContext, - svcName: serviceRequestInSecondaryPartition, - shouldResolveDNS: true, - preProcessingFunc: func(t *testing.T) { - k8s.KubectlApplyK(t, secondaryClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - k8s.KubectlDeleteK(t, secondaryClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") - }) - }, - }, - { - name: "verify static-server.service.default.ap.consul from the secondary partition once the service is exported.", - requestingCtx: secondaryClusterContext, - 
svcContext: defaultClusterContext, - svcName: serviceRequestInDefaultPartition, - shouldResolveDNS: true, - preProcessingFunc: func(t *testing.T) { - k8s.KubectlApplyK(t, defaultClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - k8s.KubectlDeleteK(t, defaultClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") - }) - }, - }, - { - name: "after rollout restart of dns-proxy in default partition - verify static-server.service.secondary.ap.consul from the default partition once the service is exported.", - requestingCtx: defaultClusterContext, - svcContext: secondaryClusterContext, - svcName: serviceRequestInSecondaryPartition, - shouldResolveDNS: true, - preProcessingFunc: func(t *testing.T) { - restartDNSProxy(t, releaseName, defaultClusterContext) - k8s.KubectlApplyK(t, secondaryClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - k8s.KubectlDeleteK(t, secondaryClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") - }) - }, - }, - { - name: "after rollout restart of dns-proxy in secondary partition - verify static-server.service.default.ap.consul from the secondary partition once the service is exported.", - requestingCtx: secondaryClusterContext, - svcContext: defaultClusterContext, - svcName: serviceRequestInDefaultPartition, - shouldResolveDNS: true, - preProcessingFunc: func(t *testing.T) { - restartDNSProxy(t, releaseName, secondaryClusterContext) - k8s.KubectlApplyK(t, defaultClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - k8s.KubectlDeleteK(t, defaultClusterContext.KubectlOptions(t), 
"../fixtures/cases/crd-partitions/default-partition-default") - }) - }, - }, - { - name: "flip default cluster to use DNS service instead - verify static-server.service.secondary.ap.consul from the default partition once the service is exported.", - requestingCtx: defaultClusterContext, - svcContext: secondaryClusterContext, - svcName: serviceRequestInSecondaryPartition, - shouldResolveDNS: true, - preProcessingFunc: func(t *testing.T) { - defaultConsulCluster.Upgrade(t, map[string]string{"dns.proxy.enabled": "false"}) - updateCoreDNSWithConsulDomain(t, defaultClusterContext, releaseName, false) - k8s.KubectlApplyK(t, secondaryClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - k8s.KubectlDeleteK(t, secondaryClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") - }) - }, - }, - { - name: "flip default cluster back to using DNS Proxy - verify static-server.service.secondary.ap.consul from the default partition once the service is exported.", - requestingCtx: defaultClusterContext, - svcContext: secondaryClusterContext, - svcName: serviceRequestInSecondaryPartition, - shouldResolveDNS: true, - preProcessingFunc: func(t *testing.T) { - defaultConsulCluster.Upgrade(t, map[string]string{"dns.proxy.enabled": "true"}) - updateCoreDNSWithConsulDomain(t, defaultClusterContext, releaseName, true) - k8s.KubectlApplyK(t, secondaryClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - k8s.KubectlDeleteK(t, secondaryClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") - }) - }, - }, - } - - return verifications -} - -func restartDNSProxy(t *testing.T, releaseName string, ctx environment.TestContext) { - dnsDeploymentName := 
fmt.Sprintf("deployment/%s-consul-dns-proxy", releaseName) - restartDNSProxyCommand := []string{"rollout", "restart", dnsDeploymentName} - k8sOptions := ctx.KubectlOptions(t) - logger.Log(t, fmt.Sprintf("restarting the dns-proxy deployment in %s k8s context", k8sOptions.ContextName)) - _, err := k8s.RunKubectlAndGetOutputE(t, k8sOptions, restartDNSProxyCommand...) - require.NoError(t, err) - - // Wait for restart to finish. - out, err := k8s.RunKubectlAndGetOutputE(t, k8sOptions, "rollout", "status", "--timeout", "1m", "--watch", dnsDeploymentName) - require.NoError(t, err, out, "rollout status command errored, this likely means the rollout didn't complete in time") - logger.Log(t, fmt.Sprintf("dns-proxy deployment in %s k8s context has finished restarting", k8sOptions.ContextName)) -} -func verifyServiceInCatalog(t *testing.T, consulClient *api.Client, queryOpts *api.QueryOptions) { - logger.Log(t, "verify the service in the secondary partition of the Consul catalog.") - svc, _, err := consulClient.Catalog().Service(staticServerName, "", queryOpts) - require.NoError(t, err) - require.Equal(t, 1, len(svc)) - require.Equal(t, []string{"k8s"}, svc[0].ServiceTags) -} - -func setupClustersAndStaticService(t *testing.T, cfg *config.TestConfig, defaultClusterContext environment.TestContext, - secondaryClusterContext environment.TestContext, c dnsWithPartitionsTestCase, secondaryPartition string, - defaultPartition string) (string, *api.Client, *api.QueryOptions, *api.QueryOptions, *consul.HelmCluster) { - commonHelmValues := map[string]string{ - "global.adminPartitions.enabled": "true", - "global.enableConsulNamespaces": "true", - - "global.tls.enabled": "true", - "global.tls.httpsOnly": strconv.FormatBool(c.secure), - - "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), - - "syncCatalog.enabled": "true", - // When mirroringK8S is set, this setting is ignored. 
- "syncCatalog.consulNamespaces.consulDestinationNamespace": defaultNamespace, - "syncCatalog.consulNamespaces.mirroringK8S": "false", - "syncCatalog.addK8SNamespaceSuffix": "false", - - "dns.enabled": "true", - "dns.proxy.enabled": "true", - "dns.enableRedirection": strconv.FormatBool(cfg.EnableTransparentProxy), - } - - serverHelmValues := map[string]string{ - "server.exposeGossipAndRPCPorts": "true", - "server.extraConfig": `"{\"log_level\": \"TRACE\"}"`, - } - - if cfg.UseKind { - serverHelmValues["server.exposeService.type"] = "NodePort" - serverHelmValues["server.exposeService.nodePort.https"] = "30000" - } - - releaseName := helpers.RandomName() - - helpers.MergeMaps(serverHelmValues, commonHelmValues) - - // Install the consul cluster with servers in the default kubernetes context. - defaultConsulCluster := consul.NewHelmCluster(t, serverHelmValues, defaultClusterContext, cfg, releaseName) - defaultConsulCluster.Create(t) - - // Get the TLS CA certificate and key secret from the server cluster and apply it to the client cluster. - caCertSecretName := fmt.Sprintf("%s-consul-ca-cert", releaseName) - caKeySecretName := fmt.Sprintf("%s-consul-ca-key", releaseName) - - logger.Logf(t, "retrieving ca cert secret %s from the server cluster and applying to the client cluster", caCertSecretName) - k8s.CopySecret(t, defaultClusterContext, secondaryClusterContext, caCertSecretName) - - if !c.secure { - // When auto-encrypt is disabled, we need both - // the CA cert and CA key to be available in the clients cluster to generate client certificates and keys. 
- logger.Logf(t, "retrieving ca key secret %s from the server cluster and applying to the client cluster", caKeySecretName) - k8s.CopySecret(t, defaultClusterContext, secondaryClusterContext, caKeySecretName) - } - - partitionToken := fmt.Sprintf("%s-consul-partitions-acl-token", releaseName) - if c.secure { - logger.Logf(t, "retrieving partition token secret %s from the server cluster and applying to the client cluster", partitionToken) - k8s.CopySecret(t, defaultClusterContext, secondaryClusterContext, partitionToken) - } - - partitionServiceName := fmt.Sprintf("%s-consul-expose-servers", releaseName) - partitionSvcAddress := k8s.ServiceHost(t, cfg, defaultClusterContext, partitionServiceName) - - k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, secondaryClusterContext) - - // Create client cluster. - clientHelmValues := map[string]string{ - "global.enabled": "false", - - "global.adminPartitions.name": secondaryPartition, - - "global.tls.caCert.secretName": caCertSecretName, - "global.tls.caCert.secretKey": "tls.crt", - - "externalServers.enabled": "true", - "externalServers.hosts[0]": partitionSvcAddress, - "externalServers.tlsServerName": "server.dc1.consul", - } - - if c.secure { - // Setup partition token and auth method host if ACLs enabled. - clientHelmValues["global.acls.bootstrapToken.secretName"] = partitionToken - clientHelmValues["global.acls.bootstrapToken.secretKey"] = "token" - clientHelmValues["externalServers.k8sAuthMethodHost"] = k8sAuthMethodHost - } else { - // Provide CA key when auto-encrypt is disabled. - clientHelmValues["global.tls.caKey.secretName"] = caKeySecretName - clientHelmValues["global.tls.caKey.secretKey"] = "tls.key" - } - - if cfg.UseKind { - clientHelmValues["externalServers.httpsPort"] = "30000" - } - - helpers.MergeMaps(clientHelmValues, commonHelmValues) - - // Install the consul cluster without servers in the client cluster kubernetes context. 
- secondaryConsulCluster := consul.NewHelmCluster(t, clientHelmValues, secondaryClusterContext, cfg, releaseName) - secondaryConsulCluster.Create(t) - - defaultStaticServerOpts := &terratestk8s.KubectlOptions{ - ContextName: defaultClusterContext.KubectlOptions(t).ContextName, - ConfigPath: defaultClusterContext.KubectlOptions(t).ConfigPath, - Namespace: staticServerNamespace, - } - secondaryStaticServerOpts := &terratestk8s.KubectlOptions{ - ContextName: secondaryClusterContext.KubectlOptions(t).ContextName, - ConfigPath: secondaryClusterContext.KubectlOptions(t).ConfigPath, - Namespace: staticServerNamespace, - } - - logger.Logf(t, "creating namespaces %s in servers cluster", staticServerNamespace) - k8s.RunKubectl(t, defaultClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - k8s.RunKubectl(t, defaultClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace) - }) - - logger.Logf(t, "creating namespaces %s in clients cluster", staticServerNamespace) - k8s.RunKubectl(t, secondaryClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - k8s.RunKubectl(t, secondaryClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace) - }) - - consulClient, _ := defaultConsulCluster.SetupConsulClient(t, c.secure) - - defaultPartitionQueryOpts := &api.QueryOptions{Namespace: defaultNamespace, Partition: defaultPartition} - secondaryPartitionQueryOpts := &api.QueryOptions{Namespace: defaultNamespace, Partition: secondaryPartition} - - // Check that the ACL token is deleted. - if c.secure { - // We need to register the cleanup function before we create the deployments - // because golang will execute them in reverse order i.e. the last registered - // cleanup function will be executed first. 
- t.Cleanup(func() { - if c.secure { - retry.Run(t, func(r *retry.R) { - tokens, _, err := consulClient.ACL().TokenList(defaultPartitionQueryOpts) - require.NoError(r, err) - for _, token := range tokens { - require.NotContains(r, token.Description, staticServerName) - } - - tokens, _, err = consulClient.ACL().TokenList(secondaryPartitionQueryOpts) - require.NoError(r, err) - for _, token := range tokens { - require.NotContains(r, token.Description, staticServerName) - } - }) - } - }) - } - - logger.Log(t, "creating a static-server with a service") - // create service in default partition. - k8s.DeployKustomize(t, defaultStaticServerOpts, cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/bases/static-server") - // create service in secondary partition. - k8s.DeployKustomize(t, secondaryStaticServerOpts, cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/bases/static-server") - - logger.Log(t, "checking that the service has been synced to Consul") - var services map[string][]string - counter := &retry.Counter{Count: 30, Wait: 30 * time.Second} - retry.RunWith(counter, t, func(r *retry.R) { - var err error - // list services in default partition catalog. - services, _, err = consulClient.Catalog().Services(defaultPartitionQueryOpts) - require.NoError(r, err) - require.Contains(r, services, staticServerName) - if _, ok := services[staticServerName]; !ok { - r.Errorf("service '%s' is not in Consul's list of services %s in the default partition", staticServerName, services) - } - // list services in secondary partition catalog. 
- services, _, err = consulClient.Catalog().Services(secondaryPartitionQueryOpts) - require.NoError(r, err) - require.Contains(r, services, staticServerName) - if _, ok := services[staticServerName]; !ok { - r.Errorf("service '%s' is not in Consul's list of services %s in the secondary partition", staticServerName, services) - } - }) - - logger.Log(t, "verify the service in the default partition of the Consul catalog.") - service, _, err := consulClient.Catalog().Service(staticServerName, "", defaultPartitionQueryOpts) - require.NoError(t, err) - require.Equal(t, 1, len(service)) - require.Equal(t, []string{"k8s"}, service[0].ServiceTags) - - return releaseName, consulClient, defaultPartitionQueryOpts, secondaryPartitionQueryOpts, defaultConsulCluster -} diff --git a/acceptance/tests/consul-dns/consul_dns_test.go b/acceptance/tests/consul-dns/consul_dns_test.go index c70b8d2fae..f67ac96bd3 100644 --- a/acceptance/tests/consul-dns/consul_dns_test.go +++ b/acceptance/tests/consul-dns/consul_dns_test.go @@ -6,26 +6,17 @@ package consuldns import ( "context" "fmt" - "github.com/hashicorp/consul-k8s/acceptance/framework/environment" - "os" "strconv" - "strings" "testing" - "time" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" - "github.com/hashicorp/consul-k8s/acceptance/framework/logger" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// TestConsulDNS configures CoreDNS to use configure consul domain queries to -// be forwarded to the Consul DNS Service or the Consul DNS Proxy depending on -// the test case. The test validates that the DNS queries are resolved when querying -// for .consul services in secure and non-secure modes. 
func TestConsulDNS(t *testing.T) { cfg := suite.Config() if cfg.EnableCNI { @@ -36,147 +27,74 @@ func TestConsulDNS(t *testing.T) { t.Skipf("skipping because -use-aks is set") } - cases := []struct { - secure bool - enableDNSProxy bool - }{ - {secure: false, enableDNSProxy: false}, - {secure: false, enableDNSProxy: true}, - {secure: true, enableDNSProxy: false}, - {secure: true, enableDNSProxy: true}, - } - - for _, c := range cases { - name := fmt.Sprintf("secure: %t / enableDNSProxy: %t", c.secure, c.enableDNSProxy) + for _, secure := range []bool{false, true} { + name := fmt.Sprintf("secure: %t", secure) t.Run(name, func(t *testing.T) { env := suite.Environment() ctx := env.DefaultContext(t) releaseName := helpers.RandomName() + helmValues := map[string]string{ "dns.enabled": "true", - "global.tls.enabled": strconv.FormatBool(c.secure), - "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), - "dns.proxy.enabled": strconv.FormatBool(c.enableDNSProxy), + "global.tls.enabled": strconv.FormatBool(secure), + "global.acls.manageSystemACLs": strconv.FormatBool(secure), } - cluster := consul.NewHelmCluster(t, helmValues, ctx, suite.Config(), releaseName) cluster.Create(t) - updateCoreDNSWithConsulDomain(t, ctx, releaseName, c.enableDNSProxy) - verifyDNS(t, releaseName, ctx.KubectlOptions(t).Namespace, ctx, ctx, "app=consul,component=server", - "consul.service.consul", true, 0) - }) - } -} - -func updateCoreDNSWithConsulDomain(t *testing.T, ctx environment.TestContext, releaseName string, enableDNSProxy bool) { - updateCoreDNSFile(t, ctx, releaseName, enableDNSProxy, "coredns-custom.yaml") - updateCoreDNS(t, ctx, "coredns-custom.yaml") - - t.Cleanup(func() { - updateCoreDNS(t, ctx, "coredns-original.yaml") - time.Sleep(5 * time.Second) - }) -} - -func updateCoreDNSFile(t *testing.T, ctx environment.TestContext, releaseName string, - enableDNSProxy bool, dnsFileName string) { - dnsIP, err := getDNSServiceClusterIP(t, ctx, releaseName, enableDNSProxy) - 
require.NoError(t, err) + k8sClient := ctx.KubernetesClient(t) + contextNamespace := ctx.KubectlOptions(t).Namespace - input, err := os.ReadFile("coredns-template.yaml") - require.NoError(t, err) - newContents := strings.Replace(string(input), "{{CONSUL_DNS_IP}}", dnsIP, -1) - err = os.WriteFile(dnsFileName, []byte(newContents), os.FileMode(0644)) - require.NoError(t, err) -} - -func updateCoreDNS(t *testing.T, ctx environment.TestContext, coreDNSConfigFile string) { - coreDNSCommand := []string{ - "replace", "-n", "kube-system", "-f", coreDNSConfigFile, - } - logs, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptions(t), coreDNSCommand...) - require.NoError(t, err) - require.Contains(t, logs, "configmap/coredns replaced") - restartCoreDNSCommand := []string{"rollout", "restart", "deployment/coredns", "-n", "kube-system"} - _, err = k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptions(t), restartCoreDNSCommand...) - require.NoError(t, err) - // Wait for restart to finish. - out, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptions(t), "rollout", "status", "--timeout", "1m", "--watch", "deployment/coredns", "-n", "kube-system") - require.NoError(t, err, out, "rollout status command errored, this likely means the rollout didn't complete in time") -} + dnsService, err := k8sClient.CoreV1().Services(contextNamespace).Get(context.Background(), fmt.Sprintf("%s-%s", releaseName, "consul-dns"), metav1.GetOptions{}) + require.NoError(t, err) -func verifyDNS(t *testing.T, releaseName string, svcNamespace string, requestingCtx, svcContext environment.TestContext, - podLabelSelector, svcName string, shouldResolveDNSRecord bool, dnsUtilsPodIndex int) { - podList, err := svcContext.KubernetesClient(t).CoreV1().Pods(svcNamespace).List(context.Background(), metav1.ListOptions{ - LabelSelector: podLabelSelector, - }) - require.NoError(t, err) + dnsIP := dnsService.Spec.ClusterIP - servicePodIPs := make([]string, len(podList.Items)) - for i, serverPod := range podList.Items { - 
servicePodIPs[i] = serverPod.Status.PodIP - } + consulServerList, err := k8sClient.CoreV1().Pods(contextNamespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: "app=consul,component=server", + }) + require.NoError(t, err) - logger.Log(t, "launch a pod to test the dns resolution.") - dnsUtilsPod := fmt.Sprintf("%s-dns-utils-pod-%d", releaseName, dnsUtilsPodIndex) - dnsTestPodArgs := []string{ - "run", "-it", dnsUtilsPod, "--restart", "Never", "--image", "anubhavmishra/tiny-tools", "--", "dig", svcName, - } + serverIPs := make([]string, 0, len(consulServerList.Items)) + for _, serverPod := range consulServerList.Items { + serverIPs = append(serverIPs, serverPod.Status.PodIP) + } - helpers.Cleanup(t, suite.Config().NoCleanupOnFailure, suite.Config().NoCleanup, func() { - // Note: this delete command won't wait for pods to be fully terminated. - // This shouldn't cause any test pollution because the underlying - // objects are deployments, and so when other tests create these - // they should have different pod names. - k8s.RunKubectl(t, requestingCtx.KubectlOptions(t), "delete", "pod", dnsUtilsPod) - }) - - retry.Run(t, func(r *retry.R) { - logger.Log(t, "run the dns utilize pod and query DNS for the service.") - logs, err := k8s.RunKubectlAndGetOutputE(r, requestingCtx.KubectlOptions(r), dnsTestPodArgs...) - require.NoError(r, err) - - // When the `dig` request is successful, a section of it's response looks like the following: - // - // ;; ANSWER SECTION: - // consul.service.consul. 
0 IN A - // - // ;; Query time: 2 msec - // ;; SERVER: #() - // ;; WHEN: Mon Aug 10 15:02:40 UTC 2020 - // ;; MSG SIZE rcvd: 98 - // - // We assert on the existence of the ANSWER SECTION, The consul-server IPs being present in the ANSWER SECTION and the the DNS IP mentioned in the SERVER: field - - logger.Log(t, "verify the DNS results.") - // strip logs of tabs, newlines and spaces to make it easier to assert on the content when there is a DNS match - strippedLogs := strings.Replace(logs, "\t", "", -1) - strippedLogs = strings.Replace(strippedLogs, "\n", "", -1) - strippedLogs = strings.Replace(strippedLogs, " ", "", -1) - for _, ip := range servicePodIPs { - aRecordPattern := "%s.5INA%s" - aRecord := fmt.Sprintf(aRecordPattern, svcName, ip) - if shouldResolveDNSRecord { - require.Contains(r, logs, "ANSWER SECTION:") - require.Contains(r, strippedLogs, aRecord) - } else { - require.NotContains(r, logs, "ANSWER SECTION:") - require.NotContains(r, strippedLogs, aRecord) - require.Contains(r, logs, "status: NXDOMAIN") - require.Contains(r, logs, "AUTHORITY SECTION:\nconsul.\t\t\t5\tIN\tSOA\tns.consul. hostmaster.consul.") + dnsPodName := fmt.Sprintf("%s-dns-pod", releaseName) + dnsTestPodArgs := []string{ + "run", "-it", dnsPodName, "--restart", "Never", "--image", "anubhavmishra/tiny-tools", "--", "dig", fmt.Sprintf("@%s-consul-dns", releaseName), "consul.service.consul", } - } - }) -} -func getDNSServiceClusterIP(t *testing.T, requestingCtx environment.TestContext, releaseName string, enableDNSProxy bool) (string, error) { - logger.Log(t, "get the in cluster dns service or proxy.") - dnsSvcName := fmt.Sprintf("%s-consul-dns", releaseName) - if enableDNSProxy { - dnsSvcName += "-proxy" + helpers.Cleanup(t, suite.Config().NoCleanupOnFailure, suite.Config().NoCleanup, func() { + // Note: this delete command won't wait for pods to be fully terminated. 
+ // This shouldn't cause any test pollution because the underlying + // objects are deployments, and so when other tests create these + // they should have different pod names. + k8s.RunKubectl(t, ctx.KubectlOptions(t), "delete", "pod", dnsPodName) + }) + + retry.Run(t, func(r *retry.R) { + logs, err := k8s.RunKubectlAndGetOutputE(r, ctx.KubectlOptions(r), dnsTestPodArgs...) + require.NoError(r, err) + + // When the `dig` request is successful, a section of its response looks like the following: + // + // ;; ANSWER SECTION: + // consul.service.consul. 0 IN A + // + // ;; Query time: 2 msec + // ;; SERVER: #() + // ;; WHEN: Mon Aug 10 15:02:40 UTC 2020 + // ;; MSG SIZE rcvd: 98 + // + // We assert on the existence of the ANSWER SECTION, the consul-server IPs being present in the ANSWER SECTION and the DNS IP mentioned in the SERVER: field + + require.Contains(r, logs, fmt.Sprintf("SERVER: %s", dnsIP)) + require.Contains(r, logs, "ANSWER SECTION:") + for _, ip := range serverIPs { + require.Contains(r, logs, fmt.Sprintf("consul.service.consul.\t0\tIN\tA\t%s", ip)) + } + }) + }) } - dnsService, err := requestingCtx.KubernetesClient(t).CoreV1().Services(requestingCtx.KubectlOptions(t).Namespace).Get(context.Background(), dnsSvcName, metav1.GetOptions{}) - require.NoError(t, err) - return dnsService.Spec.ClusterIP, err } diff --git a/acceptance/tests/consul-dns/coredns-original.yaml b/acceptance/tests/consul-dns/coredns-original.yaml deleted file mode 100644 index ba59c03471..0000000000 --- a/acceptance/tests/consul-dns/coredns-original.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: v1 -data: - Corefile: | - .:53 { - errors - health { - lameduck 5s - } - ready - kubernetes cluster.local in-addr.arpa ip6.arpa { - pods insecure - fallthrough in-addr.arpa ip6.arpa - ttl 30 - } - prometheus :9153 - forward . 
/etc/resolv.conf { - max_concurrent 1000 - } - cache 30 - loop - reload - loadbalance - } -kind: ConfigMap -metadata: - name: coredns - namespace: kube-system \ No newline at end of file diff --git a/acceptance/tests/consul-dns/coredns-template.yaml b/acceptance/tests/consul-dns/coredns-template.yaml deleted file mode 100644 index 1a00674704..0000000000 --- a/acceptance/tests/consul-dns/coredns-template.yaml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: v1 -data: - Corefile: | - .:53 { - errors - health { - lameduck 5s - } - ready - kubernetes cluster.local in-addr.arpa ip6.arpa { - pods insecure - fallthrough in-addr.arpa ip6.arpa - ttl 30 - } - prometheus :9153 - forward . /etc/resolv.conf { - max_concurrent 1000 - } - cache 30 - loop - reload - loadbalance - } - consul:53 { - errors - cache 30 - forward . {{CONSUL_DNS_IP}} - } -kind: ConfigMap -metadata: - name: coredns - namespace: kube-system \ No newline at end of file diff --git a/acceptance/tests/datadog/datadog_test.go b/acceptance/tests/datadog/datadog_test.go deleted file mode 100644 index 319ea6a55d..0000000000 --- a/acceptance/tests/datadog/datadog_test.go +++ /dev/null @@ -1,423 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package datadog - -import ( - "context" - "encoding/json" - "fmt" - "strings" - "testing" - "time" - - "github.com/hashicorp/consul-k8s/acceptance/framework/consul" - "github.com/hashicorp/consul-k8s/acceptance/framework/datadog" - "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" - "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" - "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - consulDogstatsDMetricQuery = "consul.memberlist.gossip" -) - -// TODO: Refactor test cases into single function that accepts Helm Values, Fixture Name, and Validation Callback -// TestDatadogDogstatsDUnixDomainSocket -// Acceptance test to verify e2e metrics configuration works as expected -// with live datadog API using histogram formatted metric -// -// Method: DogstatsD + Unix Domain Socket. 
-func TestDatadogDogstatsDUnixDomainSocket(t *testing.T) { - env := suite.Environment() - cfg := suite.Config() - ctx := env.DefaultContext(t) - - if cfg.SkipDataDogTests { - t.Skipf("skipping this test because -skip-datadog is set") - } - - acceptanceTestingTags := "acceptance_test:unix_domain_sockets" - helmValues := map[string]string{ - "global.datacenter": "dc1", - "global.metrics.enabled": "true", - "global.metrics.enableAgentMetrics": "true", - "global.metrics.disableAgentHostName": "true", - "global.metrics.enableHostMetrics": "true", - "global.metrics.datadog.enabled": "true", - "global.metrics.datadog.namespace": "datadog", - "global.metrics.datadog.dogstatsd.enabled": "true", - "global.metrics.datadog.dogstatsd.socketTransportType": "UDS", - "global.metrics.datadog.dogstatsd.dogstatsdTags[0]": "source:consul", - "global.metrics.datadog.dogstatsd.dogstatsdTags[1]": "consul_service:consul-server", - "global.metrics.datadog.dogstatsd.dogstatsdTags[2]": acceptanceTestingTags, - } - - releaseName := helpers.RandomName() - datadogOperatorRelease := datadog.OperatorReleaseName - - // Install the consul cluster in the default kubernetes ctx. 
- consulCluster := consul.NewHelmCluster(t, helmValues, ctx, cfg, releaseName) - consulCluster.Create(t) - - // Deploy Datadog Agent via Datadog Operator and apply dogstatsd overlay - datadogNamespace := helmValues["global.metrics.datadog.namespace"] - logger.Log(t, fmt.Sprintf("deploying datadog-operator via helm | namespace: %s | release-name: %s", datadogNamespace, datadogOperatorRelease)) - datadogCluster := datadog.NewDatadogCluster(t, ctx, cfg, datadogOperatorRelease, datadogNamespace, map[string]string{}) - datadogCluster.Create(t) - - logger.Log(t, fmt.Sprintf("applying dogstatd over unix domain sockets kustomization patch to datadog-agent | namespace: %s", datadogNamespace)) - k8s.DeployKustomize(t, ctx.KubectlOptionsForNamespace(datadogNamespace), cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/datadog-dogstatsd-uds") - k8s.WaitForAllPodsToBeReady(t, ctx.KubernetesClient(t), datadogNamespace, "agent.datadoghq.com/component=agent") - - // Retrieve datadog-agent pod name for exec - podList, err := ctx.KubernetesClient(t).CoreV1().Pods(datadogNamespace).List(context.Background(), metav1.ListOptions{LabelSelector: "agent.datadoghq.com/component=agent"}) - require.NoError(t, err) - require.Len(t, podList.Items, 1) - ddAgentName := podList.Items[0].Name - - // Check the dogstats-stats of the local cluster agent to see if consul metrics - // are being seen by the agent - logger.Log(t, fmt.Sprintf("retrieving datadog-agent control api auth token from pod %s", ddAgentName)) - bearerToken, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptionsForNamespace(datadogNamespace), "exec", "pod/"+ddAgentName, "-c", "agent", "--", "cat", "/etc/datadog-agent/auth_token") - require.NoError(t, err) - // Retry because sometimes the merged metrics server takes a couple hundred milliseconds - // to start. 
- logger.Log(t, fmt.Sprintf("scraping datadog-agent /agent/dogstatsd-stats endpoint for %s | auth-token: %s", consulDogstatsDMetricQuery, bearerToken)) - retry.RunWith(&retry.Counter{Count: 20, Wait: 2 * time.Second}, t, func(r *retry.R) { - metricsOutput, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptionsForNamespace(datadogNamespace), "exec", "pod/"+ddAgentName, "-c", "agent", "--", "curl", "--silent", "--insecure", "--show-error", "--header", fmt.Sprintf("authorization: Bearer %s", bearerToken), "https://localhost:5001/agent/dogstatsd-stats") - require.NoError(r, err) - require.Contains(r, metricsOutput, consulDogstatsDMetricQuery) - }) -} - -// TestDatadogDogstatsDUDP -// Acceptance test to verify e2e metrics configuration works as expected -// with live datadog API using histogram formatted metric -// -// Method: DogstatsD + UDP to Kube Service DNS name on port 8125. -func TestDatadogDogstatsDUDP(t *testing.T) { - env := suite.Environment() - cfg := suite.Config() - ctx := env.DefaultContext(t) - - if cfg.SkipDataDogTests { - t.Skipf("skipping this test because -skip-datadog is set") - } - - acceptanceTestingTags := "acceptance_test:dogstatsd_udp" - helmValues := map[string]string{ - "global.datacenter": "dc1", - "global.metrics.enabled": "true", - "global.metrics.enableAgentMetrics": "true", - "global.metrics.disableAgentHostName": "true", - "global.metrics.enableHostMetrics": "true", - "global.metrics.datadog.enabled": "true", - "global.metrics.datadog.namespace": "datadog", - "global.metrics.datadog.dogstatsd.enabled": "true", - "global.metrics.datadog.dogstatsd.socketTransportType": "UDP", - "global.metrics.datadog.dogstatsd.dogstatsdAddr": "datadog-agent.datadog.svc.cluster.local", - "global.metrics.datadog.dogstatsd.dogstatsdTags[0]": "source:consul", - "global.metrics.datadog.dogstatsd.dogstatsdTags[1]": "consul_service:consul-server", - "global.metrics.datadog.dogstatsd.dogstatsdTags[2]": acceptanceTestingTags, - } - - releaseName := 
helpers.RandomName() - datadogOperatorRelease := datadog.OperatorReleaseName - - // Install the consul cluster in the default kubernetes ctx. - consulCluster := consul.NewHelmCluster(t, helmValues, ctx, cfg, releaseName) - consulCluster.Create(t) - - // Deploy Datadog Agent via Datadog Operator and apply dogstatsd overlay. - datadogNamespace := helmValues["global.metrics.datadog.namespace"] - logger.Log(t, fmt.Sprintf("deploying datadog-operator via helm | namespace: %s | release-name: %s", datadogNamespace, datadogOperatorRelease)) - datadogCluster := datadog.NewDatadogCluster(t, ctx, cfg, datadogOperatorRelease, datadogNamespace, map[string]string{}) - datadogCluster.Create(t) - - logger.Log(t, fmt.Sprintf("applying dogstatd over UDP kustomization patch to datadog-agent | namespace: %s", datadogNamespace)) - k8s.DeployKustomize(t, ctx.KubectlOptionsForNamespace(datadogNamespace), cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/datadog-dogstatsd-udp") - k8s.WaitForAllPodsToBeReady(t, ctx.KubernetesClient(t), datadogNamespace, "agent.datadoghq.com/component=agent") - - // Retrieve datadog-agent pod name for exec - podList, err := ctx.KubernetesClient(t).CoreV1().Pods(datadogNamespace).List(context.Background(), metav1.ListOptions{LabelSelector: "agent.datadoghq.com/component=agent"}) - require.NoError(t, err) - require.Len(t, podList.Items, 1) - ddAgentName := podList.Items[0].Name - - // Check the dogstats-stats of the local cluster agent to see if consul metrics - // are being seen by the agent - logger.Log(t, fmt.Sprintf("retrieving datadog-agent control api auth token from pod %s", ddAgentName)) - bearerToken, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptionsForNamespace(datadogNamespace), "exec", "pod/"+ddAgentName, "-c", "agent", "--", "cat", "/etc/datadog-agent/auth_token") - require.NoError(t, err) - // Retry because sometimes the merged metrics server takes a couple hundred milliseconds - // to start. 
- logger.Log(t, fmt.Sprintf("scraping datadog-agent /agent/dogstatsd-stats endpoint for %s | auth-token: %s", consulDogstatsDMetricQuery, bearerToken)) - retry.RunWith(&retry.Counter{Count: 20, Wait: 2 * time.Second}, t, func(r *retry.R) { - metricsOutput, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptionsForNamespace(datadogNamespace), "exec", "pod/"+ddAgentName, "-c", "agent", "--", "curl", "--silent", "--insecure", "--show-error", "--header", fmt.Sprintf("authorization: Bearer %s", bearerToken), "https://localhost:5001/agent/dogstatsd-stats") - require.NoError(r, err) - require.Contains(r, metricsOutput, consulDogstatsDMetricQuery) - }) -} - -// TestDatadogConsulChecks -// Acceptance test to verify e2e metrics configuration works as expected -// with live datadog API using histogram formatted metric -// -// Method: Consul Integrated Datadog Checks. -func TestDatadogConsulChecks(t *testing.T) { - env := suite.Environment() - cfg := suite.Config() - ctx := env.DefaultContext(t) - - if cfg.SkipDataDogTests { - t.Skipf("skipping this test because -skip-datadog is set") - } - - helmValues := map[string]string{ - "global.datacenter": "dc1", - "global.metrics.enabled": "true", - "global.metrics.enableAgentMetrics": "true", - "global.metrics.disableAgentHostName": "true", - "global.metrics.enableHostMetrics": "true", - "global.metrics.datadog.enabled": "true", - "global.metrics.datadog.namespace": "datadog", - } - - releaseName := helpers.RandomName() - datadogOperatorRelease := datadog.OperatorReleaseName - - // Install the consul cluster in the default kubernetes ctx. - consulCluster := consul.NewHelmCluster(t, helmValues, ctx, cfg, releaseName) - consulCluster.Create(t) - - // Deploy Datadog Agent via Datadog Operator and apply dogstatsd overlay. 
- datadogNamespace := helmValues["global.metrics.datadog.namespace"] - logger.Log(t, fmt.Sprintf("deploying datadog-operator via helm | namespace: %s | release-name: %s", datadogNamespace, datadogOperatorRelease)) - datadogCluster := datadog.NewDatadogCluster(t, ctx, cfg, datadogOperatorRelease, datadogNamespace, map[string]string{}) - datadogCluster.Create(t) - - logger.Log(t, fmt.Sprintf("applying datadog consul integration patch to datadog-agent | namespace: %s", datadogNamespace)) - k8s.DeployKustomize(t, ctx.KubectlOptionsForNamespace(datadogNamespace), cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/bases/datadog") - k8s.WaitForAllPodsToBeReady(t, ctx.KubernetesClient(t), datadogNamespace, "agent.datadoghq.com/component=agent") - - // Retrieve datadog-agent pod name for exec - podList, err := ctx.KubernetesClient(t).CoreV1().Pods(datadogNamespace).List(context.Background(), metav1.ListOptions{LabelSelector: "agent.datadoghq.com/component=agent"}) - require.NoError(t, err) - require.Len(t, podList.Items, 1) - ddAgentName := podList.Items[0].Name - - // Check the dogstats-stats of the local cluster agent to see if consul metrics - // are being seen by the agent - logger.Log(t, fmt.Sprintf("retrieving datadog-agent control api auth token from pod %s", ddAgentName)) - bearerToken, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptionsForNamespace(datadogNamespace), "exec", "pod/"+ddAgentName, "-c", "agent", "--", "cat", "/etc/datadog-agent/auth_token") - // Retry because sometimes the merged metrics server takes a couple hundred milliseconds - // to start. 
- logger.Log(t, fmt.Sprintf("scraping datadog-agent /agent/status endpoint | auth-token: %s", bearerToken)) - var metricsOutput string - retry.RunWith(&retry.Counter{Count: 20, Wait: 2 * time.Second}, t, func(r *retry.R) { - metricsOutput, err = k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptionsForNamespace(datadogNamespace), "exec", "pod/"+ddAgentName, "-c", "agent", "--", "curl", "--silent", "--insecure", "--show-error", "--header", fmt.Sprintf("authorization: Bearer %s", bearerToken), "https://localhost:5001/agent/status") - require.NoError(r, err) - }) - var root Root - err = json.Unmarshal([]byte(metricsOutput), &root) - require.NoError(t, err) - for _, check := range root.RunnerStats.Checks.Consul { - require.Equal(t, ``, check.LastError) - } -} - -// TestDatadogOpenmetrics -// Acceptance test to verify e2e metrics configuration works as expected -// with live datadog API using histogram formatted metric -// -// Method: Datadog Openmetrics Prometheus Metrics Collection. -func TestDatadogOpenmetrics(t *testing.T) { - env := suite.Environment() - cfg := suite.Config() - ctx := env.DefaultContext(t) - - if cfg.SkipDataDogTests { - t.Skipf("skipping this test because -skip-datadog is set") - } - - helmValues := map[string]string{ - "global.datacenter": "dc1", - "global.metrics.enabled": "true", - "global.metrics.enableAgentMetrics": "true", - "global.metrics.disableAgentHostName": "true", - "global.metrics.enableHostMetrics": "true", - "global.metrics.datadog.enabled": "true", - "global.metrics.datadog.namespace": "datadog", - "global.metrics.datadog.openMetricsPrometheus.enabled": "true", - } - - releaseName := helpers.RandomName() - datadogOperatorRelease := datadog.OperatorReleaseName - - // Install the consul cluster in the default kubernetes ctx. 
- consulCluster := consul.NewHelmCluster(t, helmValues, ctx, cfg, releaseName) - consulCluster.Create(t) - - // Deploy Datadog Agent via Datadog Operator and apply dogstatsd overlay - datadogNamespace := helmValues["global.metrics.datadog.namespace"] - logger.Log(t, fmt.Sprintf("deploying datadog-operator via helm | namespace: %s | release-name: %s", datadogNamespace, datadogOperatorRelease)) - datadogCluster := datadog.NewDatadogCluster(t, ctx, cfg, datadogOperatorRelease, datadogNamespace, map[string]string{}) - datadogCluster.Create(t) - - logger.Log(t, fmt.Sprintf("applying datadog openmetrics patch to datadog-agent | namespace: %s", datadogNamespace)) - k8s.DeployKustomize(t, ctx.KubectlOptionsForNamespace(datadogNamespace), cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/datadog-openmetrics") - k8s.WaitForAllPodsToBeReady(t, ctx.KubernetesClient(t), datadogNamespace, "agent.datadoghq.com/component=agent") - - // Retrieve datadog-agent pod name for exec - podList, err := ctx.KubernetesClient(t).CoreV1().Pods(datadogNamespace).List(context.Background(), metav1.ListOptions{LabelSelector: "agent.datadoghq.com/component=agent"}) - require.NoError(t, err) - require.Len(t, podList.Items, 1) - ddAgentName := podList.Items[0].Name - - // Check the dogstats-stats of the local cluster agent to see if consul metrics - // are being seen by the agent - logger.Log(t, fmt.Sprintf("retrieving datadog-agent control api auth token from pod %s", ddAgentName)) - bearerToken, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptionsForNamespace(datadogNamespace), "exec", "pod/"+ddAgentName, "-c", "agent", "--", "cat", "/etc/datadog-agent/auth_token") - // Retry because sometimes the merged metrics server takes a couple hundred milliseconds - // to start. 
- logger.Log(t, fmt.Sprintf("scraping datadog-agent /agent/status endpoint | auth-token: %s", bearerToken)) - var metricsOutput string - retry.RunWith(&retry.Counter{Count: 20, Wait: 2 * time.Second}, t, func(r *retry.R) { - metricsOutput, err = k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptionsForNamespace(datadogNamespace), "exec", "pod/"+ddAgentName, "-c", "agent", "--", "curl", "--silent", "--insecure", "--show-error", "--header", fmt.Sprintf("authorization: Bearer %s", bearerToken), "https://localhost:5001/agent/status") - require.NoError(r, err) - }) - var root Root - err = json.Unmarshal([]byte(metricsOutput), &root) - require.NoError(t, err) - for _, check := range root.RunnerStats.Checks.Openmetrics { - if strings.Contains(check.CheckID, "consul") { - require.Equal(t, ``, check.LastError) - break - } - continue - } -} - -// TestDatadogOTLPCollection -// Acceptance test to verify e2e metrics configuration works as expected -// with live datadog API using histogram formatted metric -// -// Method: Datadog otlp metrics collection via consul-telemetry collector using dd-agent gRPC receiver. 
-//func TestDatadogOTLPCollection(t *testing.T) { -// env := suite.Environment() -// cfg := suite.Config() -// ctx := env.DefaultContext(t) -// // ns := ctx.KubectlOptions(t).Namespace -// -// helmValues := map[string]string{ -// "global.datacenter": "dc1", -// "global.metrics.enabled": "true", -// "global.metrics.enableAgentMetrics": "true", -// "global.metrics.disableAgentHostName": "true", -// "global.metrics.enableHostMetrics": "true", -// "global.metrics.datadog.enabled": "true", -// "global.metrics.datadog.namespace": "datadog", -// "global.metrics.datadog.otlp.enabled": "true", -// "global.metrics.datadog.otlp.protocol": "http", -// "telemetryCollector.enabled": "true", -// } -// -// datadogOperatorHelmValues := map[string]string{ -// "replicaCount": "1", -// "image.tag": datadog.DefaultHelmChartVersion, -// "image.repository": "gcr.io/datadoghq/operator", -// } -// -// releaseName := helpers.RandomName() -// datadogOperatorRelease := datadog.OperatorReleaseName -// -// // Install the consul cluster in the default kubernetes ctx. 
-// consulCluster := consul.NewHelmCluster(t, helmValues, ctx, cfg, releaseName) -// consulCluster.Create(t) -// -// // Deploy Datadog Agent via Datadog Operator and apply dogstatsd overlay -// datadogNamespace := helmValues["global.metrics.datadog.namespace"] -// logger.Log(t, fmt.Sprintf("deploying datadog-operator via helm | namespace: %s | release-name: %s", datadogNamespace, datadogOperatorRelease)) -// datadogCluster := datadog.NewDatadogCluster(t, ctx, cfg, datadogOperatorRelease, datadogNamespace, datadogOperatorHelmValues) -// datadogCluster.Create(t) -// -// logger.Log(t, fmt.Sprintf("applying datadog otlp HTTP endpoint collector patch to datadog-agent | namespace: %s", datadogNamespace)) -// k8s.DeployKustomize(t, ctx.KubectlOptionsForNamespace(datadogNamespace), cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/datadog-otlp") -// k8s.WaitForAllPodsToBeReady(t, ctx.KubernetesClient(t), datadogNamespace, "agent.datadoghq.com/component=agent") -// -// // Retrieve datadog-agent pod name for exec -// podList, err := ctx.KubernetesClient(t).CoreV1().Pods(datadogNamespace).List(context.Background(), metav1.ListOptions{LabelSelector: "agent.datadoghq.com/component=agent"}) -// require.NoError(t, err) -// require.Len(t, podList.Items, 1) -// ddAgentName := podList.Items[0].Name -// -// // Check the dogstats-stats of the local cluster agent to see if consul metrics -// // are being seen by the agent -// bearerToken, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptionsForNamespace(datadogNamespace), "exec", "pod/"+ddAgentName, "-c", "agent", "--", "cat /etc/datadog-agent/auth_token") -// metricsOutput, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptionsForNamespace(datadogNamespace), "exec", "pod/"+ddAgentName, "-c", "agent", "--", "curl", "--silent", "--insecure", "--show-error", "--header", fmt.Sprintf("authorization: Bearer %s", bearerToken), "https://localhost:5001/agent/dogstatsd-stats") -// require.NoError(t, err) -// 
require.Contains(t, metricsOutput, consulOTLPMetricQuery) -//} - -type ConsulCheck struct { - AverageExecutionTime int `json:"AverageExecutionTime"` - CheckConfigSource string `json:"CheckConfigSource"` - CheckID string `json:"CheckID"` - CheckName string `json:"CheckName"` - CheckVersion string `json:"CheckVersion"` - Events int `json:"Events"` - ExecutionTimes []int `json:"ExecutionTimes"` - LastError string `json:"LastError"` - LastExecutionTime int `json:"LastExecutionTime"` - LastSuccessDate int `json:"LastSuccessDate"` - MetricSamples int `json:"MetricSamples"` - ServiceChecks int `json:"ServiceChecks"` - TotalErrors int `json:"TotalErrors"` - TotalEvents int `json:"TotalEvents"` - TotalMetricSamples int `json:"TotalMetricSamples"` - TotalRuns int `json:"TotalRuns"` - TotalServiceChecks int `json:"TotalServiceChecks"` - TotalWarnings int `json:"TotalWarnings"` - UpdateTimestamp int `json:"UpdateTimestamp"` -} - -type OpenmetricsCheck struct { - AverageExecutionTime int `json:"AverageExecutionTime"` - CheckConfigSource string `json:"CheckConfigSource"` - CheckID string `json:"CheckID"` - CheckName string `json:"CheckName"` - CheckVersion string `json:"CheckVersion"` - Events int `json:"Events"` - ExecutionTimes []int `json:"ExecutionTimes"` - LastError string `json:"LastError"` - LastExecutionTime int `json:"LastExecutionTime"` - LastSuccessDate int64 `json:"LastSuccessDate"` - MetricSamples int `json:"MetricSamples"` - ServiceChecks int `json:"ServiceChecks"` - TotalErrors int `json:"TotalErrors"` - TotalEventPlatformEvents map[string]interface{} `json:"TotalEventPlatformEvents"` - TotalEvents int `json:"TotalEvents"` - TotalHistogramBuckets int `json:"TotalHistogramBuckets"` - TotalMetricSamples int `json:"TotalMetricSamples"` - TotalRuns int `json:"TotalRuns"` - TotalServiceChecks int `json:"TotalServiceChecks"` - TotalWarnings int `json:"TotalWarnings"` - UpdateTimestamp int64 `json:"UpdateTimestamp"` -} - -type Checks struct { - Consul 
map[string]ConsulCheck `json:"consul"` - Openmetrics map[string]OpenmetricsCheck `json:"openmetrics"` -} - -type RunnerStats struct { - Checks Checks `json:"Checks"` -} - -type Root struct { - RunnerStats RunnerStats `json:"runnerStats"` -} diff --git a/acceptance/tests/fixtures/bases/datadog/datadog.yaml b/acceptance/tests/fixtures/bases/datadog/datadog.yaml deleted file mode 100644 index 1496a9ac93..0000000000 --- a/acceptance/tests/fixtures/bases/datadog/datadog.yaml +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -# https://github.com/DataDog/datadog-operator/blob/main/docs/configuration.v2alpha1.md -apiVersion: datadoghq.com/v2alpha1 -kind: DatadogAgent -metadata: - name: datadog -spec: - global: - clusterName: dc1 - registry: gcr.io/datadoghq - logLevel: debug - # Site is the Datadog intake site Agent data are sent to. Set to 'datadoghq.com' to - # send data to the US1 site (default). Set to 'datadoghq.eu' to send data to the EU site. - # fake-intake image is datadog spoof site URL used for testing. 
- # Default: 'datadoghq.com' - site: http://fake-intake.datadog.svc.cluster.local - credentials: - apiSecret: - secretName: datadog-secret - keyName: api-key - appSecret: - secretName: datadog-secret - keyName: app-key - # Requirement for kind cluster as tls verification prevents the agent from - # being able to obtain hostname from hostnameFile - # ref: https://docs.datadoghq.com/agent/troubleshooting/hostname_containers/?tab=operator - kubelet: - tlsVerify: false - features: - dogstatsd: - unixDomainSocketConfig: - enabled: false - hostPortConfig: - enabled: false - clusterChecks: - enabled: false - useClusterChecksRunners: false - admissionController: - enabled: false - mutateUnlabelled: false - apm: - enabled: false - # features.npm.enabled: false - # required as the /etc/passwd rootfs is mounted for this - # see: https://github.com/DataDog/helm-charts/issues/273 - npm: - enabled: false - logCollection: - enabled: false - containerCollectAll: false - # features.processDiscovery.enabled: false - # required as the /etc/passwd rootfs is mounted for this - # see: https://github.com/DataDog/helm-charts/issues/273 - processDiscovery: - enabled: false - # features.liveProcessCollection.enabled: false - # required as the /etc/passwd rootfs is mounted for this - # see: https://github.com/DataDog/helm-charts/issues/273 - liveProcessCollection: - enabled: false - liveContainerCollection: - enabled: false - orchestratorExplorer: - enabled: false - prometheusScrape: - enabled: false - enableServiceEndpoints: false - override: - clusterAgent: - replicas: 0 \ No newline at end of file diff --git a/acceptance/tests/fixtures/bases/datadog/fake-intake.yaml b/acceptance/tests/fixtures/bases/datadog/fake-intake.yaml deleted file mode 100644 index c330a4718d..0000000000 --- a/acceptance/tests/fixtures/bases/datadog/fake-intake.yaml +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: fake-intake - namespace: datadog ---- -apiVersion: v1 -kind: Service -metadata: - name: fake-intake - namespace: datadog -spec: - selector: - app: fake-intake - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: fake-intake - namespace: datadog -spec: - replicas: 1 - selector: - matchLabels: - app: fake-intake - template: - metadata: - name: fake-intake - namespace: datadog - labels: - app: fake-intake - tags.datadoghq.com/env: "dev" - tags.datadoghq.com/service: "fake-intake" - tags.datadoghq.com/version: "latest" - annotations: - 'consul.hashicorp.com/connect-inject': 'false' - 'consul.hashicorp.com/transparent-proxy': 'false' - 'consul.hashicorp.com/enable-metrics-merging': 'false' - 'consul.hashicorp.com/transparent-proxy-overwrite-probes': 'false' - spec: - serviceAccountName: fake-intake - containers: - - name: fake-intake - image: datadog/fakeintake:latest - ports: - - name: http - containerPort: 80 - protocol: TCP - securityContext: - privileged: true - runAsUser: 0 \ No newline at end of file diff --git a/acceptance/tests/fixtures/bases/datadog/kustomization.yaml b/acceptance/tests/fixtures/bases/datadog/kustomization.yaml deleted file mode 100644 index d2839c2a99..0000000000 --- a/acceptance/tests/fixtures/bases/datadog/kustomization.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -resources: - - fake-intake.yaml - - datadog.yaml diff --git a/acceptance/tests/fixtures/cases/datadog-dogstatsd-udp/kustomization.yaml b/acceptance/tests/fixtures/cases/datadog-dogstatsd-udp/kustomization.yaml deleted file mode 100644 index dcfce4e9f8..0000000000 --- a/acceptance/tests/fixtures/cases/datadog-dogstatsd-udp/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - ../../bases/datadog -patches: - - path: patch.yaml \ No newline at end of file diff --git a/acceptance/tests/fixtures/cases/datadog-dogstatsd-udp/patch.yaml b/acceptance/tests/fixtures/cases/datadog-dogstatsd-udp/patch.yaml deleted file mode 100644 index 69caaac697..0000000000 --- a/acceptance/tests/fixtures/cases/datadog-dogstatsd-udp/patch.yaml +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -apiVersion: datadoghq.com/v2alpha1 -kind: DatadogAgent -metadata: - name: datadog -spec: - features: - dogstatsd: - unixDomainSocketConfig: - enabled: false - hostPortConfig: - enabled: true - hostPort: 8125 - mapperProfiles: - configData: |- - - name: consul - prefix: "consul." - mappings: - - match: 'consul\.raft\.replication\.appendEntries\.logs\.([0-9a-f-]+)' - match_type: "regex" - name: "consul.raft.replication.appendEntries.logs" - tags: - peer_id: "$1" - - match: 'consul\.raft\.replication\.appendEntries\.rpc\.([0-9a-f-]+)' - match_type: "regex" - name: "consul.raft.replication.appendEntries.rpc" - tags: - peer_id: "$1" - - match: 'consul\.raft\.replication\.heartbeat\.([0-9a-f-]+)' - match_type: "regex" - name: "consul.raft.replication.heartbeat" - tags: - peer_id: "$1" - override: - nodeAgent: - annotations: - 'consul.hashicorp.com/connect-inject': 'false' - 'consul.hashicorp.com/transparent-proxy': 'false' - tolerations: - - operator: Exists - env: - - name: DD_HISTOGRAM_PERCENTILES - value: '0.10 0.20 0.30 0.40 0.50 0.60 0.70 0.80 0.90 0.95 0.99' - - name: DD_SECRET_BACKEND_COMMAND - value: /readsecret_multiple_providers.sh - containers: - agent: - env: - - name: DD_DOGSTATSD_METRICS_STATS_ENABLE - value: "true" - - name: DD_OTLP_CONFIG_LOGS_ENABLED - value: "true" - - name: DD_DOGSTATSD_NON_LOCAL_TRAFFIC - value: "true" - - name: DD_USE_V2_API_SERIES - value: "true" \ No newline at end of file 
diff --git a/acceptance/tests/fixtures/cases/datadog-dogstatsd-uds/kustomization.yaml b/acceptance/tests/fixtures/cases/datadog-dogstatsd-uds/kustomization.yaml deleted file mode 100644 index dcfce4e9f8..0000000000 --- a/acceptance/tests/fixtures/cases/datadog-dogstatsd-uds/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - ../../bases/datadog -patches: - - path: patch.yaml \ No newline at end of file diff --git a/acceptance/tests/fixtures/cases/datadog-dogstatsd-uds/patch.yaml b/acceptance/tests/fixtures/cases/datadog-dogstatsd-uds/patch.yaml deleted file mode 100644 index 2157940c8b..0000000000 --- a/acceptance/tests/fixtures/cases/datadog-dogstatsd-uds/patch.yaml +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -apiVersion: datadoghq.com/v2alpha1 -kind: DatadogAgent -metadata: - name: datadog -spec: - features: - dogstatsd: - unixDomainSocketConfig: - enabled: true - path: "/var/run/datadog/dsd.socket" - hostPortConfig: - enabled: false - mapperProfiles: - configData: |- - - name: consul - prefix: "consul." 
- mappings: - - match: 'consul\.raft\.replication\.appendEntries\.logs\.([0-9a-f-]+)' - match_type: "regex" - name: "consul.raft.replication.appendEntries.logs" - tags: - peer_id: "$1" - - match: 'consul\.raft\.replication\.appendEntries\.rpc\.([0-9a-f-]+)' - match_type: "regex" - name: "consul.raft.replication.appendEntries.rpc" - tags: - peer_id: "$1" - - match: 'consul\.raft\.replication\.heartbeat\.([0-9a-f-]+)' - match_type: "regex" - name: "consul.raft.replication.heartbeat" - tags: - peer_id: "$1" - override: - nodeAgent: - annotations: - 'consul.hashicorp.com/connect-inject': 'false' - 'consul.hashicorp.com/transparent-proxy': 'false' - volumes: - - hostPath: - path: /var/run/datadog/ - name: dsdsocket - tolerations: - - operator: Exists - env: - - name: DD_HISTOGRAM_PERCENTILES - value: '0.10 0.20 0.30 0.40 0.50 0.60 0.70 0.80 0.90 0.95 0.99' - - name: DD_SECRET_BACKEND_COMMAND - value: /readsecret_multiple_providers.sh - containers: - agent: - env: - - name: DD_DOGSTATSD_METRICS_STATS_ENABLE - value: "true" - - name: DD_OTLP_CONFIG_LOGS_ENABLED - value: "true" - - name: DD_DOGSTATSD_NON_LOCAL_TRAFFIC - value: "true" - - name: DD_USE_V2_API_SERIES - value: "true" - volumeMounts: - - name: dsdsocket - mountPath: /var/run/datadog diff --git a/acceptance/tests/fixtures/cases/datadog-openmetrics/kustomization.yaml b/acceptance/tests/fixtures/cases/datadog-openmetrics/kustomization.yaml deleted file mode 100644 index dcfce4e9f8..0000000000 --- a/acceptance/tests/fixtures/cases/datadog-openmetrics/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - ../../bases/datadog -patches: - - path: patch.yaml \ No newline at end of file diff --git a/acceptance/tests/fixtures/cases/datadog-openmetrics/patch.yaml b/acceptance/tests/fixtures/cases/datadog-openmetrics/patch.yaml deleted file mode 100644 index 3672aaa7e4..0000000000 --- a/acceptance/tests/fixtures/cases/datadog-openmetrics/patch.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -apiVersion: datadoghq.com/v2alpha1 -kind: DatadogAgent -metadata: - name: datadog -spec: - features: - prometheusScrape: - enabled: true - enableServiceEndpoints: true diff --git a/acceptance/tests/fixtures/cases/datadog-otlp-grpc/kustomization.yaml b/acceptance/tests/fixtures/cases/datadog-otlp-grpc/kustomization.yaml deleted file mode 100644 index dcfce4e9f8..0000000000 --- a/acceptance/tests/fixtures/cases/datadog-otlp-grpc/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - ../../bases/datadog -patches: - - path: patch.yaml \ No newline at end of file diff --git a/acceptance/tests/fixtures/cases/datadog-otlp-grpc/patch.yaml b/acceptance/tests/fixtures/cases/datadog-otlp-grpc/patch.yaml deleted file mode 100644 index e72f70a855..0000000000 --- a/acceptance/tests/fixtures/cases/datadog-otlp-grpc/patch.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -apiVersion: datadoghq.com/v2alpha1 -kind: DatadogAgent -metadata: - name: datadog -spec: - features: - # Sets: DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_ENDPOINT: 0.0.0.0:4317 │ - # DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_HTTP_ENDPOINT: 0.0.0.0:4318 - otlp: - receiver: - protocols: - # Set to "0.0.0.0" as per the below reference docs - # ref: https://docs.datadoghq.com/opentelemetry/otlp_ingest_in_the_agent/?tab=host#enabling-otlp-ingestion-on-the-datadog-agent - grpc: - enabled: true - endpoint: "0.0.0.0:4317" \ No newline at end of file diff --git a/acceptance/tests/fixtures/cases/datadog-otlp/kustomization.yaml b/acceptance/tests/fixtures/cases/datadog-otlp/kustomization.yaml deleted file mode 100644 index dcfce4e9f8..0000000000 --- a/acceptance/tests/fixtures/cases/datadog-otlp/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - ../../bases/datadog -patches: - - path: patch.yaml \ No newline at end of file diff --git a/acceptance/tests/fixtures/cases/datadog-otlp/patch.yaml b/acceptance/tests/fixtures/cases/datadog-otlp/patch.yaml deleted file mode 100644 index 6d6a42c972..0000000000 --- a/acceptance/tests/fixtures/cases/datadog-otlp/patch.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -apiVersion: datadoghq.com/v2alpha1 -kind: DatadogAgent -metadata: - name: datadog -spec: - features: - # Sets: DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_ENDPOINT: 0.0.0.0:4317 │ - # DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_HTTP_ENDPOINT: 0.0.0.0:4318 - otlp: - receiver: - protocols: - # Set to "0.0.0.0" as per the below reference docs - # ref: https://docs.datadoghq.com/opentelemetry/otlp_ingest_in_the_agent/?tab=host#enabling-otlp-ingestion-on-the-datadog-agent - http: - enabled: true - endpoint: "0.0.0.0:4318" \ No newline at end of file diff --git a/acceptance/tests/fixtures/cases/terminating-gateway-destinations/terminating-gateway.yaml b/acceptance/tests/fixtures/cases/terminating-gateway-destinations/terminating-gateway.yaml deleted file mode 100644 index 4bb0257d5e..0000000000 --- a/acceptance/tests/fixtures/cases/terminating-gateway-destinations/terminating-gateway.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: consul.hashicorp.com/v1alpha1 -kind: TerminatingGateway -metadata: - name: terminating-gateway -spec: - services: - - name: static-server-hostname - - name: static-server-ip diff --git a/acceptance/tests/fixtures/cases/terminating-gateway-namespaces/external-service.yaml b/acceptance/tests/fixtures/cases/terminating-gateway-namespaces/external-service.yaml deleted file mode 100644 index 5b33ee36e0..0000000000 --- a/acceptance/tests/fixtures/cases/terminating-gateway-namespaces/external-service.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: consul.hashicorp.com/v1alpha1 -kind: Registration -metadata: - name: static-server-registration - namespace: ns1 -spec: - datacenter: dc1 - node: external - nodeMeta: - external-node: "true" - external-probe: "true" - address: static-server.ns1 - service: - id: static-server - name: static-server - namespace: ns1 - port: 80 diff --git a/acceptance/tests/fixtures/cases/terminating-gateway-namespaces/terminating-gateway.yaml 
b/acceptance/tests/fixtures/cases/terminating-gateway-namespaces/terminating-gateway.yaml deleted file mode 100644 index f96b02545c..0000000000 --- a/acceptance/tests/fixtures/cases/terminating-gateway-namespaces/terminating-gateway.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: consul.hashicorp.com/v1alpha1 -kind: TerminatingGateway -metadata: - name: terminating-gateway -spec: - services: - - name: static-server - namespace: ns1 diff --git a/acceptance/tests/fixtures/cases/terminating-gateway/external-service.yaml b/acceptance/tests/fixtures/cases/terminating-gateway/external-service.yaml deleted file mode 100644 index 651411f165..0000000000 --- a/acceptance/tests/fixtures/cases/terminating-gateway/external-service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: consul.hashicorp.com/v1alpha1 -kind: Registration -metadata: - name: static-server-registration -spec: - datacenter: dc1 - node: external - nodeMeta: - external-node: "true" - external-probe: "true" - address: static-server - service: - id: static-server - name: static-server - port: 80 diff --git a/acceptance/tests/fixtures/cases/terminating-gateway/terminating-gateway.yaml b/acceptance/tests/fixtures/cases/terminating-gateway/terminating-gateway.yaml deleted file mode 100644 index ab5b5ff839..0000000000 --- a/acceptance/tests/fixtures/cases/terminating-gateway/terminating-gateway.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: consul.hashicorp.com/v1alpha1 -kind: TerminatingGateway -metadata: - name: terminating-gateway -spec: - services: - - name: static-server diff --git a/acceptance/tests/datadog/main_test.go b/acceptance/tests/mesh_v2/main_test.go similarity index 94% rename from acceptance/tests/datadog/main_test.go rename to acceptance/tests/mesh_v2/main_test.go index aca6e26e5f..d510056a10 100644 --- a/acceptance/tests/datadog/main_test.go +++ b/acceptance/tests/mesh_v2/main_test.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package datadog +package mesh_v2 import ( "os" @@ -15,5 +15,4 @@ var suite testsuite.Suite func TestMain(m *testing.M) { suite = testsuite.NewSuite(m) os.Exit(suite.Run()) - } diff --git a/acceptance/tests/mesh_v2/mesh_inject_test.go b/acceptance/tests/mesh_v2/mesh_inject_test.go new file mode 100644 index 0000000000..d54229d84b --- /dev/null +++ b/acceptance/tests/mesh_v2/mesh_inject_test.go @@ -0,0 +1,155 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mesh_v2 + +import ( + "context" + "fmt" + "strconv" + "testing" + "time" + + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper" + "github.com/hashicorp/consul-k8s/acceptance/framework/consul" + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" + "github.com/hashicorp/consul-k8s/acceptance/framework/logger" +) + +const multiport = "multiport" + +// Test that mesh sidecar proxies work for an application with multiple ports. The multiport application is a Pod listening on +// two ports. This tests inbound connections to each port of the multiport app, and outbound connections from the +// multiport app to static-server. +func TestMeshInject_MultiportService(t *testing.T) { + for _, secure := range []bool{false, true} { + name := fmt.Sprintf("secure: %t", secure) + + t.Run(name, func(t *testing.T) { + cfg := suite.Config() + cfg.SkipWhenOpenshiftAndCNI(t) + ctx := suite.Environment().DefaultContext(t) + + helmValues := map[string]string{ + "global.experiments[0]": "resource-apis", + "global.image": "ndhanushkodi/consul-dev:expose2", + // The UI is not supported for v2 in 1.17, so for now it must be disabled. 
+ "ui.enabled": "false", + "connectInject.enabled": "true", + // Enable DNS so we can test that DNS redirection _isn't_ set in the pod. + "dns.enabled": "true", + + "global.tls.enabled": strconv.FormatBool(secure), + "global.acls.manageSystemACLs": strconv.FormatBool(secure), + } + + releaseName := helpers.RandomName() + consulCluster := consul.NewHelmCluster(t, helmValues, ctx, cfg, releaseName) + + consulCluster.Create(t) + + consulClient, _ := consulCluster.SetupConsulClient(t, secure) + + // Check that the ACL token is deleted. + if secure { + // We need to register the cleanup function before we create the deployments + // because golang will execute them in reverse order i.e. the last registered + // cleanup function will be executed first. + t.Cleanup(func() { + retrier := &retry.Timer{Timeout: 5 * time.Minute, Wait: 1 * time.Second} + retry.RunWith(retrier, t, func(r *retry.R) { + tokens, _, err := consulClient.ACL().TokenList(nil) + require.NoError(r, err) + for _, token := range tokens { + require.NotContains(r, token.Description, multiport) + require.NotContains(r, token.Description, connhelper.StaticClientName) + } + }) + }) + } + + logger.Log(t, "creating multiport static-server and static-client deployments") + k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../../tests/fixtures/bases/v2-multiport-app") + if cfg.EnableTransparentProxy { + k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../../tests/fixtures/cases/v2-static-client-inject-tproxy") + } else { + k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../../tests/fixtures/cases/v2-static-client-inject") + } + + // Check that static-client has been injected and now has 2 containers. 
+ podList, err := ctx.KubernetesClient(t).CoreV1().Pods(ctx.KubectlOptions(t).Namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: "app=static-client", + }) + require.NoError(t, err) + require.Len(t, podList.Items, 1) + require.Len(t, podList.Items[0].Spec.Containers, 2) + + // Check that multiport has been injected and now has 3 containers. + podList, err = ctx.KubernetesClient(t).CoreV1().Pods(ctx.KubectlOptions(t).Namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: "app=multiport", + }) + require.NoError(t, err) + require.Len(t, podList.Items, 1) + require.Len(t, podList.Items[0].Spec.Containers, 3) + + if !secure { + k8s.KubectlApplyK(t, ctx.KubectlOptions(t), "../../tests/fixtures/cases/trafficpermissions-deny") + } + + // Now test that traffic is denied between the source and the destination. + if cfg.EnableTransparentProxy { + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://multiport:8080") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://multiport:9090") + } else { + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:2345") + } + k8s.KubectlApplyK(t, ctx.KubectlOptions(t), "../../tests/fixtures/bases/trafficpermissions") + 
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { + k8s.KubectlDeleteK(t, ctx.KubectlOptions(t), "../../tests/fixtures/bases/trafficpermissions") + }) + + // TODO: add a trafficpermission to a particular port and validate + + // Check connection from static-client to multiport. + if cfg.EnableTransparentProxy { + k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://multiport:8080") + } else { + k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://localhost:1234") + } + + // Check connection from static-client to multiport-admin. + if cfg.EnableTransparentProxy { + k8s.CheckStaticServerConnectionSuccessfulWithMessage(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "hello world from 9090 admin", "http://multiport:9090") + } else { + k8s.CheckStaticServerConnectionSuccessfulWithMessage(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "hello world from 9090 admin", "http://localhost:2345") + } + + // Test that kubernetes readiness status is synced to Consul. This will make the multi port pods unhealthy + // and check inbound connections to the multi port pods' services. + // Create the files so that the readiness probes of the multi port pod fails. + logger.Log(t, "testing k8s -> consul health checks sync by making the multiport unhealthy") + k8s.RunKubectl(t, ctx.KubectlOptions(t), "exec", "deploy/"+multiport, "-c", "multiport", "--", "touch", "/tmp/unhealthy-multiport") + logger.Log(t, "testing k8s -> consul health checks sync by making the multiport-admin unhealthy") + k8s.RunKubectl(t, ctx.KubectlOptions(t), "exec", "deploy/"+multiport, "-c", "multiport-admin", "--", "touch", "/tmp/unhealthy-multiport-admin") + + // The readiness probe should take a moment to be reflected in Consul, CheckStaticServerConnection will retry + // until Consul marks the service instance unavailable for mesh traffic, causing the connection to fail. 
+ // We are expecting a "connection reset by peer" error because in a case of health checks, + // there will be no healthy proxy host to connect to. That's why we can't assert that we receive an empty reply + // from server, which is the case when a connection is unsuccessful due to intentions in other tests. + if cfg.EnableTransparentProxy { + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://multiport:8080") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://multiport:9090") + } else { + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:2345") + } + }) + } +} diff --git a/acceptance/tests/partitions/partitions_gateway_test.go b/acceptance/tests/partitions/partitions_gateway_test.go index acf9226715..a90a790cb6 100644 --- a/acceptance/tests/partitions/partitions_gateway_test.go +++ b/acceptance/tests/partitions/partitions_gateway_test.go @@ -219,19 +219,8 @@ func TestPartitions_Gateway(t *testing.T) { logger.Log(t, "creating static-client pod in secondary partition cluster") k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/bases/static-client") - // Create certificate secret, we do this 
separately since - // applying the secret will make an invalid certificate that breaks other tests - logger.Log(t, "creating certificate secret") - out, err := k8s.RunKubectlAndGetOutputE(t, secondaryPartitionClusterStaticServerOpts, "apply", "-f", "../fixtures/bases/api-gateway/certificate.yaml") - require.NoError(t, err, out) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - // Ignore errors here because if the test ran as expected - // the custom resources will have been deleted. - k8s.RunKubectlAndGetOutputE(t, secondaryPartitionClusterStaticServerOpts, "delete", "-f", "../fixtures/bases/api-gateway/certificate.yaml") - }) - logger.Log(t, "creating api-gateway resources") - out, err = k8s.RunKubectlAndGetOutputE(t, secondaryPartitionClusterStaticServerOpts, "apply", "-k", "../fixtures/bases/api-gateway") + out, err := k8s.RunKubectlAndGetOutputE(t, secondaryPartitionClusterStaticServerOpts, "apply", "-k", "../fixtures/bases/api-gateway") require.NoError(t, err, out) helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { // Ignore errors here because if the test ran as expected diff --git a/acceptance/tests/peering/peering_gateway_test.go b/acceptance/tests/peering/peering_gateway_test.go index cafaa51064..542a215839 100644 --- a/acceptance/tests/peering/peering_gateway_test.go +++ b/acceptance/tests/peering/peering_gateway_test.go @@ -236,19 +236,8 @@ func TestPeering_Gateway(t *testing.T) { k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/non-default-namespace") }) - // Create certificate secret, we do this separately since - // applying the secret will make an invalid certificate that breaks other tests - logger.Log(t, "creating certificate secret") - out, err := k8s.RunKubectlAndGetOutputE(t, staticClientOpts, "apply", "-f", "../fixtures/bases/api-gateway/certificate.yaml") - require.NoError(t, err, out) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - 
// Ignore errors here because if the test ran as expected - // the custom resources will have been deleted. - k8s.RunKubectlAndGetOutputE(t, staticClientOpts, "delete", "-f", "../fixtures/bases/api-gateway/certificate.yaml") - }) - logger.Log(t, "creating api-gateway resources in client peer") - out, err = k8s.RunKubectlAndGetOutputE(t, staticClientOpts, "apply", "-k", "../fixtures/bases/api-gateway") + out, err := k8s.RunKubectlAndGetOutputE(t, staticClientOpts, "apply", "-k", "../fixtures/bases/api-gateway") require.NoError(t, err, out) helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { // Ignore errors here because if the test ran as expected diff --git a/acceptance/tests/sync/sync_catalog_test.go b/acceptance/tests/sync/sync_catalog_test.go index 531b3748a5..d4a0e9e99c 100644 --- a/acceptance/tests/sync/sync_catalog_test.go +++ b/acceptance/tests/sync/sync_catalog_test.go @@ -9,14 +9,13 @@ import ( "testing" "time" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/stretchr/testify/require" - "github.com/hashicorp/consul-k8s/acceptance/framework/consul" "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/stretchr/testify/require" ) // Test that sync catalog works in both the default installation and @@ -86,10 +85,15 @@ func TestSyncCatalog(t *testing.T) { // The test will create a test service and a pod and will // wait for the service to be synced *to* consul. 
func TestSyncCatalogWithIngress(t *testing.T) { + t.Skip("TODO(fails): NET-8594") + cfg := suite.Config() if cfg.EnableCNI { t.Skipf("skipping because -enable-cni is set and sync catalog is already tested with regular tproxy") } + if !cfg.UseEKS { + t.Skipf("skipping because -use-eks is not set and the ingress test only runs on EKS") + } cases := map[string]struct { secure bool @@ -103,7 +107,7 @@ func TestSyncCatalogWithIngress(t *testing.T) { ctx := suite.Environment().DefaultContext(t) helmValues := map[string]string{ "syncCatalog.enabled": "true", - "syncCatalog.ingress.enabled": "true", + "syncCatalog.ingress.enabled": "true", "global.tls.enabled": strconv.FormatBool(c.secure), "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), } @@ -145,18 +149,16 @@ func TestSyncCatalogWithIngress(t *testing.T) { } }) - retry.RunWith(counter, t, func(r *retry.R) { - service, _, err := consulClient.Catalog().Service(syncedServiceName, "", nil) - require.NoError(r, err) - require.Len(r, service, 1) - require.Equal(r, "test.acceptance.com", service[0].ServiceAddress) - require.Equal(r, []string{"k8s"}, service[0].ServiceTags) - filter := fmt.Sprintf("ServiceID == %q", service[0].ServiceID) - healthChecks, _, err := consulClient.Health().Checks(syncedServiceName, &api.QueryOptions{Filter: filter}) - require.NoError(r, err) - require.Len(r, healthChecks, 1) - require.Equal(r, api.HealthPassing, healthChecks[0].Status) - }) + service, _, err := consulClient.Catalog().Service(syncedServiceName, "", nil) + require.NoError(t, err) + require.Len(t, service, 1) + require.Equal(t, "test.acceptance.com", service[0].ServiceAddress) + require.Equal(t, []string{"k8s"}, service[0].ServiceTags) + filter := fmt.Sprintf("ServiceID == %q", service[0].ServiceID) + healthChecks, _, err := consulClient.Health().Checks(syncedServiceName, &api.QueryOptions{Filter: filter}) + require.NoError(t, err) + require.Len(t, healthChecks, 1) + require.Equal(t, api.HealthPassing, healthChecks[0].Status) }) } 
} diff --git a/acceptance/tests/tenancy_v2/main_test.go b/acceptance/tests/tenancy_v2/main_test.go new file mode 100644 index 0000000000..1766d95319 --- /dev/null +++ b/acceptance/tests/tenancy_v2/main_test.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tenancy_v2 + +import ( + "fmt" + "os" + "testing" + + testsuite "github.com/hashicorp/consul-k8s/acceptance/framework/suite" +) + +var suite testsuite.Suite + +func TestMain(m *testing.M) { + suite = testsuite.NewSuite(m) + + expectedNumberOfClusters := 1 + if suite.Config().IsExpectedClusterCount(expectedNumberOfClusters) { + os.Exit(suite.Run()) + } else { + fmt.Printf( + "Skipping tenancy_v2 tests because the number of clusters, %d, did not match the expected count of %d\n", + len(suite.Config().KubeEnvs), + expectedNumberOfClusters, + ) + os.Exit(0) + } +} diff --git a/acceptance/tests/tenancy_v2/partition_test.go b/acceptance/tests/tenancy_v2/partition_test.go new file mode 100644 index 0000000000..8ad031c8fe --- /dev/null +++ b/acceptance/tests/tenancy_v2/partition_test.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tenancy_v2 + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul-k8s/acceptance/framework/consul" + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul/proto-public/pbresource" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" +) + +// TestTenancy_Partition_Created tests that V2 partitions are created when requested +// by a consul client external to the consul server cluster's k8s cluster. +// +// It sets up an external Consul server in the same cluster but a different Helm installation +// and then treats this server as external. 
+func TestTenancy_Partition_Created(t *testing.T) { + // Given a single k8s kind cluster + // Where helm "server" release hosts a consul server cluster (server.enabled=true) + // And helm "client" release hosts a consul client cluster (server.enabled=false) + // And both releases have experiments "resource-apis" and "v2tenancy" enabled + // And helm "client" release is configured to point to the helm "server" release as an external server (externalServer.enabled=true) + // And helm "client" release has admin partitions enabled with name "ap1" (global.adminPartitions.name=ap1) + // And helm "server" release is open for business + // When helm "client" release is installed + // Then partition "ap1" is created by the partition-init job in the helm "client" release + + // We're skipping ACLs for now because they're not supported in v2. + cfg := suite.Config() + // Requires connectInject.enabled which we disable below. + cfg.SkipWhenCNI(t) + ctx := suite.Environment().DefaultContext(t) + + serverHelmValues := map[string]string{ + "server.enabled": "true", + "global.experiments[0]": "resource-apis", + "global.experiments[1]": "v2tenancy", + "global.adminPartitions.enabled": "false", + "global.enableConsulNamespaces": "true", + + // Don't install injector, controller and cni on this k8s cluster so that it's not installed twice. + "connectInject.enabled": "false", + + // The UI is not supported for v2 in 1.17, so for now it must be disabled. 
+ "ui.enabled": "false", + } + + serverReleaseName := helpers.RandomName() + serverCluster := consul.NewHelmCluster(t, serverHelmValues, ctx, cfg, serverReleaseName) + serverCluster.Create(t) + + clientHelmValues := map[string]string{ + "server.enabled": "false", + "global.experiments[0]": "resource-apis", + "global.experiments[1]": "v2tenancy", + "global.adminPartitions.enabled": "true", + "global.adminPartitions.name": "ap1", + "global.enableConsulNamespaces": "true", + "externalServers.enabled": "true", + "externalServers.hosts[0]": fmt.Sprintf("%s-consul-server", serverReleaseName), + + // This needs to be set to true otherwise the pods never materialize + "connectInject.enabled": "true", + + // The UI is not supported for v2 in 1.17, so for now it must be disabled. + "ui.enabled": "false", + } + + clientReleaseName := helpers.RandomName() + clientCluster := consul.NewHelmCluster(t, clientHelmValues, ctx, cfg, clientReleaseName) + clientCluster.SkipCheckForPreviousInstallations = true + + clientCluster.Create(t) + + // verify partition ap1 created by partition init job + serverResourceClient := serverCluster.ResourceClient(t, false) + _, err := serverResourceClient.Read(context.Background(), &pbresource.ReadRequest{ + Id: &pbresource.ID{ + Name: "ap1", + Type: pbtenancy.PartitionType, + }, + }) + require.NoError(t, err, "expected partition ap1 to be created by partition init job") +} diff --git a/acceptance/tests/terminating-gateway/common.go b/acceptance/tests/terminating-gateway/common.go index 36b5293c2b..65dd7545a8 100644 --- a/acceptance/tests/terminating-gateway/common.go +++ b/acceptance/tests/terminating-gateway/common.go @@ -8,8 +8,6 @@ import ( "strings" "testing" - "github.com/gruntwork-io/terratest/modules/k8s" - "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" "github.com/hashicorp/consul/api" "github.com/stretchr/testify/require" @@ -41,19 +39,6 @@ func AddIntention(t 
*testing.T, consulClient *api.Client, sourcePeer, sourceNS, require.NoError(t, err) } -func CreateTerminatingGatewayFromCRD(t *testing.T, kubectlOptions *k8s.KubectlOptions, noCleanupOnFailure, noCleanup bool, path string) { - // Create the config entry for the terminating gateway. - k8s.KubectlApply(t, kubectlOptions, path) - - helpers.Cleanup(t, noCleanupOnFailure, noCleanup, func() { - // Note: this delete command won't wait for pods to be fully terminated. - // This shouldn't cause any test pollution because the underlying - // objects are deployments, and so when other tests create these - // they should have different pod names. - k8s.KubectlDelete(t, kubectlOptions, path) - }) -} - func CreateTerminatingGatewayConfigEntry(t *testing.T, consulClient *api.Client, gwNamespace, serviceNamespace string, serviceNames ...string) { t.Helper() diff --git a/acceptance/tests/terminating-gateway/terminating_gateway_destinations_test.go b/acceptance/tests/terminating-gateway/terminating_gateway_destinations_test.go index 753e49580a..67097b7648 100644 --- a/acceptance/tests/terminating-gateway/terminating_gateway_destinations_test.go +++ b/acceptance/tests/terminating-gateway/terminating_gateway_destinations_test.go @@ -86,7 +86,7 @@ func TestTerminatingGatewayDestinations(t *testing.T) { CreateMeshConfigEntry(t, consulClient, "") // Create the config entry for the terminating gateway. 
- CreateTerminatingGatewayFromCRD(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.NoCleanup, "../fixtures/cases/terminating-gateway-destinations/terminating-gateway.yaml") + CreateTerminatingGatewayConfigEntry(t, consulClient, "", "", staticServerHostnameID, staticServerIPID) // Deploy the static client logger.Log(t, "deploying static client") diff --git a/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go b/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go index 6407a483ae..ee51a64c0d 100644 --- a/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go +++ b/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go @@ -76,19 +76,8 @@ func TestTerminatingGatewaySingleNamespace(t *testing.T) { logger.Log(t, "creating static-server deployment") k8s.DeployKustomize(t, nsK8SOptions, cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/bases/static-server") - // Register the external service - k8sOptions := helpers.K8sOptions{ - Options: ctx.KubectlOptions(t), - NoCleanupOnFailure: cfg.NoCleanupOnFailure, - NoCleanup: cfg.NoCleanup, - ConfigPath: "../fixtures/cases/terminating-gateway-namespaces/external-service.yaml", - } - - consulOptions := helpers.ConsulOptions{ - ConsulClient: consulClient, - } - - helpers.RegisterExternalServiceCRD(t, k8sOptions, consulOptions) + // Register the external service. + helpers.RegisterExternalService(t, consulClient, testNamespace, staticServerName, staticServerName, 80) // If ACLs are enabled we need to update the role of the terminating gateway // with service:write permissions to the static-server service @@ -98,7 +87,6 @@ func TestTerminatingGatewaySingleNamespace(t *testing.T) { } // Create the config entry for the terminating gateway. 
- // This case cannot be replicated using CRDs because the consul namespace does not match the kubernetes namespace the terminating gateway is in CreateTerminatingGatewayConfigEntry(t, consulClient, testNamespace, testNamespace, staticServerName) // Deploy the static client. @@ -198,19 +186,7 @@ func TestTerminatingGatewayNamespaceMirroring(t *testing.T) { k8s.DeployKustomize(t, ns1K8SOptions, cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/bases/static-server") // Register the external service - k8sOptions := helpers.K8sOptions{ - Options: ctx.KubectlOptions(t), - NoCleanupOnFailure: cfg.NoCleanupOnFailure, - NoCleanup: cfg.NoCleanup, - ConfigPath: "../fixtures/cases/terminating-gateway-namespaces/external-service.yaml", - } - - consulOptions := helpers.ConsulOptions{ - ConsulClient: consulClient, - Namespace: testNamespace, - } - - helpers.RegisterExternalServiceCRD(t, k8sOptions, consulOptions) + helpers.RegisterExternalService(t, consulClient, testNamespace, staticServerName, staticServerName, 80) // If ACLs are enabled we need to update the role of the terminating gateway // with service:write permissions to the static-server service @@ -219,8 +195,8 @@ func TestTerminatingGatewayNamespaceMirroring(t *testing.T) { UpdateTerminatingGatewayRole(t, consulClient, fmt.Sprintf(staticServerPolicyRulesNamespace, testNamespace)) } - // Create the config entry for the terminating gateway. 
- CreateTerminatingGatewayFromCRD(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.NoCleanup, "../fixtures/cases/terminating-gateway-namespaces/terminating-gateway.yaml") + // Create the config entry for the terminating gateway + CreateTerminatingGatewayConfigEntry(t, consulClient, "", testNamespace, staticServerName) // Deploy the static client logger.Log(t, "deploying static client") diff --git a/acceptance/tests/terminating-gateway/terminating_gateway_test.go b/acceptance/tests/terminating-gateway/terminating_gateway_test.go index bf3cd44d60..acd0232227 100644 --- a/acceptance/tests/terminating-gateway/terminating_gateway_test.go +++ b/acceptance/tests/terminating-gateway/terminating_gateway_test.go @@ -55,18 +55,7 @@ func TestTerminatingGateway(t *testing.T) { consulClient, _ := consulCluster.SetupConsulClient(t, c.secure) // Register the external service - k8sOptions := helpers.K8sOptions{ - Options: ctx.KubectlOptions(t), - NoCleanupOnFailure: cfg.NoCleanupOnFailure, - NoCleanup: cfg.NoCleanup, - ConfigPath: "../fixtures/cases/terminating-gateway/external-service.yaml", - } - - consulOptions := helpers.ConsulOptions{ - ConsulClient: consulClient, - } - - helpers.RegisterExternalServiceCRD(t, k8sOptions, consulOptions) + helpers.RegisterExternalService(t, consulClient, "", staticServerName, staticServerName, 80) // If ACLs are enabled we need to update the role of the terminating gateway // with service:write permissions to the static-server service @@ -75,9 +64,8 @@ func TestTerminatingGateway(t *testing.T) { UpdateTerminatingGatewayRole(t, consulClient, staticServerPolicyRules) } - logger.Log(t, "creating terminating gateway config entry") // Create the config entry for the terminating gateway. 
- CreateTerminatingGatewayFromCRD(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.NoCleanup, "../fixtures/cases/terminating-gateway/terminating-gateway.yaml") + CreateTerminatingGatewayConfigEntry(t, consulClient, "", "", staticServerName) // Deploy the static client logger.Log(t, "deploying static client") diff --git a/charts/consul/Chart.yaml b/charts/consul/Chart.yaml index 3dc8d13015..16be797490 100644 --- a/charts/consul/Chart.yaml +++ b/charts/consul/Chart.yaml @@ -3,8 +3,8 @@ apiVersion: v2 name: consul -version: 1.6.0-dev -appVersion: 1.20-dev +version: 1.4.3-dev +appVersion: 1.18-dev kubeVersion: ">=1.22.0-0" description: Official HashiCorp Consul Chart home: https://www.consul.io @@ -16,11 +16,11 @@ annotations: artifacthub.io/prerelease: true artifacthub.io/images: | - name: consul - image: docker.mirror.hashicorp.services/hashicorppreview/consul:1.20-dev + image: docker.mirror.hashicorp.services/hashicorppreview/consul:1.18-dev - name: consul-k8s-control-plane - image: docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.6-dev + image: docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.4-dev - name: consul-dataplane - image: docker.mirror.hashicorp.services/hashicorppreview/consul-dataplane:1.6-dev + image: docker.mirror.hashicorp.services/hashicorppreview/consul-dataplane:1.4-dev - name: envoy image: envoyproxy/envoy:v1.25.11 artifacthub.io/license: MPL-2.0 diff --git a/charts/consul/templates/_helpers.tpl b/charts/consul/templates/_helpers.tpl index ded6485418..ca87485a78 100644 --- a/charts/consul/templates/_helpers.tpl +++ b/charts/consul/templates/_helpers.tpl @@ -19,7 +19,6 @@ as well as the global.name setting. {{- if not .Values.global.enablePodSecurityPolicies -}} securityContext: allowPrivilegeEscalation: false - readOnlyRootFilesystem: true capabilities: drop: - ALL @@ -246,7 +245,6 @@ This template is for an init container. 
{{- define "consul.getAutoEncryptClientCA" -}} - name: get-auto-encrypt-client-ca image: {{ .Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" . }} command: - "/bin/sh" - "-ec" @@ -496,6 +494,55 @@ Usage: {{ template "consul.validateTelemetryCollectorResourceId" . }} {{/**/}} +{{/* +Fails if global.experiments.resourceAPIs is set along with any of these unsupported features. +- global.peering.enabled +- global.federation.enabled +- global.cloud.enabled +- client.enabled +- ui.enabled +- syncCatalog.enabled +- meshGateway.enabled +- ingressGateways.enabled +- terminatingGateways.enabled +- apiGateway.enabled + +Usage: {{ template "consul.validateResourceAPIs" . }} + +*/}} +{{- define "consul.validateResourceAPIs" -}} +{{- if (and (mustHas "resource-apis" .Values.global.experiments) .Values.global.peering.enabled ) }} +{{fail "When the value global.experiments.resourceAPIs is set, global.peering.enabled is currently unsupported."}} +{{- end }} +{{- if (and (mustHas "resource-apis" .Values.global.experiments) (not (mustHas "v2tenancy" .Values.global.experiments)) .Values.global.adminPartitions.enabled ) }} +{{fail "When the value global.experiments.resourceAPIs is set, global.experiments.v2tenancy must also be set to support global.adminPartitions.enabled."}} +{{- end }} +{{- if (and (mustHas "resource-apis" .Values.global.experiments) .Values.global.federation.enabled ) }} +{{fail "When the value global.experiments.resourceAPIs is set, global.federation.enabled is currently unsupported."}} +{{- end }} +{{- if (and (mustHas "resource-apis" .Values.global.experiments) .Values.global.cloud.enabled ) }} +{{fail "When the value global.experiments.resourceAPIs is set, global.cloud.enabled is currently unsupported."}} +{{- end }} +{{- if (and (mustHas "resource-apis" .Values.global.experiments) .Values.client.enabled ) }} +{{fail "When the value global.experiments.resourceAPIs is set, client.enabled is currently unsupported."}} +{{- end }} +{{- if (and 
(mustHas "resource-apis" .Values.global.experiments) .Values.ui.enabled ) }} +{{fail "When the value global.experiments.resourceAPIs is set, ui.enabled is currently unsupported."}} +{{- end }} +{{- if (and (mustHas "resource-apis" .Values.global.experiments) .Values.syncCatalog.enabled ) }} +{{fail "When the value global.experiments.resourceAPIs is set, syncCatalog.enabled is currently unsupported."}} +{{- end }} +{{- if (and (mustHas "resource-apis" .Values.global.experiments) .Values.ingressGateways.enabled ) }} +{{fail "When the value global.experiments.resourceAPIs is set, ingressGateways.enabled is currently unsupported."}} +{{- end }} +{{- if (and (mustHas "resource-apis" .Values.global.experiments) .Values.terminatingGateways.enabled ) }} +{{fail "When the value global.experiments.resourceAPIs is set, terminatingGateways.enabled is currently unsupported."}} +{{- end }} +{{- if (and (mustHas "resource-apis" .Values.global.experiments) .Values.apiGateway.enabled ) }} +{{fail "When the value global.experiments.resourceAPIs is set, apiGateway.enabled is currently unsupported."}} +{{- end }} +{{- end }} + {{/* Validation for Consul Metrics configuration: @@ -637,23 +684,5 @@ Usage: {{ template "consul.versionInfo" }} {{- else }} {{- $sanitizedVersion = $versionInfo }} {{- end -}} -{{- printf "%s" $sanitizedVersion | trunc 63 | quote }} -{{- end -}} - -{{/* -Sets the imagePullPolicy for all Consul images (consul, consul-dataplane, consul-k8s, consul-telemetry-collector) -Valid values are: - IfNotPresent - Always - Never - In the case of empty, see https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy for details - -Usage: {{ template "consul.imagePullPolicy" . }} TODO: melisa should we name this differently ? 
-*/}} -{{- define "consul.imagePullPolicy" -}} -{{ if or (eq .Values.global.imagePullPolicy "IfNotPresent") (eq .Values.global.imagePullPolicy "Always") (eq .Values.global.imagePullPolicy "Never")}}imagePullPolicy: {{ .Values.global.imagePullPolicy }} -{{ else if eq .Values.global.imagePullPolicy "" }} -{{ else }} -{{fail "imagePullPolicy can only be IfNotPresent, Always, Never, or empty" }} -{{ end }} -{{- end -}} +{{- printf "%s" $sanitizedVersion | quote }} +{{- end -}} \ No newline at end of file diff --git a/charts/consul/templates/api-gateway-controller-clusterrole.yaml b/charts/consul/templates/api-gateway-controller-clusterrole.yaml new file mode 100644 index 0000000000..eac2bd1f69 --- /dev/null +++ b/charts/consul/templates/api-gateway-controller-clusterrole.yaml @@ -0,0 +1,265 @@ +{{- if .Values.apiGateway.enabled }} +# The ClusterRole to enable the API Gateway controller to access required api endpoints. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-api-gateway-controller + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: api-gateway-controller +rules: +- apiGroups: + - api-gateway.consul.hashicorp.com + resources: + - gatewayclassconfigs + verbs: + - get + - list + - update + - watch +- apiGroups: + - api-gateway.consul.hashicorp.com + resources: + - gatewayclassconfigs/finalizers + verbs: + - update +- apiGroups: + - api-gateway.consul.hashicorp.com + resources: + - meshservices + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - get + - list + - update + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get + - list + - update + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - create + - get + - list + - update + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch +- apiGroups: + - gateway.networking.k8s.io + resources: + - referencegrants + verbs: + - get + - list + - watch +- apiGroups: + - gateway.networking.k8s.io + resources: + - referencepolicies + verbs: + - get + - list + - watch +- apiGroups: + - gateway.networking.k8s.io + resources: + - gatewayclasses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - gateway.networking.k8s.io + resources: + - gatewayclasses/finalizers + verbs: + - update +- apiGroups: + - 
gateway.networking.k8s.io + resources: + - gatewayclasses/status + verbs: + - get + - patch + - update +- apiGroups: + - gateway.networking.k8s.io + resources: + - gateways + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - gateway.networking.k8s.io + resources: + - gateways/finalizers + verbs: + - update +- apiGroups: + - gateway.networking.k8s.io + resources: + - gateways/status + verbs: + - get + - patch + - update +- apiGroups: + - gateway.networking.k8s.io + resources: + - httproutes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - gateway.networking.k8s.io + resources: + - httproutes/finalizers + verbs: + - update +- apiGroups: + - gateway.networking.k8s.io + resources: + - httproutes/status + verbs: + - get + - patch + - update +- apiGroups: + - gateway.networking.k8s.io + resources: + - tcproutes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - gateway.networking.k8s.io + resources: + - tcproutes/finalizers + verbs: + - update +- apiGroups: + - gateway.networking.k8s.io + resources: + - tcproutes/status + verbs: + - get + - patch + - update +{{- if .Values.global.enablePodSecurityPolicies }} +- apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - create + - get + - list + - watch +{{- end }} +{{- end }} diff --git a/charts/consul/templates/api-gateway-controller-clusterrolebinding.yaml b/charts/consul/templates/api-gateway-controller-clusterrolebinding.yaml new file mode 100644 index 0000000000..d083a08129 --- /dev/null +++ b/charts/consul/templates/api-gateway-controller-clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.apiGateway.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . 
}}-api-gateway-controller + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: api-gateway-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-api-gateway-controller +subjects: +- kind: ServiceAccount + name: {{ template "consul.fullname" . }}-api-gateway-controller + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/consul/templates/api-gateway-controller-deployment.yaml b/charts/consul/templates/api-gateway-controller-deployment.yaml new file mode 100644 index 0000000000..453be66054 --- /dev/null +++ b/charts/consul/templates/api-gateway-controller-deployment.yaml @@ -0,0 +1,306 @@ +{{- if .Values.apiGateway.enabled }} +{{- if not .Values.client.grpc }}{{ fail "client.grpc must be true for api gateway" }}{{ end }} +{{- if not .Values.apiGateway.image}}{{ fail "apiGateway.image must be set to enable api gateway" }}{{ end }} +{{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} +{{ template "consul.validateRequiredCloudSecretsExist" . }} +{{ template "consul.validateCloudSecretKeys" . }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "consul.fullname" . }}-api-gateway-controller + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: api-gateway-controller + {{- if .Values.global.extraLabels }} + {{- toYaml .Values.global.extraLabels | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.apiGateway.controller.replicas }} + selector: + matchLabels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + release: {{ .Release.Name }} + component: api-gateway-controller + template: + metadata: + annotations: + consul.hashicorp.com/connect-inject: "false" + consul.hashicorp.com/mesh-inject: "false" + {{- if (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) }} + "vault.hashicorp.com/agent-init-first": "true" + "vault.hashicorp.com/agent-inject": "true" + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulCARole }} + "vault.hashicorp.com/agent-inject-secret-serverca.crt": {{ .Values.global.tls.caCert.secretName }} + "vault.hashicorp.com/agent-inject-template-serverca.crt": {{ template "consul.serverTLSCATemplate" . }} + {{- if .Values.global.secretsBackend.vault.agentAnnotations }} + {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . | nindent 8 | trim }} + {{ end }} + {{- if (and (.Values.global.secretsBackend.vault.vaultNamespace) (not (hasKey (default "" .Values.global.secretsBackend.vault.agentAnnotations | fromYaml) "vault.hashicorp.com/namespace")))}} + "vault.hashicorp.com/namespace": "{{ .Values.global.secretsBackend.vault.vaultNamespace }}" + {{- end }} + {{- if and .Values.global.secretsBackend.vault.ca.secretName .Values.global.secretsBackend.vault.ca.secretKey }} + "vault.hashicorp.com/agent-extra-secret": "{{ .Values.global.secretsBackend.vault.ca.secretName }}" + "vault.hashicorp.com/ca-cert": "/vault/custom/{{ .Values.global.secretsBackend.vault.ca.secretKey }}" + {{- end }} + {{- end }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: api-gateway-controller + {{- if .Values.global.extraLabels }} + {{- toYaml .Values.global.extraLabels | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "consul.fullname" . 
}}-api-gateway-controller + containers: + - name: api-gateway-controller + image: {{ .Values.apiGateway.image }} + ports: + - containerPort: 9090 + name: sds + protocol: TCP + env: + {{- if or (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) .Values.client.enabled }} + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + {{- /* When Vault is being used as a secrets backend, auto-encrypt must be enabled. Since clients use a separate + root CA from servers when auto-encrypt is enabled, and our controller communicates with the agent when clients are + enabled, we only use the Vault server CA if clients are disabled and our controller will be communicating w/ the server. */}} + {{- if and (not .Values.client.enabled) .Values.global.secretsBackend.vault.enabled }} + value: /vault/secrets/serverca.crt + {{- else }} + value: /consul/tls/ca/tls.crt + {{- end }} + {{- end }} + {{- end }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/login/acl-token" + # CONSUL_LOGIN_DATACENTER is passed to the gateway that gets created. The controller does not use this to log in + - name: CONSUL_LOGIN_DATACENTER + value: {{ .Values.global.datacenter }} + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.client.enabled }} + {{/* + We use client agent nodes if we have them to support backwards compatibility for Consul API Gateway + v0.4 and older, which requires connectivity between the registered Consul agent node and a + deployment for health checking (originating from the Consul node). Always leveraging the agents in + the case that they're explicitly opted into allows us to support users with agent node + + "externalServers" configuration upgrading a Helm chart without upgrading API gateways. 
+ */}} + {{- if .Values.global.tls.enabled }} + value: $(HOST_IP):8501 + {{- else }} + value: $(HOST_IP):8500 + {{- end }} + {{- else if .Values.externalServers.enabled }} + {{/* + "externalServers" specified and running in "agentless" mode, this will only work with + Consul API Gateway v0.5 or newer + */}} + value: {{ first .Values.externalServers.hosts }}:{{ .Values.externalServers.httpsPort }} + {{- else }} + {{/* + We have local network connectivity between deployments and the internal cluster, this + should be supported in all versions of Consul API Gateway + */}} + {{- if .Values.global.tls.enabled }} + value: {{ template "consul.fullname" . }}-server:8501 + {{- else }} + value: {{ template "consul.fullname" . }}-server:8500 + {{- end }} + {{- end }} + - name: CONSUL_HTTP_SSL + value: "{{ .Values.global.tls.enabled }}" + {{- if and (not .Values.client.enabled) .Values.externalServers.enabled .Values.externalServers.tlsServerName }} + - name: CONSUL_TLS_SERVER_NAME + value: {{ .Values.externalServers.tlsServerName }} + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + - name: CONSUL_PARTITION + value: {{ .Values.global.adminPartitions.name }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_LOGIN_PARTITION + value: {{ .Values.global.adminPartitions.name }} + {{- end }} + {{- end }} + {{- if not .Values.client.enabled }} + - name: CONSUL_DYNAMIC_SERVER_DISCOVERY + value: "true" + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec consul-api-gateway server \ + -sds-server-host {{ template "consul.fullname" . 
}}-api-gateway-controller.{{ .Release.Namespace }}.svc \ + -k8s-namespace {{ .Release.Namespace }} \ + {{- if .Values.global.enableConsulNamespaces }} + {{- if .Values.connectInject.consulNamespaces.consulDestinationNamespace }} + -consul-destination-namespace={{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} \ + {{- end }} + {{- if .Values.connectInject.consulNamespaces.mirroringK8S }} + -mirroring-k8s=true \ + {{- if .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} + -mirroring-k8s-prefix={{ .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} \ + {{- end }} + {{- end }} + {{- end }} + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} + -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ + {{- end }} + -log-level {{ default .Values.global.logLevel .Values.apiGateway.logLevel }} \ + -log-json={{ .Values.global.logJSON }} + volumeMounts: + {{- if .Values.global.acls.manageSystemACLs }} + - name: consul-bin + mountPath: /consul-bin + {{- end }} + {{- if or (not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled)) .Values.client.enabled }} + {{- if .Values.global.tls.enabled }} + {{- if and .Values.client.enabled .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- end }} + - mountPath: /consul/login + name: consul-data + readOnly: true + {{- if .Values.apiGateway.resources }} + resources: + {{- toYaml .Values.apiGateway.resources | nindent 12 }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + lifecycle: + preStop: + exec: + command: ["/consul-bin/consul", "logout" ] + {{- end }} + volumes: + {{- if .Values.global.acls.manageSystemACLs }} + - name: consul-bin + emptyDir: { } + {{- end }} + {{- if .Values.global.tls.enabled }} + {{- if not 
(or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} + {{- end }} + - name: consul-data + emptyDir: + medium: "Memory" + {{- if or .Values.global.acls.manageSystemACLs (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} + initContainers: + {{- if .Values.global.acls.manageSystemACLs }} + - name: copy-consul-bin + image: {{ .Values.global.image | quote }} + command: + - cp + - /bin/consul + - /consul-bin/consul + volumeMounts: + - name: consul-bin + mountPath: /consul-bin + {{- if .Values.apiGateway.initCopyConsulContainer }} + {{- if .Values.apiGateway.initCopyConsulContainer.resources }} + resources: {{ toYaml .Values.apiGateway.initCopyConsulContainer.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- end }} + {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} + {{- include "consul.getAutoEncryptClientCA" . 
| nindent 6 }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: api-gateway-controller-acl-init + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: CONSUL_LOGIN_META + value: "component=api-gateway-controller,pod=$(NAMESPACE)/$(POD_NAME)" + - name: CONSUL_LOGIN_DATACENTER + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} + value: {{ .Values.global.federation.primaryDatacenter }} + {{- else }} + value: {{ .Values.global.datacenter }} + {{- end}} + {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 8 }} + image: {{ .Values.global.imageK8S }} + volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} + {{- if .Values.global.tls.enabled }} + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec consul-k8s-control-plane acl-init \ + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} + -auth-method-name={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + {{- else }} + -auth-method-name={{ template "consul.fullname" . 
}}-k8s-component-auth-method \ + {{- end }} + -log-level={{ default .Values.global.logLevel .Values.apiGateway.logLevel }} \ + -log-json={{ .Values.global.logJSON }} + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "25Mi" + cpu: "50m" + {{- end }} + {{- end }} + {{- if .Values.apiGateway.controller.priorityClassName }} + priorityClassName: {{ .Values.apiGateway.controller.priorityClassName | quote }} + {{- end }} + {{- if .Values.apiGateway.controller.nodeSelector }} + nodeSelector: + {{ tpl .Values.apiGateway.controller.nodeSelector . | indent 8 | trim }} + {{- end }} + {{- if .Values.apiGateway.controller.tolerations }} + tolerations: + {{ tpl .Values.apiGateway.controller.tolerations . | indent 8 | trim }} + {{- end }} +{{- end }} diff --git a/charts/consul/templates/api-gateway-controller-podsecuritypolicy.yaml b/charts/consul/templates/api-gateway-controller-podsecuritypolicy.yaml new file mode 100644 index 0000000000..390d084303 --- /dev/null +++ b/charts/consul/templates/api-gateway-controller-podsecuritypolicy.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.apiGateway.enabled .Values.global.enablePodSecurityPolicies }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-api-gateway-controller + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: api-gateway-controller +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: true +{{- end }} diff --git a/charts/consul/templates/api-gateway-controller-service.yaml b/charts/consul/templates/api-gateway-controller-service.yaml new file mode 100644 index 0000000000..aa79ff9fc3 --- /dev/null +++ b/charts/consul/templates/api-gateway-controller-service.yaml @@ -0,0 +1,27 @@ +{{- if .Values.apiGateway.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "consul.fullname" . }}-api-gateway-controller + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: api-gateway-controller + annotations: + {{- if .Values.apiGateway.controller.service.annotations }} + {{ tpl .Values.apiGateway.controller.service.annotations . | nindent 4 | trim }} + {{- end }} +spec: + ports: + - name: sds + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: {{ template "consul.name" . 
}} + release: "{{ .Release.Name }}" + component: api-gateway-controller +{{- end }} diff --git a/charts/consul/templates/dns-proxy-serviceaccount.yaml b/charts/consul/templates/api-gateway-controller-serviceaccount.yaml similarity index 55% rename from charts/consul/templates/dns-proxy-serviceaccount.yaml rename to charts/consul/templates/api-gateway-controller-serviceaccount.yaml index 74c88ecdc6..98292a8dbe 100644 --- a/charts/consul/templates/dns-proxy-serviceaccount.yaml +++ b/charts/consul/templates/api-gateway-controller-serviceaccount.yaml @@ -1,15 +1,19 @@ -{{- if (or (and (ne (.Values.dns.proxy.enabled | toString) "-") .Values.dns.proxy.enabled) (and (eq (.Values.dns.proxy.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.apiGateway.enabled }} apiVersion: v1 kind: ServiceAccount metadata: - name: {{ template "consul.fullname" . }}-dns-proxy + name: {{ template "consul.fullname" . }}-api-gateway-controller namespace: {{ .Release.Namespace }} labels: app: {{ template "consul.name" . }} chart: {{ template "consul.chart" . }} heritage: {{ .Release.Service }} release: {{ .Release.Name }} - component: dns-proxy + component: api-gateway-controller + {{- if .Values.apiGateway.serviceAccount.annotations }} + annotations: + {{ tpl .Values.apiGateway.serviceAccount.annotations . | nindent 4 | trim }} + {{- end }} {{- with .Values.global.imagePullSecrets }} imagePullSecrets: {{- range . }} diff --git a/charts/consul/templates/api-gateway-gatewayclass.yaml b/charts/consul/templates/api-gateway-gatewayclass.yaml new file mode 100644 index 0000000000..d9ba85e633 --- /dev/null +++ b/charts/consul/templates/api-gateway-gatewayclass.yaml @@ -0,0 +1,18 @@ +{{- if (and .Values.apiGateway.enabled .Values.apiGateway.managedGatewayClass.enabled) }} +apiVersion: gateway.networking.k8s.io/v1alpha2 +kind: GatewayClass +metadata: + name: consul-api-gateway + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: api-gateway-controller +spec: + controllerName: hashicorp.com/consul-api-gateway-controller + parametersRef: + group: api-gateway.consul.hashicorp.com + kind: GatewayClassConfig + name: consul-api-gateway +{{- end }} diff --git a/charts/consul/templates/api-gateway-gatewayclassconfig.yaml b/charts/consul/templates/api-gateway-gatewayclassconfig.yaml new file mode 100644 index 0000000000..ba0e6c63db --- /dev/null +++ b/charts/consul/templates/api-gateway-gatewayclassconfig.yaml @@ -0,0 +1,84 @@ +{{- if (and .Values.apiGateway.enabled .Values.apiGateway.managedGatewayClass.enabled) }} +apiVersion: api-gateway.consul.hashicorp.com/v1alpha1 +kind: GatewayClassConfig +metadata: + name: consul-api-gateway + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: api-gateway +spec: + consul: + {{- if .Values.client.enabled }} + {{/* + We use client agent nodes if we have them to support backwards compatibility in <=0.4 releases which + require connectivity between the registered Consul agent node and a deployment for health checking + (originating from the Consul node). Always leveraging the agents in the case that they're explicitly + opted into allows us to support users with agent node + "externalServers" configuration upgrading a + helm chart without upgrading api gateways. Otherwise, using "externalServers" when provided + without local agents will break gateways <=0.4. 
+ */}} + address: $(HOST_IP) + {{- else if .Values.externalServers.enabled }} + {{/* + "externalServers" specified and running in "agentless" mode, this will only work 0.5+ + */}} + address: {{ first .Values.externalServers.hosts }} + {{- else }} + {{/* + We have local network connectivity between deployments and the internal cluster, this + should be supported in all versions of api-gateway + */}} + address: {{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc + {{- end }} + authentication: + {{- if .Values.global.acls.manageSystemACLs }} + managed: true + method: {{ template "consul.fullname" . }}-k8s-auth-method + {{- if .Values.global.enablePodSecurityPolicies }} + podSecurityPolicy: {{ template "consul.fullname" . }}-api-gateway + {{- end }} + {{- end }} + {{- if .Values.global.tls.enabled }} + scheme: https + {{- else }} + scheme: http + {{- end }} + ports: + {{- if .Values.externalServers.enabled }} + grpc: {{ .Values.externalServers.grpcPort }} + http: {{ .Values.externalServers.httpsPort }} + {{- else }} + grpc: 8502 + {{- if .Values.global.tls.enabled }} + http: 8501 + {{- else }} + http: 8500 + {{- end }} + {{- end }} + {{- with .Values.apiGateway.managedGatewayClass.deployment }} + deployment: + {{- toYaml . | nindent 4 }} + {{- end }} + image: + consulAPIGateway: {{ .Values.apiGateway.image }} + envoy: {{ .Values.apiGateway.imageEnvoy }} + {{- if .Values.apiGateway.managedGatewayClass.nodeSelector }} + nodeSelector: + {{ tpl .Values.apiGateway.managedGatewayClass.nodeSelector . | indent 4 | trim }} + {{- end }} + {{- if .Values.apiGateway.managedGatewayClass.tolerations }} + tolerations: + {{ tpl .Values.apiGateway.managedGatewayClass.tolerations . | indent 4 | trim }} + {{- end }} + {{- if .Values.apiGateway.managedGatewayClass.copyAnnotations.service }} + copyAnnotations: + service: + {{ tpl .Values.apiGateway.managedGatewayClass.copyAnnotations.service.annotations . 
| nindent 6 | trim }} + {{- end }} + serviceType: {{ .Values.apiGateway.managedGatewayClass.serviceType }} + useHostPorts: {{ .Values.apiGateway.managedGatewayClass.useHostPorts }} + logLevel: {{ default .Values.global.logLevel .Values.apiGateway.managedGatewayClass.logLevel }} +{{- end }} diff --git a/charts/consul/templates/api-gateway-podsecuritypolicy.yaml b/charts/consul/templates/api-gateway-podsecuritypolicy.yaml new file mode 100644 index 0000000000..48f826f995 --- /dev/null +++ b/charts/consul/templates/api-gateway-podsecuritypolicy.yaml @@ -0,0 +1,45 @@ +{{- if and .Values.apiGateway.enabled .Values.global.enablePodSecurityPolicies }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-api-gateway + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: api-gateway-controller +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + allowedCapabilities: + - NET_BIND_SERVICE + hostNetwork: false + hostIPC: false + hostPID: false + hostPorts: + - max: 65535 + min: 1025 + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: true +{{- end }} diff --git a/charts/consul/templates/client-daemonset.yaml b/charts/consul/templates/client-daemonset.yaml index 9c607385cf..cf0cb1d686 100644 --- a/charts/consul/templates/client-daemonset.yaml +++ b/charts/consul/templates/client-daemonset.yaml @@ -200,7 +200,6 @@ spec: containers: - name: consul image: "{{ default .Values.global.image .Values.client.image }}" - {{ template "consul.imagePullPolicy" . }} {{- if .Values.global.acls.manageSystemACLs }} lifecycle: preStop: @@ -503,7 +502,6 @@ spec: {{- if .Values.global.acls.manageSystemACLs }} - name: client-acl-init image: {{ .Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" . }} env: - name: NAMESPACE valueFrom: @@ -556,7 +554,6 @@ spec: {{- if and .Values.global.tls.enabled (not .Values.global.tls.enableAutoEncrypt) }} - name: client-tls-init image: "{{ default .Values.global.image .Values.client.image }}" - {{ template "consul.imagePullPolicy" . }} env: - name: HOST_IP valueFrom: diff --git a/charts/consul/templates/cni-daemonset.yaml b/charts/consul/templates/cni-daemonset.yaml index a93e3aea90..258924f449 100644 --- a/charts/consul/templates/cni-daemonset.yaml +++ b/charts/consul/templates/cni-daemonset.yaml @@ -62,7 +62,6 @@ spec: # This container installs the consul CNI binaries and CNI network config file on each node - name: install-cni image: {{ .Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" . 
}} securityContext: privileged: true command: diff --git a/charts/consul/templates/connect-inject-clusterrole.yaml b/charts/consul/templates/connect-inject-clusterrole.yaml index c8d6d71794..c6845870ba 100644 --- a/charts/consul/templates/connect-inject-clusterrole.yaml +++ b/charts/consul/templates/connect-inject-clusterrole.yaml @@ -32,7 +32,6 @@ rules: - routetimeoutfilters - routeauthfilters - gatewaypolicies - - registrations {{- if .Values.global.peering.enabled }} - peeringacceptors - peeringdialers @@ -62,7 +61,6 @@ rules: - terminatinggateways/status - samenessgroups/status - controlplanerequestlimits/status - - registrations/status {{- if .Values.global.peering.enabled }} - peeringacceptors/status - peeringdialers/status @@ -74,6 +72,86 @@ rules: - get - patch - update +{{- if (mustHas "resource-apis" .Values.global.experiments) }} +- apiGroups: + - auth.consul.hashicorp.com + resources: + - trafficpermissions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - auth.consul.hashicorp.com + resources: + - trafficpermissions/status + verbs: + - get + - patch + - update +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - gatewayclassconfigs + - gatewayclasses + - meshconfigurations + - grpcroutes + - httproutes + - meshgateways + - tcproutes + - proxyconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - gatewayclassconfigs/status + - gatewayclasses/status + - meshconfigurations/status + - grpcroutes/status + - httproutes/status + - meshgateways/status + - tcproutes/status + - proxyconfigurations/status + verbs: + - get + - patch + - update +- apiGroups: + - multicluster.consul.hashicorp.com + resources: + - exportedservices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - multicluster.consul.hashicorp.com + resources: + - exportedservices/status + verbs: + - 
create + - delete + - get + - list + - patch + - update + - watch +{{- end }} - apiGroups: [ "" ] resources: [ "secrets", "serviceaccounts", "endpoints", "services", "namespaces", "nodes" ] verbs: diff --git a/charts/consul/templates/connect-inject-deployment.yaml b/charts/consul/templates/connect-inject-deployment.yaml index 725c26df10..fe07c2581a 100644 --- a/charts/consul/templates/connect-inject-deployment.yaml +++ b/charts/consul/templates/connect-inject-deployment.yaml @@ -14,6 +14,7 @@ {{- $dnsRedirectionEnabled := (or (and (ne (.Values.dns.enableRedirection | toString) "-") .Values.dns.enableRedirection) (and (eq (.Values.dns.enableRedirection | toString) "-") .Values.connectInject.transparentProxy.defaultEnabled)) -}} {{ template "consul.validateRequiredCloudSecretsExist" . }} {{ template "consul.validateCloudSecretKeys" . }} +{{ template "consul.validateResourceAPIs" . }} # The deployment for running the Connect sidecar injector apiVersion: apps/v1 kind: Deployment @@ -97,7 +98,6 @@ spec: containers: - name: sidecar-injector image: "{{ default .Values.global.imageK8S .Values.connectInject.image }}" - {{ template "consul.imagePullPolicy" . }} ports: - containerPort: 8080 name: webhook-server @@ -154,6 +154,12 @@ spec: -release-namespace="{{ .Release.Namespace }}" \ -resource-prefix={{ template "consul.fullname" . 
}} \ -listen=:8080 \ + {{- if (mustHas "resource-apis" .Values.global.experiments) }} + -enable-resource-apis=true \ + {{- end }} + {{- if (mustHas "v2tenancy" .Values.global.experiments) }} + -enable-v2tenancy=true \ + {{- end }} {{- range $k, $v := .Values.connectInject.consulNode.meta }} -node-meta={{ $k }}={{ $v }} \ {{- end }} diff --git a/charts/consul/templates/connect-inject-mutatingwebhookconfiguration.yaml b/charts/consul/templates/connect-inject-mutatingwebhookconfiguration.yaml index e4fe79f621..e65c386636 100644 --- a/charts/consul/templates/connect-inject-mutatingwebhookconfiguration.yaml +++ b/charts/consul/templates/connect-inject-mutatingwebhookconfiguration.yaml @@ -333,6 +333,29 @@ webhooks: resources: - samenessgroups sideEffects: None +{{- if (mustHas "resource-apis" .Values.global.experiments) }} +- admissionReviewVersions: + - v1beta1 + - v1 + clientConfig: + service: + name: {{ template "consul.fullname" . }}-connect-injector + namespace: {{ .Release.Namespace }} + path: /mutate-v2beta1-trafficpermissions + failurePolicy: Fail + name: mutate-trafficpermissions.auth.consul.hashicorp.com + rules: + - apiGroups: + - auth.consul.hashicorp.com + apiVersions: + - v2beta1 + operations: + - CREATE + - UPDATE + resources: + - trafficpermissions + sideEffects: None +{{- end }} {{- end }} - admissionReviewVersions: - v1beta1 diff --git a/charts/consul/templates/connect-inject-validatingwebhookconfiguration.yaml b/charts/consul/templates/connect-inject-validatingwebhookconfiguration.yaml index 92068bbf68..8d01ace911 100644 --- a/charts/consul/templates/connect-inject-validatingwebhookconfiguration.yaml +++ b/charts/consul/templates/connect-inject-validatingwebhookconfiguration.yaml @@ -28,20 +28,4 @@ webhooks: name: {{ template "consul.fullname" . 
}}-connect-injector namespace: {{ .Release.Namespace }} path: /validate-v1alpha1-gatewaypolicy -- name: validate-registration.consul.hashicorp.com - matchPolicy: Equivalent - rules: - - operations: [ "CREATE" , "UPDATE" ] - apiGroups: [ "consul.hashicorp.com" ] - apiVersions: [ "v1alpha1" ] - resources: [ "registrations" ] - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: - - v1 - clientConfig: - service: - name: {{ template "consul.fullname" . }}-connect-injector - namespace: {{ .Release.Namespace }} - path: /validate-v1alpha1-registration {{- end }} diff --git a/charts/consul/templates/crd-apigateways.yaml b/charts/consul/templates/crd-apigateways.yaml new file mode 100644 index 0000000000..755fb05b64 --- /dev/null +++ b/charts/consul/templates/crd-apigateways.yaml @@ -0,0 +1,240 @@ +{{- if .Values.connectInject.enabled }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: crd + name: apigateways.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: APIGateway + listKind: APIGatewayList + plural: apigateways + singular: apigateway + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: APIGateway is the Schema for the API Gateway + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + gatewayClassName: + description: GatewayClassName is the name of the GatewayClass used + by the APIGateway + type: string + listeners: + items: + properties: + hostname: + description: Hostname is the host name that a listener should + be bound to, if unspecified, the listener accepts requests + for all hostnames. + type: string + name: + description: Name is the name of the listener in a given gateway. 
+ This must be unique within a gateway. + type: string + port: + format: int32 + maximum: 65535 + minimum: 0 + type: integer + protocol: + description: Protocol is the protocol that a listener should + use, it must either be "http" or "tcp" + type: string + tls: + description: TLS is the TLS settings for the listener. + properties: + certificates: + description: Certificates is a set of references to certificates + that a gateway listener uses for TLS termination. + items: + description: Reference identifies which resource a condition + relates to, when it is not the core resource itself. + properties: + name: + description: Name is the user-given name of the resource + (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of the + resource the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units + (i.e. partition, namespace) in which the resource + resides. + properties: + namespace: + description: "Namespace further isolates resources + within a partition. https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list resources + across all namespaces." + type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list resources + across all partitions." + type: string + peerName: + description: "PeerName identifies which peer the + resource is imported from. https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list resources + across all peers." + type: string + type: object + type: + description: Type identifies the resource's type. 
+ properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. "catalog", + "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when + sweeping or backward-incompatible changes are + made to the group's resource types. + type: string + kind: + description: Kind identifies the specific resource + type within the group. + type: string + type: object + type: object + type: array + tlsParameters: + description: TLSParameters contains optional configuration + for running TLS termination. + properties: + cipherSuites: + items: + enum: + - TLS_CIPHER_SUITE_ECDHE_ECDSA_AES128_GCM_SHA256 + - TLS_CIPHER_SUITE_AES256_SHA + - TLS_CIPHER_SUITE_ECDHE_ECDSA_CHACHA20_POLY1305 + - TLS_CIPHER_SUITE_ECDHE_RSA_AES128_GCM_SHA256 + - TLS_CIPHER_SUITE_ECDHE_RSA_CHACHA20_POLY1305 + - TLS_CIPHER_SUITE_ECDHE_ECDSA_AES128_SHA + - TLS_CIPHER_SUITE_ECDHE_RSA_AES128_SHA + - TLS_CIPHER_SUITE_AES128_GCM_SHA256 + - TLS_CIPHER_SUITE_AES128_SHA + - TLS_CIPHER_SUITE_ECDHE_ECDSA_AES256_GCM_SHA384 + - TLS_CIPHER_SUITE_ECDHE_RSA_AES256_GCM_SHA384 + - TLS_CIPHER_SUITE_ECDHE_ECDSA_AES256_SHA + - TLS_CIPHER_SUITE_ECDHE_RSA_AES256_SHA + - TLS_CIPHER_SUITE_AES256_GCM_SHA384 + format: int32 + type: string + type: array + maxVersion: + enum: + - TLS_VERSION_AUTO + - TLS_VERSION_1_0 + - TLS_VERSION_1_1 + - TLS_VERSION_1_2 + - TLS_VERSION_1_3 + - TLS_VERSION_INVALID + - TLS_VERSION_UNSPECIFIED + format: int32 + type: string + minVersion: + enum: + - TLS_VERSION_AUTO + - TLS_VERSION_1_0 + - TLS_VERSION_1_1 + - TLS_VERSION_1_2 + - TLS_VERSION_1_3 + - TLS_VERSION_INVALID + - TLS_VERSION_UNSPECIFIED + format: int32 + type: string + type: object + type: object + type: object + minItems: 1 + type: array + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. 
+ items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} diff --git a/charts/consul/templates/crd-exportedservices.yaml b/charts/consul/templates/crd-exportedservices.yaml new file mode 100644 index 0000000000..6613e3da7e --- /dev/null +++ b/charts/consul/templates/crd-exportedservices.yaml @@ -0,0 +1,108 @@ +{{- if .Values.connectInject.enabled }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: crd + name: exportedservices.multicluster.consul.hashicorp.com +spec: + group: multicluster.consul.hashicorp.com + names: + kind: ExportedServices + listKind: ExportedServicesList + plural: exportedservices + singular: exportedservices + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2 + schema: + openAPIV3Schema: + description: ExportedServices is the Schema for the Exported Services API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + consumers: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + services: + items: + type: string + type: array + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. 
+ items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} diff --git a/charts/consul/templates/crd-gatewayclassconfigs-v1.yaml b/charts/consul/templates/crd-gatewayclassconfigs-v1.yaml index a611e91b2b..41023c19dc 100644 --- a/charts/consul/templates/crd-gatewayclassconfigs-v1.yaml +++ b/charts/consul/templates/crd-gatewayclassconfigs-v1.yaml @@ -122,8 +122,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object diff --git a/charts/consul/templates/crd-gatewayclassconfigs.yaml b/charts/consul/templates/crd-gatewayclassconfigs.yaml new file mode 100644 index 0000000000..93effd843b --- /dev/null +++ b/charts/consul/templates/crd-gatewayclassconfigs.yaml @@ -0,0 +1,1826 @@ +{{- if .Values.connectInject.enabled }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: crd + name: gatewayclassconfigs.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: GatewayClassConfig + listKind: GatewayClassConfigList + plural: gatewayclassconfigs + singular: gatewayclassconfig + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: GatewayClassConfig is the Schema for the Mesh Gateway API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GatewayClassConfigSpec specifies the desired state of the + GatewayClassConfig CRD. + properties: + annotations: + description: Annotations are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key included + here will override those in Set if specified on the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included here + will be overridden if present in InheritFromGateway and set + on the Gateway. + type: object + type: object + deployment: + description: Deployment contains config specific to the Deployment + created from this GatewayClass + properties: + affinity: + description: Affinity specifies the affinity to use on the created + Deployment. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most preferred. 
+ items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects + (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. 
If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from + its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term + matches no objects. The requirements of them are + ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. 
If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. 
The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to a pod label update), + the system may or may not try to eventually evict the + pod from its node. When there are multiple elements, + the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any node + on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. 
The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node that + violates one or more of the expressions. The node that + is most preferred is the one with the greatest sum of + weights, i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + anti-affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. 
+ items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. 
+ type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the pod + will not be scheduled onto the node. If the anti-affinity + requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod + label update), the system may or may not try to eventually + evict the pod from its node. When there are multiple + elements, the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any node + on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + description: Annotations are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + container: + description: Container contains config specific to the created + Deployment's container. 
+ properties: + consul: + description: Consul specifies configuration for the consul-dataplane + container + properties: + logging: + description: Logging specifies the logging configuration + for Consul Dataplane + properties: + level: + description: Level sets the logging level for Consul + Dataplane (debug, info, etc.) + type: string + type: object + type: object + hostPort: + description: HostPort specifies a port to be exposed to the + external host network + format: int32 + type: integer + portModifier: + description: PortModifier specifies the value to be added + to every port value for listeners on this gateway. This + is generally used to avoid binding to privileged ports in + the container. + format: int32 + type: integer + resources: + description: Resources specifies the resource requirements + for the created Deployment's container + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + dnsPolicy: + description: DNSPolicy specifies the dns policy to use. These + are set on a per pod basis. + enum: + - Default + - ClusterFirst + - ClusterFirstWithHostNet + - None + type: string + hostNetwork: + description: HostNetwork specifies whether the gateway pods should + run on the host network. + type: boolean + initContainer: + description: InitContainer contains config specific to the created + Deployment's init container. + properties: + consul: + description: Consul specifies configuration for the consul-k8s-control-plane + init container + properties: + logging: + description: Logging specifies the logging configuration + for Consul Dataplane + properties: + level: + description: Level sets the logging level for Consul + Dataplane (debug, info, etc.) + type: string + type: object + type: object + resources: + description: Resources specifies the resource requirements + for the created Deployment's init container + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + labels: + description: Labels are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. 
Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a feature that constrains the scheduling + of a pod to nodes that match specified labels. By defining NodeSelector + in a pod''s configuration, you can ensure that the pod is only + scheduled to nodes with the corresponding labels, providing + a way to influence the placement of workloads based on node + attributes. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: PriorityClassName specifies the priority class name + to use on the created Deployment. + type: string + replicas: + description: Replicas specifies the configuration to control the + number of replicas for the created Deployment. + properties: + default: + description: Default is the number of replicas assigned to + the Deployment when created + format: int32 + type: integer + max: + description: Max is the maximum number of replicas allowed + for a gateway with this class. If the replica count exceeds + this value due to manual or automated scaling, the replica + count will be restored to this value. + format: int32 + type: integer + min: + description: Min is the minimum number of replicas allowed + for a gateway with this class. If the replica count drops + below this value due to manual or automated scaling, the + replica count will be restored to this value. + format: int32 + type: integer + type: object + securityContext: + description: SecurityContext specifies the security context for + the created Deployment's Pod. + properties: + fsGroup: + description: "A special supplemental group that applies to + all containers in a pod. Some volume types allow the Kubelet + to change the ownership of that volume to be owned by the + pod: \n 1. The owning GID will be the FSGroup 2. 
The setgid + bit is set (new files created in the volume will be owned + by FSGroup) 3. The permission bits are OR'd with rw-rw---- + \n If unset, the Kubelet will not modify the ownership and + permissions of any volume. Note that this field cannot be + set when spec.os.name is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing + ownership and permission of the volume before being exposed + inside Pod. This field will only apply to volume types which + support fsGroup based ownership(and permissions). It will + have no effect on ephemeral volume types such as: secret, + configmaps and emptydir. Valid values are "OnRootMismatch" + and "Always". If not specified, "Always" is used. Note that + this field cannot be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. Note that this field + cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in SecurityContext. 
If set + in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is + windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + SecurityContext. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence + for that container. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers + in this pod. Note that this field cannot be set when spec.os.name + is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must + be preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a + profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile + should be used. Unconfined - no profile should be applied." 
+ type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process + run in each container, in addition to the container's primary + GID, the fsGroup (if specified), and group memberships defined + in the container image for the uid of the container process. + If unspecified, no additional groups are added to any container. + Note that group memberships defined in the container image + for the uid of the container process are still effective, + even if they are not included in this list. Note that this + field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used + for the pod. Pods with unsupported sysctls (by the container + runtime) might fail to launch. Note that this field cannot + be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options within a container's + SecurityContext will be used. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. 
+ type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components that + enable the WindowsHostProcessContainers feature flag. + Setting this field without the feature flag will result + in errors when validating the Pod. All of a Pod's containers + must have the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + tolerations: + description: Tolerations specifies the tolerations to use on the + created Deployment. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, allowed + values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match + all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to + the value. Valid operators are Exists and Equal. Defaults + to Equal. Exists is equivalent to wildcard for value, + so that a pod can tolerate all taints of a particular + category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the taint + forever (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: 'TopologySpreadConstraints is a feature that controls + how pods are spead across your topology. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/' + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine + the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. 
+ This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to + select the pods over which spreading will be calculated. + The keys are used to lookup values from the incoming pod + labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading + will be calculated for the incoming pod. Keys that don't + exist in the incoming pod labels will be ignored. A null + or empty list means only match against labelSelector. + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods + may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global + minimum. The global minimum is the minimum number of matching + pods in an eligible domain or zero if the number of eligible + domains is less than MinDomains. For example, in a 3-zone + cluster, MaxSkew is set to 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | | P P | P P | P | - + if MaxSkew is 1, incoming pod can only be scheduled to + zone3 to become 2/2/2; scheduling it onto zone1(zone2) + would make the ActualSkew(3-1) on zone1(zone2) violate + MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled + onto any zone. 
When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that + satisfy it. It''s a required field. Default value is 1 + and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible + domains. When the number of eligible domains with matching + topology keys is less than minDomains, Pod Topology Spread + treats \"global minimum\" as 0, and then the calculation + of Skew is performed. And when the number of eligible + domains with matching topology keys equals or greater + than minDomains, this value has no effect on scheduling. + As a result, when the number of eligible domains is less + than minDomains, scheduler won't schedule more than maxSkew + Pods to those domains. If value is nil, the constraint + behaves as if MinDomains is equal to 1. Valid values are + integers greater than 0. When value is not nil, WhenUnsatisfiable + must be DoNotSchedule. \n For example, in a 3-zone cluster, + MaxSkew is set to 2, MinDomains is set to 5 and pods with + the same labelSelector spread as 2/2/2: | zone1 | zone2 + | zone3 | | P P | P P | P P | The number of domains + is less than 5(MinDomains), so \"global minimum\" is treated + as 0. In this situation, new pod with the same labelSelector + cannot be scheduled, because computed skew will be 3(3 + - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. \n This is a beta field and requires + the MinDomainsInPodTopologySpread feature gate to be enabled + (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat + Pod's nodeAffinity/nodeSelector when calculating pod topology + spread skew. Options are: - Honor: only nodes matching + nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes + are included in the calculations. 
\n If this value is + nil, the behavior is equivalent to the Honor policy. This + is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat + node taints when calculating pod topology spread skew. + Options are: - Honor: nodes without taints, along with + tainted nodes for which the incoming pod has a toleration, + are included. - Ignore: node taints are ignored. All nodes + are included. \n If this value is nil, the behavior is + equivalent to the Ignore policy. This is a beta-level + feature default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes + that have a label with this key and identical values are + considered to be in the same topology. We consider each + as a "bucket", and try to put balanced number + of pods into each bucket. We define a domain as a particular + instance of a topology. Also, we define an eligible domain + as a domain whose nodes meet the requirements of nodeAffinityPolicy + and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is a domain + of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with + a pod if it doesn''t satisfy the spread constraint. - + DoNotSchedule (default) tells the scheduler not to schedule + it. - ScheduleAnyway tells the scheduler to schedule the + pod in any location, but giving higher precedence to topologies + that would help reduce the skew. A constraint is considered + "Unsatisfiable" for an incoming pod if and only if every + possible node assignment for that pod would violate "MaxSkew" + on some topology. 
For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector spread + as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming + pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) + as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). + In other words, the cluster can still be imbalanced, but + scheduler won''t make it *more* imbalanced. It''s a required + field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + labels: + description: Labels are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key included + here will override those in Set if specified on the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included here + will be overridden if present in InheritFromGateway and set + on the Gateway. + type: object + type: object + role: + description: Role contains config specific to the Role created from + this GatewayClass + properties: + annotations: + description: Annotations are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. 
+ type: object + type: object + labels: + description: Labels are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + type: object + roleBinding: + description: RoleBinding contains config specific to the RoleBinding + created from this GatewayClass + properties: + annotations: + description: Annotations are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + labels: + description: Labels are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. 
Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + type: object + service: + description: Service contains config specific to the Service created + from this GatewayClass + properties: + annotations: + description: Annotations are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + labels: + description: Labels are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + type: + description: Type specifies the type of Service to use (LoadBalancer, + ClusterIP, etc.) 
+ enum: + - ClusterIP + - NodePort + - LoadBalancer + type: string + type: object + serviceAccount: + description: ServiceAccount contains config specific to the corev1.ServiceAccount + created from this GatewayClass + properties: + annotations: + description: Annotations are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + labels: + description: Labels are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + type: object + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. 
See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} diff --git a/charts/consul/templates/crd-gatewayclasses.yaml b/charts/consul/templates/crd-gatewayclasses.yaml new file mode 100644 index 0000000000..70763f9104 --- /dev/null +++ b/charts/consul/templates/crd-gatewayclasses.yaml @@ -0,0 +1,122 @@ +{{- if .Values.connectInject.enabled }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: crd + name: gatewayclasses.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: GatewayClass + listKind: GatewayClassList + plural: gatewayclasses + singular: gatewayclass + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: GatewayClass is the Schema for the Gateway Class API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + controllerName: + description: ControllerName is the name of the Kubernetes controller + that manages Gateways of this class + type: string + description: + description: Description of GatewayClass + type: string + parametersRef: + description: ParametersRef refers to a resource responsible for configuring + the behavior of the GatewayClass. 
+ properties: + group: + description: The Kubernetes Group that the referred object belongs + to + type: string + kind: + description: The Kubernetes Kind that the referred object is + type: string + name: + description: The Name of the referred object + type: string + namespace: + description: The kubernetes namespace that the referred object + is in + type: string + required: + - name + type: object + required: + - controllerName + - parametersRef + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. 
+ format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} diff --git a/charts/consul/templates/crd-grpcroutes.yaml b/charts/consul/templates/crd-grpcroutes.yaml new file mode 100644 index 0000000000..31812fff35 --- /dev/null +++ b/charts/consul/templates/crd-grpcroutes.yaml @@ -0,0 +1,617 @@ +{{- if .Values.connectInject.enabled }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: crd + name: grpcroutes.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: GRPCRoute + listKind: GRPCRouteList + plural: grpcroutes + shortNames: + - grpc-route + singular: grpcroute + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: GRPCRoute is the Schema for the GRPC Route API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: "NOTE: this should align to the GAMMA/gateway-api version, + or at least be easily translatable. \n https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.GRPCRoute + \n This is a Resource type." + properties: + hostnames: + description: "Hostnames are the hostnames for which this GRPCRoute + should respond to requests. \n This is only valid for north/south." + items: + type: string + type: array + parentRefs: + description: "ParentRefs references the resources (usually Services) + that a Route wants to be attached to. \n It is invalid to reference + an identical parent more than once. It is valid to reference multiple + distinct sections within the same parent resource." + items: + description: 'NOTE: roughly equivalent to structs.ResourceReference' + properties: + port: + description: For east/west this is the name of the Consul Service + port to direct traffic to or empty to imply all. For north/south + this is TBD. + type: string + ref: + description: For east/west configuration, this should point + to a Service. For north/south it should point to a Gateway. + properties: + name: + description: Name is the user-given name of the resource + (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of the resource + the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units (i.e. + partition, namespace) in which the resource resides. + properties: + namespace: + description: "Namespace further isolates resources within + a partition. 
https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all namespaces." + type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all partitions." + type: string + peerName: + description: "PeerName identifies which peer the resource + is imported from. https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all peers." + type: string + type: object + type: + description: Type identifies the resource's type. + properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. "catalog", + "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when sweeping + or backward-incompatible changes are made to the group's + resource types. + type: string + kind: + description: Kind identifies the specific resource type + within the group. + type: string + type: object + type: object + type: object + type: array + rules: + description: Rules are a list of GRPC matchers, filters and actions. + items: + properties: + backendRefs: + description: "BackendRefs defines the backend(s) where matching + requests should be sent. Failure behavior here depends on + how many BackendRefs are specified and how many are invalid. + \n If all entries in BackendRefs are invalid, and there are + also no filters specified in this route rule, all traffic + which matches this rule MUST receive a 500 status code. \n + See the GRPCBackendRef definition for the rules about what + makes a single GRPCBackendRef invalid. 
\n When a GRPCBackendRef + is invalid, 500 status codes MUST be returned for requests + that would have otherwise been routed to an invalid backend. + If multiple backends are specified, and some are invalid, + the proportion of requests that would otherwise have been + routed to an invalid backend MUST receive a 500 status code. + \n For example, if two backends are specified with equal weights, + and one is invalid, 50 percent of traffic must receive a 500. + Implementations may choose how that 50 percent is determined." + items: + properties: + backendRef: + properties: + datacenter: + type: string + port: + description: "For east/west this is the name of the + Consul Service port to direct traffic to or empty + to imply using the same value as the parent ref. + \n For north/south this is TBD." + type: string + ref: + description: For east/west configuration, this should + point to a Service. + properties: + name: + description: Name is the user-given name of the + resource (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of + the resource the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units + (i.e. partition, namespace) in which the resource + resides. + properties: + namespace: + description: "Namespace further isolates resources + within a partition. https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all namespaces." + type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all partitions." + type: string + peerName: + description: "PeerName identifies which peer + the resource is imported from. 
https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all peers." + type: string + type: object + type: + description: Type identifies the resource's type. + properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. + "catalog", "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when + sweeping or backward-incompatible changes + are made to the group's resource types. + type: string + kind: + description: Kind identifies the specific + resource type within the group. + type: string + type: object + type: object + type: object + filters: + description: Filters defined at this level should be executed + if and only if the request is being forwarded to the + backend defined here. + items: + properties: + requestHeaderModifier: + description: RequestHeaderModifier defines a schema + for a filter that modifies request headers. + properties: + add: + description: Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. + Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with + the given header (name, value) before the + action. 
+ items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaderModifier: + description: ResponseHeaderModifier defines a schema + for a filter that modifies response headers. + properties: + add: + description: Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. + Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with + the given header (name, value) before the + action. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + urlRewrite: + description: URLRewrite defines a schema for a filter + that modifies a request during forwarding. + properties: + pathPrefix: + type: string + type: object + type: object + type: array + weight: + description: "Weight specifies the proportion of requests + forwarded to the referenced backend. This is computed + as weight/(sum of all weights in this BackendRefs list). + For non-zero values, there may be some epsilon from + the exact proportion defined here depending on the precision + an implementation supports. Weight is not a percentage + and the sum of weights does not need to equal 100. \n + If only one backend is specified and it has a weight + greater than 0, 100% of the traffic is forwarded to + that backend. If weight is set to 0, no traffic should + be forwarded for this entry. If unspecified, weight + defaults to 1." 
+ format: int32 + type: integer + type: object + type: array + filters: + items: + properties: + requestHeaderModifier: + description: RequestHeaderModifier defines a schema for + a filter that modifies request headers. + properties: + add: + description: Add adds the given header(s) (name, value) + to the request before the action. It appends to + any existing values associated with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from the HTTP + request before the action. The value of Remove is + a list of HTTP header names. Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with the given + header (name, value) before the action. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaderModifier: + description: ResponseHeaderModifier defines a schema for + a filter that modifies response headers. + properties: + add: + description: Add adds the given header(s) (name, value) + to the request before the action. It appends to + any existing values associated with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from the HTTP + request before the action. The value of Remove is + a list of HTTP header names. Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with the given + header (name, value) before the action. 
+ items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + urlRewrite: + description: URLRewrite defines a schema for a filter + that modifies a request during forwarding. + properties: + pathPrefix: + type: string + type: object + type: object + type: array + matches: + items: + properties: + headers: + description: Headers specifies gRPC request header matchers. + Multiple match values are ANDed together, meaning, a + request MUST match all the specified headers to select + the route. + items: + properties: + name: + type: string + type: + description: "HeaderMatchType specifies the semantics + of how HTTP header values should be compared. + Valid HeaderMatchType values, along with their + conformance levels, are: \n Note that values may + be added to this enum, implementations must ensure + that unknown values will not cause a crash. \n + Unknown values here must result in the implementation + setting the Accepted Condition for the Route to + status: False, with a Reason of UnsupportedValue." + enum: + - HEADER_MATCH_TYPE_UNSPECIFIED + - HEADER_MATCH_TYPE_EXACT + - HEADER_MATCH_TYPE_REGEX + - HEADER_MATCH_TYPE_PRESENT + - HEADER_MATCH_TYPE_PREFIX + - HEADER_MATCH_TYPE_SUFFIX + format: int32 + type: string + value: + type: string + type: object + type: array + method: + description: Method specifies a gRPC request service/method + matcher. If this field is not specified, all services + and methods will match. + properties: + method: + description: "Value of the method to match against. + If left empty or omitted, will match all services. + \n At least one of Service and Method MUST be a + non-empty string.}" + type: string + service: + description: "Value of the service to match against. + If left empty or omitted, will match any service. + \n At least one of Service and Method MUST be a + non-empty string." 
+ type: string + type: + description: 'Type specifies how to match against + the service and/or method. Support: Core (Exact + with service and method specified)' + enum: + - GRPC_METHOD_MATCH_TYPE_UNSPECIFIED + - GRPC_METHOD_MATCH_TYPE_EXACT + - GRPC_METHOD_MATCH_TYPE_REGEX + format: int32 + type: string + type: object + type: object + type: array + retries: + properties: + number: + description: Number is the number of times to retry the + request when a retryable result occurs. + properties: + value: + description: The uint32 value. + format: int32 + type: integer + type: object + onConditions: + description: RetryOn allows setting envoy specific conditions + when a request should be automatically retried. + items: + type: string + type: array + onConnectFailure: + description: RetryOnConnectFailure allows for connection + failure errors to trigger a retry. + type: boolean + onStatusCodes: + description: RetryOnStatusCodes is a flat list of http response + status codes that are eligible for retry. This again should + be feasible in any reasonable proxy. + items: + format: int32 + type: integer + type: array + type: object + timeouts: + description: HTTPRouteTimeouts defines timeouts that can be + configured for an HTTPRoute or GRPCRoute. + properties: + idle: + description: Idle specifies the total amount of time permitted + for the request stream to be idle. + format: duration + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less than + one second are represented with a 0 `seconds` field + and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. Must + be from -315,576,000,000 to +315,576,000,000 inclusive. 
+ Note: these bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + request: + description: RequestTimeout is the total amount of time + permitted for the entire downstream request (and retries) + to be processed. + format: duration + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less than + one second are represented with a 0 `seconds` field + and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. Must + be from -315,576,000,000 to +315,576,000,000 inclusive. + Note: these bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + type: object + type: object + type: array + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. 
+ type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} diff --git a/charts/consul/templates/crd-httproutes.yaml b/charts/consul/templates/crd-httproutes.yaml new file mode 100644 index 0000000000..3da6e1e637 --- /dev/null +++ b/charts/consul/templates/crd-httproutes.yaml @@ -0,0 +1,673 @@ +{{- if .Values.connectInject.enabled }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: crd + name: httproutes.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: HTTPRoute + listKind: HTTPRouteList + plural: httproutes + shortNames: + - http-route + singular: httproute + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: HTTPRoute is the Schema for the HTTP Route API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: "NOTE: this should align to the GAMMA/gateway-api version, + or at least be easily translatable. \n https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.HTTPRoute + \n This is a Resource type." + properties: + hostnames: + description: "Hostnames are the hostnames for which this HTTPRoute + should respond to requests. \n This is only valid for north/south." + items: + type: string + type: array + parentRefs: + description: "ParentRefs references the resources (usually Services) + that a Route wants to be attached to. \n It is invalid to reference + an identical parent more than once. It is valid to reference multiple + distinct sections within the same parent resource." + items: + description: 'NOTE: roughly equivalent to structs.ResourceReference' + properties: + port: + description: For east/west this is the name of the Consul Service + port to direct traffic to or empty to imply all. For north/south + this is TBD. + type: string + ref: + description: For east/west configuration, this should point + to a Service. For north/south it should point to a Gateway. + properties: + name: + description: Name is the user-given name of the resource + (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of the resource + the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units (i.e. + partition, namespace) in which the resource resides. 
+ properties: + namespace: + description: "Namespace further isolates resources within + a partition. https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all namespaces." + type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all partitions." + type: string + peerName: + description: "PeerName identifies which peer the resource + is imported from. https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all peers." + type: string + type: object + type: + description: Type identifies the resource's type. + properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. "catalog", + "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when sweeping + or backward-incompatible changes are made to the group's + resource types. + type: string + kind: + description: Kind identifies the specific resource type + within the group. + type: string + type: object + type: object + type: object + type: array + rules: + description: Rules are a list of HTTP-based routing rules that this + route should use for constructing a routing table. + items: + description: HTTPRouteRule specifies the routing rules used to determine + what upstream service an HTTP request is routed to. + properties: + backendRefs: + description: "BackendRefs defines the backend(s) where matching + requests should be sent. \n Failure behavior here depends + on how many BackendRefs are specified and how many are invalid. 
+ \n If all entries in BackendRefs are invalid, and there are + also no filters specified in this route rule, all traffic + which matches this rule MUST receive a 500 status code. \n + See the HTTPBackendRef definition for the rules about what + makes a single HTTPBackendRef invalid. \n When a HTTPBackendRef + is invalid, 500 status codes MUST be returned for requests + that would have otherwise been routed to an invalid backend. + If multiple backends are specified, and some are invalid, + the proportion of requests that would otherwise have been + routed to an invalid backend MUST receive a 500 status code. + \n For example, if two backends are specified with equal weights, + and one is invalid, 50 percent of traffic must receive a 500. + Implementations may choose how that 50 percent is determined." + items: + properties: + backendRef: + properties: + datacenter: + type: string + port: + description: "For east/west this is the name of the + Consul Service port to direct traffic to or empty + to imply using the same value as the parent ref. + \n For north/south this is TBD." + type: string + ref: + description: For east/west configuration, this should + point to a Service. + properties: + name: + description: Name is the user-given name of the + resource (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of + the resource the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units + (i.e. partition, namespace) in which the resource + resides. + properties: + namespace: + description: "Namespace further isolates resources + within a partition. https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all namespaces." + type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. 
https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all partitions." + type: string + peerName: + description: "PeerName identifies which peer + the resource is imported from. https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all peers." + type: string + type: object + type: + description: Type identifies the resource's type. + properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. + "catalog", "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when + sweeping or backward-incompatible changes + are made to the group's resource types. + type: string + kind: + description: Kind identifies the specific + resource type within the group. + type: string + type: object + type: object + type: object + filters: + description: Filters defined at this level should be executed + if and only if the request is being forwarded to the + backend defined here. + items: + properties: + requestHeaderModifier: + description: RequestHeaderModifier defines a schema + for a filter that modifies request headers. + properties: + add: + description: Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. + Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). 
+ items: + type: string + type: array + set: + description: Set overwrites the request with + the given header (name, value) before the + action. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaderModifier: + description: ResponseHeaderModifier defines a schema + for a filter that modifies response headers. + properties: + add: + description: Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. + Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with + the given header (name, value) before the + action. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + urlRewrite: + description: URLRewrite defines a schema for a filter + that modifies a request during forwarding. + properties: + pathPrefix: + type: string + type: object + type: object + type: array + weight: + description: "Weight specifies the proportion of requests + forwarded to the referenced backend. This is computed + as weight/(sum of all weights in this BackendRefs list). + For non-zero values, there may be some epsilon from + the exact proportion defined here depending on the precision + an implementation supports. Weight is not a percentage + and the sum of weights does not need to equal 100. \n + If only one backend is specified and it has a weight + greater than 0, 100% of the traffic is forwarded to + that backend. 
If weight is set to 0, no traffic should + be forwarded for this entry. If unspecified, weight + defaults to 1." + format: int32 + type: integer + type: object + type: array + filters: + items: + properties: + requestHeaderModifier: + description: RequestHeaderModifier defines a schema for + a filter that modifies request headers. + properties: + add: + description: Add adds the given header(s) (name, value) + to the request before the action. It appends to + any existing values associated with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from the HTTP + request before the action. The value of Remove is + a list of HTTP header names. Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with the given + header (name, value) before the action. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaderModifier: + description: ResponseHeaderModifier defines a schema for + a filter that modifies response headers. + properties: + add: + description: Add adds the given header(s) (name, value) + to the request before the action. It appends to + any existing values associated with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from the HTTP + request before the action. The value of Remove is + a list of HTTP header names. Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with the given + header (name, value) before the action. 
+ items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + urlRewrite: + description: URLRewrite defines a schema for a filter + that modifies a request during forwarding. + properties: + pathPrefix: + type: string + type: object + type: object + type: array + matches: + items: + properties: + headers: + description: Headers specifies HTTP request header matchers. + Multiple match values are ANDed together, meaning, a + request must match all the specified headers to select + the route. + items: + properties: + invert: + description: 'NOTE: not in gamma; service-router + compat' + type: boolean + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case insensitive. + (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent header + names, only the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST be + ignored. Due to the case-insensitivity of header + names, “foo” and “Foo” are considered equivalent. + \n When a header is repeated in an HTTP request, + it is implementation-specific behavior as to how + this is represented. Generally, proxies should + follow the guidance from the RFC: https://www.rfc-editor.org/rfc/rfc7230.html#section-3.2.2 + regarding processing a repeated header, with special + handling for “Set-Cookie”." + type: string + type: + description: Type specifies how to match against + the value of the header. + enum: + - HEADER_MATCH_TYPE_UNSPECIFIED + - HEADER_MATCH_TYPE_EXACT + - HEADER_MATCH_TYPE_REGEX + - HEADER_MATCH_TYPE_PRESENT + - HEADER_MATCH_TYPE_PREFIX + - HEADER_MATCH_TYPE_SUFFIX + format: int32 + type: string + value: + description: Value is the value of HTTP Header to + be matched. + type: string + type: object + type: array + method: + description: Method specifies HTTP method matcher. 
When + specified, this route will be matched only if the request + has the specified method. + type: string + path: + description: Path specifies a HTTP request path matcher. + If this field is not specified, a default prefix match + on the “/” path is provided. + properties: + type: + description: Type specifies how to match against the + path Value. + enum: + - PATH_MATCH_TYPE_UNSPECIFIED + - PATH_MATCH_TYPE_EXACT + - PATH_MATCH_TYPE_PREFIX + - PATH_MATCH_TYPE_REGEX + format: int32 + type: string + value: + description: Value of the HTTP path to match against. + type: string + type: object + queryParams: + description: QueryParams specifies HTTP query parameter + matchers. Multiple match values are ANDed together, + meaning, a request must match all the specified query + parameters to select the route. + items: + properties: + name: + description: "Name is the name of the HTTP query + param to be matched. This must be an exact string + match. (See https://tools.ietf.org/html/rfc7230#section-2.7.3). + \n If multiple entries specify equivalent query + param names, only the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent query param name MUST + be ignored. \n If a query param is repeated in + an HTTP request, the behavior is purposely left + undefined, since different data planes have different + capabilities. However, it is recommended that + implementations should match against the first + value of the param if the data plane supports + it, as this behavior is expected in other load + balancing contexts outside of the Gateway API. + \n Users SHOULD NOT route traffic based on repeated + query params to guard themselves against potential + differences in the implementations." + type: string + type: + description: Type specifies how to match against + the value of the query parameter. 
+ enum: + - QUERY_PARAM_MATCH_TYPE_UNSPECIFIED + - QUERY_PARAM_MATCH_TYPE_EXACT + - QUERY_PARAM_MATCH_TYPE_REGEX + - QUERY_PARAM_MATCH_TYPE_PRESENT + format: int32 + type: string + value: + description: Value is the value of HTTP query param + to be matched. + type: string + type: object + type: array + type: object + type: array + retries: + properties: + number: + description: Number is the number of times to retry the + request when a retryable result occurs. + properties: + value: + description: The uint32 value. + format: int32 + type: integer + type: object + onConditions: + description: RetryOn allows setting envoy specific conditions + when a request should be automatically retried. + items: + type: string + type: array + onConnectFailure: + description: RetryOnConnectFailure allows for connection + failure errors to trigger a retry. + type: boolean + onStatusCodes: + description: RetryOnStatusCodes is a flat list of http response + status codes that are eligible for retry. This again should + be feasible in any reasonable proxy. + items: + format: int32 + type: integer + type: array + type: object + timeouts: + description: HTTPRouteTimeouts defines timeouts that can be + configured for an HTTPRoute or GRPCRoute. + properties: + idle: + description: Idle specifies the total amount of time permitted + for the request stream to be idle. + format: duration + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less than + one second are represented with a 0 `seconds` field + and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. Must + be from -315,576,000,000 to +315,576,000,000 inclusive. 
+ Note: these bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + request: + description: RequestTimeout is the total amount of time + permitted for the entire downstream request (and retries) + to be processed. + format: duration + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less than + one second are represented with a 0 `seconds` field + and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. Must + be from -315,576,000,000 to +315,576,000,000 inclusive. + Note: these bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + type: object + type: object + type: array + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. 
+ type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} diff --git a/charts/consul/templates/crd-meshconfigurations.yaml b/charts/consul/templates/crd-meshconfigurations.yaml new file mode 100644 index 0000000000..21114d723f --- /dev/null +++ b/charts/consul/templates/crd-meshconfigurations.yaml @@ -0,0 +1,100 @@ +{{- if .Values.connectInject.enabled }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: crd + name: meshconfigurations.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: MeshConfiguration + listKind: MeshConfigurationList + plural: meshconfigurations + singular: meshconfiguration + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: MeshConfiguration is the Schema for the Mesh Configuration + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MeshConfiguration is responsible for configuring the default + behavior of Mesh Gateways. This is a Resource type. + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. 
+ format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} diff --git a/charts/consul/templates/crd-meshgateways.yaml b/charts/consul/templates/crd-meshgateways.yaml new file mode 100644 index 0000000000..6202add695 --- /dev/null +++ b/charts/consul/templates/crd-meshgateways.yaml @@ -0,0 +1,134 @@ +{{- if .Values.connectInject.enabled }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: crd + name: meshgateways.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: MeshGateway + listKind: MeshGatewayList + plural: meshgateways + singular: meshgateway + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: MeshGateway is the Schema for the Mesh Gateway API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + gatewayClassName: + description: GatewayClassName is the name of the GatewayClass used + by the MeshGateway + type: string + listeners: + items: + properties: + name: + type: string + port: + format: int32 + maximum: 65535 + minimum: 0 + type: integer + protocol: + enum: + - TCP + type: string + type: object + minItems: 1 + type: array + workloads: + description: Selection of workloads to be configured as mesh gateways + properties: + filter: + type: string + names: + items: + type: string + type: array + prefixes: + items: + type: string + type: array + type: object + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. 
+ format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} diff --git a/charts/consul/templates/crd-proxyconfigurations.yaml b/charts/consul/templates/crd-proxyconfigurations.yaml new file mode 100644 index 0000000000..3d19d5ea4f --- /dev/null +++ b/charts/consul/templates/crd-proxyconfigurations.yaml @@ -0,0 +1,405 @@ +{{- if .Values.connectInject.enabled }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: crd + name: proxyconfigurations.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: ProxyConfiguration + listKind: ProxyConfigurationList + plural: proxyconfigurations + shortNames: + - proxy-configuration + singular: proxyconfiguration + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: ProxyConfiguration is the Schema for the TCP Routes API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: This is a Resource type. + properties: + bootstrapConfig: + description: bootstrap_config is the configuration that requires proxies + to be restarted to be applied. + properties: + dogstatsdUrl: + type: string + overrideJsonTpl: + type: string + prometheusBindAddr: + type: string + readyBindAddr: + type: string + staticClustersJson: + type: string + staticListenersJson: + type: string + statsBindAddr: + type: string + statsConfigJson: + type: string + statsFlushInterval: + type: string + statsSinksJson: + type: string + statsTags: + items: + type: string + type: array + statsdUrl: + type: string + telemetryCollectorBindSocketDir: + type: string + tracingConfigJson: + type: string + type: object + dynamicConfig: + description: dynamic_config is the configuration that could be changed + dynamically (i.e. without needing restart). + properties: + accessLogs: + description: AccessLogs configures the output and format of Envoy + access logs + properties: + disableListenerLogs: + description: DisableListenerLogs turns off just listener logs + for connections rejected by Envoy because they don't have + a matching listener filter. + type: boolean + enabled: + description: Enabled turns off all access logging + type: boolean + jsonFormat: + description: The presence of one format string or the other + implies the access log string encoding. Defining both is + invalid. 
+ type: string + path: + description: Path is the output file to write logs + type: string + textFormat: + type: string + type: + description: 'Type selects the output for logs: "file", "stderr". + "stdout"' + enum: + - LOG_SINK_TYPE_DEFAULT + - LOG_SINK_TYPE_FILE + - LOG_SINK_TYPE_STDERR + - LOG_SINK_TYPE_STDOUT + format: int32 + type: string + type: object + exposeConfig: + properties: + exposePaths: + items: + properties: + listenerPort: + format: int32 + type: integer + localPathPort: + format: int32 + type: integer + path: + type: string + protocol: + enum: + - EXPOSE_PATH_PROTOCOL_HTTP + - EXPOSE_PATH_PROTOCOL_HTTP2 + format: int32 + type: string + type: object + type: array + type: object + inboundConnections: + description: inbound_connections configures inbound connections + to the proxy. + properties: + balanceInboundConnections: + enum: + - BALANCE_CONNECTIONS_DEFAULT + - BALANCE_CONNECTIONS_EXACT + format: int32 + type: string + maxInboundConnections: + format: int32 + type: integer + type: object + listenerTracingJson: + type: string + localClusterJson: + type: string + localConnection: + additionalProperties: + description: Referenced by ProxyConfiguration + properties: + connectTimeout: + description: "A Duration represents a signed, fixed-length + span of time represented as a count of seconds and fractions + of seconds at nanosecond resolution. It is independent + of any calendar and concepts like \"day\" or \"month\". + It is related to Timestamp in that the difference between + two Timestamp values is a Duration and it can be added + or subtracted from a Timestamp. Range is approximately + +-10,000 years. \n # Examples \n Example 1: Compute Duration + from two Timestamps in pseudo code. 
\n Timestamp start + = ...; Timestamp end = ...; Duration duration = ...; \n + duration.seconds = end.seconds - start.seconds; duration.nanos + = end.nanos - start.nanos; \n if (duration.seconds < 0 + && duration.nanos > 0) { duration.seconds += 1; duration.nanos + -= 1000000000; } else if (duration.seconds > 0 && duration.nanos + < 0) { duration.seconds -= 1; duration.nanos += 1000000000; + } \n Example 2: Compute Timestamp from Timestamp + Duration + in pseudo code. \n Timestamp start = ...; Duration duration + = ...; Timestamp end = ...; \n end.seconds = start.seconds + + duration.seconds; end.nanos = start.nanos + duration.nanos; + \n if (end.nanos < 0) { end.seconds -= 1; end.nanos += + 1000000000; } else if (end.nanos >= 1000000000) { end.seconds + += 1; end.nanos -= 1000000000; } \n Example 3: Compute + Duration from datetime.timedelta in Python. \n td = datetime.timedelta(days=3, + minutes=10) duration = Duration() duration.FromTimedelta(td) + \n # JSON Mapping \n In JSON format, the Duration type + is encoded as a string rather than an object, where the + string ends in the suffix \"s\" (indicating seconds) and + is preceded by the number of seconds, with nanoseconds + expressed as fractional seconds. For example, 3 seconds + with 0 nanoseconds should be encoded in JSON format as + \"3s\", while 3 seconds and 1 nanosecond should be expressed + in JSON format as \"3.000000001s\", and 3 seconds and + 1 microsecond should be expressed in JSON format as \"3.000001s\"." + format: duration + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less than + one second are represented with a 0 `seconds` field + and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 inclusive. 
+ format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. Must + be from -315,576,000,000 to +315,576,000,000 inclusive. + Note: these bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + requestTimeout: + description: "A Duration represents a signed, fixed-length + span of time represented as a count of seconds and fractions + of seconds at nanosecond resolution. It is independent + of any calendar and concepts like \"day\" or \"month\". + It is related to Timestamp in that the difference between + two Timestamp values is a Duration and it can be added + or subtracted from a Timestamp. Range is approximately + +-10,000 years. \n # Examples \n Example 1: Compute Duration + from two Timestamps in pseudo code. \n Timestamp start + = ...; Timestamp end = ...; Duration duration = ...; \n + duration.seconds = end.seconds - start.seconds; duration.nanos + = end.nanos - start.nanos; \n if (duration.seconds < 0 + && duration.nanos > 0) { duration.seconds += 1; duration.nanos + -= 1000000000; } else if (duration.seconds > 0 && duration.nanos + < 0) { duration.seconds -= 1; duration.nanos += 1000000000; + } \n Example 2: Compute Timestamp from Timestamp + Duration + in pseudo code. \n Timestamp start = ...; Duration duration + = ...; Timestamp end = ...; \n end.seconds = start.seconds + + duration.seconds; end.nanos = start.nanos + duration.nanos; + \n if (end.nanos < 0) { end.seconds -= 1; end.nanos += + 1000000000; } else if (end.nanos >= 1000000000) { end.seconds + += 1; end.nanos -= 1000000000; } \n Example 3: Compute + Duration from datetime.timedelta in Python. 
\n td = datetime.timedelta(days=3, + minutes=10) duration = Duration() duration.FromTimedelta(td) + \n # JSON Mapping \n In JSON format, the Duration type + is encoded as a string rather than an object, where the + string ends in the suffix \"s\" (indicating seconds) and + is preceded by the number of seconds, with nanoseconds + expressed as fractional seconds. For example, 3 seconds + with 0 nanoseconds should be encoded in JSON format as + \"3s\", while 3 seconds and 1 nanosecond should be expressed + in JSON format as \"3.000000001s\", and 3 seconds and + 1 microsecond should be expressed in JSON format as \"3.000001s\"." + format: duration + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less than + one second are represented with a 0 `seconds` field + and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. Must + be from -315,576,000,000 to +315,576,000,000 inclusive. + Note: these bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + type: object + description: local_connection is the configuration that should + be used to connect to the local application provided per-port. + The map keys should correspond to port names on the workload. + type: object + localWorkloadAddress: + description: "deprecated: local_workload_address, local_workload_port, + and local_workload_socket_path are deprecated and are only needed + for migration of existing resources. \n Deprecated: Marked as + deprecated in pbmesh/v2beta1/proxy_configuration.proto." 
+ type: string + localWorkloadPort: + description: 'Deprecated: Marked as deprecated in pbmesh/v2beta1/proxy_configuration.proto.' + format: int32 + type: integer + localWorkloadSocketPath: + description: 'Deprecated: Marked as deprecated in pbmesh/v2beta1/proxy_configuration.proto.' + type: string + meshGatewayMode: + enum: + - MESH_GATEWAY_MODE_UNSPECIFIED + - MESH_GATEWAY_MODE_NONE + - MESH_GATEWAY_MODE_LOCAL + - MESH_GATEWAY_MODE_REMOTE + format: int32 + type: string + mode: + description: mode indicates the proxy's mode. This will default + to 'transparent'. + enum: + - PROXY_MODE_DEFAULT + - PROXY_MODE_TRANSPARENT + - PROXY_MODE_DIRECT + format: int32 + type: string + mutualTlsMode: + enum: + - MUTUAL_TLS_MODE_DEFAULT + - MUTUAL_TLS_MODE_STRICT + - MUTUAL_TLS_MODE_PERMISSIVE + format: int32 + type: string + publicListenerJson: + type: string + transparentProxy: + properties: + dialedDirectly: + description: dialed_directly indicates whether this proxy + should be dialed using original destination IP in the connection + rather than load balance between all endpoints. + type: boolean + outboundListenerPort: + description: outbound_listener_port is the port for the proxy's + outbound listener. This defaults to 15001. + format: int32 + type: integer + type: object + type: object + opaqueConfig: + description: "deprecated: prevent usage when using v2 APIs directly. + needed for backwards compatibility \n Deprecated: Marked as deprecated + in pbmesh/v2beta1/proxy_configuration.proto." + type: object + x-kubernetes-preserve-unknown-fields: true + workloads: + description: Selection of workloads this proxy configuration should + apply to. These can be prefixes or specific workload names. 
+ properties: + filter: + type: string + names: + items: + type: string + type: array + prefixes: + items: + type: string + type: array + type: object + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} diff --git a/charts/consul/templates/crd-registrations.yaml b/charts/consul/templates/crd-registrations.yaml deleted file mode 100644 index 32edd278ce..0000000000 --- a/charts/consul/templates/crd-registrations.yaml +++ /dev/null @@ -1,250 +0,0 @@ -{{- if .Values.connectInject.enabled }} -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.12.1 - labels: - app: {{ template "consul.name" . }} - chart: {{ template "consul.chart" . 
}} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - component: crd - name: registrations.consul.hashicorp.com -spec: - group: consul.hashicorp.com - names: - kind: Registration - listKind: RegistrationList - plural: registrations - singular: registration - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: Registration defines the resource for working with service registrations. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the desired state of Registration. - properties: - address: - type: string - check: - description: HealthCheck is used to represent a single check. - properties: - checkId: - type: string - definition: - description: HealthCheckDefinition is used to store the details - about a health check's execution. 
- properties: - body: - type: string - deregisterCriticalServiceAfterDuration: - type: string - grpc: - type: string - grpcUseTLS: - type: boolean - header: - additionalProperties: - items: - type: string - type: array - type: object - http: - type: string - intervalDuration: - type: string - method: - type: string - osService: - type: string - tcp: - type: string - tcpUseTLS: - type: boolean - timeoutDuration: - type: string - tlsServerName: - type: string - tlsSkipVerify: - type: boolean - udp: - type: string - required: - - intervalDuration - type: object - exposedPort: - type: integer - name: - type: string - namespace: - type: string - node: - type: string - notes: - type: string - output: - type: string - partition: - type: string - serviceId: - type: string - serviceName: - type: string - status: - type: string - type: - type: string - required: - - checkId - - definition - - name - - serviceId - - serviceName - - status - type: object - datacenter: - type: string - id: - type: string - locality: - properties: - region: - type: string - zone: - type: string - type: object - node: - type: string - nodeMeta: - additionalProperties: - type: string - type: object - partition: - type: string - service: - properties: - address: - type: string - enableTagOverride: - type: boolean - id: - type: string - locality: - properties: - region: - type: string - zone: - type: string - type: object - meta: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - partition: - type: string - port: - type: integer - socketPath: - type: string - taggedAddresses: - additionalProperties: - properties: - address: - type: string - port: - type: integer - required: - - address - - port - type: object - type: object - tags: - items: - type: string - type: array - weights: - properties: - passing: - type: integer - warning: - type: integer - required: - - passing - - warning - type: object - required: - - name - - port - type: object - 
skipNodeUpdate: - type: boolean - taggedAddresses: - additionalProperties: - type: string - type: object - type: object - status: - description: RegistrationStatus defines the observed state of Registration. - properties: - conditions: - description: Conditions indicate the latest available observations - of a resource's current state. - items: - description: 'Conditions define a readiness condition for a Consul - resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' - properties: - lastTransitionTime: - description: LastTransitionTime is the last time the condition - transitioned from one status to another. - format: date-time - type: string - message: - description: A human readable message indicating details about - the transition. - type: string - reason: - description: The reason for the condition's last transition. - type: string - status: - description: Status of the condition, one of True, False, Unknown. - type: string - type: - description: Type of condition. - type: string - required: - - status - - type - type: object - type: array - lastSyncedTime: - description: LastSyncedTime is the last time the resource successfully - synced with Consul. - format: date-time - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} -{{- end }} diff --git a/charts/consul/templates/crd-tcproutes.yaml b/charts/consul/templates/crd-tcproutes.yaml new file mode 100644 index 0000000000..ae9d2cd080 --- /dev/null +++ b/charts/consul/templates/crd-tcproutes.yaml @@ -0,0 +1,278 @@ +{{- if .Values.connectInject.enabled }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: crd + name: tcproutes.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: TCPRoute + listKind: TCPRouteList + plural: tcproutes + shortNames: + - tcp-route + singular: tcproute + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: TCPRoute is the Schema for the TCP Route API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: "NOTE: this should align to the GAMMA/gateway-api version, + or at least be easily translatable. \n https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.TCPRoute + \n This is a Resource type." + properties: + parentRefs: + description: "ParentRefs references the resources (usually Services) + that a Route wants to be attached to. 
\n It is invalid to reference + an identical parent more than once. It is valid to reference multiple + distinct sections within the same parent resource." + items: + description: 'NOTE: roughly equivalent to structs.ResourceReference' + properties: + port: + description: For east/west this is the name of the Consul Service + port to direct traffic to or empty to imply all. For north/south + this is TBD. + type: string + ref: + description: For east/west configuration, this should point + to a Service. For north/south it should point to a Gateway. + properties: + name: + description: Name is the user-given name of the resource + (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of the resource + the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units (i.e. + partition, namespace) in which the resource resides. + properties: + namespace: + description: "Namespace further isolates resources within + a partition. https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all namespaces." + type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all partitions." + type: string + peerName: + description: "PeerName identifies which peer the resource + is imported from. https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all peers." + type: string + type: object + type: + description: Type identifies the resource's type. 
+ properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. "catalog", + "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when sweeping + or backward-incompatible changes are made to the group's + resource types. + type: string + kind: + description: Kind identifies the specific resource type + within the group. + type: string + type: object + type: object + type: object + type: array + rules: + description: Rules are a list of TCP matchers and actions. + items: + properties: + backendRefs: + description: BackendRefs defines the backend(s) where matching + requests should be sent. If unspecified or invalid (refers + to a non-existent resource or a Service with no endpoints), + the underlying implementation MUST actively reject connection + attempts to this backend. Connection rejections must respect + weight; if an invalid backend is requested to have 80% of + connections, then 80% of connections must be rejected instead. + items: + properties: + backendRef: + properties: + datacenter: + type: string + port: + description: "For east/west this is the name of the + Consul Service port to direct traffic to or empty + to imply using the same value as the parent ref. + \n For north/south this is TBD." + type: string + ref: + description: For east/west configuration, this should + point to a Service. + properties: + name: + description: Name is the user-given name of the + resource (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of + the resource the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units + (i.e. partition, namespace) in which the resource + resides. + properties: + namespace: + description: "Namespace further isolates resources + within a partition. 
https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all namespaces." + type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all partitions." + type: string + peerName: + description: "PeerName identifies which peer + the resource is imported from. https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all peers." + type: string + type: object + type: + description: Type identifies the resource's type. + properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. + "catalog", "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when + sweeping or backward-incompatible changes + are made to the group's resource types. + type: string + kind: + description: Kind identifies the specific + resource type within the group. + type: string + type: object + type: object + type: object + weight: + description: "Weight specifies the proportion of requests + forwarded to the referenced backend. This is computed + as weight/(sum of all weights in this BackendRefs list). + For non-zero values, there may be some epsilon from + the exact proportion defined here depending on the precision + an implementation supports. Weight is not a percentage + and the sum of weights does not need to equal 100. \n + If only one backend is specified and it has a weight + greater than 0, 100% of the traffic is forwarded to + that backend. If weight is set to 0, no traffic should + be forwarded for this entry. 
If unspecified, weight + defaults to 1." + format: int32 + type: integer + type: object + type: array + type: object + type: array + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} diff --git a/charts/consul/templates/crd-trafficpermissions.yaml b/charts/consul/templates/crd-trafficpermissions.yaml index 87727f4fbf..27ab6f5e3d 100644 --- a/charts/consul/templates/crd-trafficpermissions.yaml +++ b/charts/consul/templates/crd-trafficpermissions.yaml @@ -101,25 +101,23 @@ spec: when evaluating rules for the incoming connection. 
items: properties: - headers: - items: - properties: - exact: - type: string - invert: - type: boolean - name: - type: string - prefix: - type: string - present: - type: boolean - regex: - type: string - suffix: - type: string - type: object - type: array + header: + properties: + exact: + type: string + invert: + type: boolean + name: + type: string + prefix: + type: string + present: + type: boolean + regex: + type: string + suffix: + type: string + type: object methods: description: Methods is the list of HTTP methods. items: @@ -140,25 +138,23 @@ spec: type: array type: object type: array - headers: - items: - properties: - exact: - type: string - invert: - type: boolean - name: - type: string - prefix: - type: string - present: - type: boolean - regex: - type: string - suffix: - type: string - type: object - type: array + header: + properties: + exact: + type: string + invert: + type: boolean + name: + type: string + prefix: + type: string + present: + type: boolean + regex: + type: string + suffix: + type: string + type: object methods: description: Methods is the list of HTTP methods. If no methods are specified, this rule will apply to all methods. diff --git a/charts/consul/templates/create-federation-secret-job.yaml b/charts/consul/templates/create-federation-secret-job.yaml index 2092b97852..aff6b5a934 100644 --- a/charts/consul/templates/create-federation-secret-job.yaml +++ b/charts/consul/templates/create-federation-secret-job.yaml @@ -94,7 +94,6 @@ spec: containers: - name: create-federation-secret image: "{{ .Values.global.imageK8S }}" - {{ template "consul.imagePullPolicy" . }} {{- include "consul.restrictedSecurityContext" . 
| nindent 10 }} env: - name: NAMESPACE diff --git a/charts/consul/templates/dns-proxy-clusterrole.yaml b/charts/consul/templates/dns-proxy-clusterrole.yaml deleted file mode 100644 index 0ad128c959..0000000000 --- a/charts/consul/templates/dns-proxy-clusterrole.yaml +++ /dev/null @@ -1,34 +0,0 @@ -{{- if (or (and (ne (.Values.dns.proxy.enabled | toString) "-") .Values.dns.proxy.enabled) (and (eq (.Values.dns.proxy.enabled | toString) "-") .Values.global.enabled)) }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "consul.fullname" . }}-dns-proxy - labels: - app: {{ template "consul.name" . }} - chart: {{ template "consul.chart" . }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - component: dns-proxy -{{- if or .Values.global.acls.manageSystemACLs .Values.global.enablePodSecurityPolicies }} -rules: - {{- if .Values.global.acls.manageSystemACLs }} - - apiGroups: [""] - resources: - - secrets - resourceNames: - - {{ template "consul.fullname" . }}-dns-proxy-acl-token - verbs: - - get - {{- end }} - {{- if .Values.global.enablePodSecurityPolicies }} - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - resourceNames: - - {{ template "consul.fullname" . }}-dns-proxy - verbs: - - use - {{- end }} - {{- else }} -rules: [] - {{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/consul/templates/dns-proxy-clusterrolebinding.yaml b/charts/consul/templates/dns-proxy-clusterrolebinding.yaml deleted file mode 100644 index 346d09e912..0000000000 --- a/charts/consul/templates/dns-proxy-clusterrolebinding.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if (or (and (ne (.Values.dns.proxy.enabled | toString) "-") .Values.dns.proxy.enabled) (and (eq (.Values.dns.proxy.enabled | toString) "-") .Values.global.enabled)) }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "consul.fullname" . }}-dns-proxy - labels: - app: {{ template "consul.name" . 
}} - chart: {{ template "consul.chart" . }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - component: dns-proxy -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "consul.fullname" . }}-dns-proxy -subjects: - - kind: ServiceAccount - name: {{ template "consul.fullname" . }}-dns-proxy - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/charts/consul/templates/dns-proxy-deployment.yaml b/charts/consul/templates/dns-proxy-deployment.yaml deleted file mode 100644 index c4cad4332e..0000000000 --- a/charts/consul/templates/dns-proxy-deployment.yaml +++ /dev/null @@ -1,196 +0,0 @@ -{{- if (or (and (ne (.Values.dns.proxy.enabled | toString) "-") .Values.dns.proxy.enabled) (and (eq (.Values.dns.proxy.enabled | toString) "-") .Values.global.enabled)) }} -{{- if not .Values.connectInject.enabled }}{{ fail "connectInject.enabled must be true" }}{{ end -}} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "consul.fullname" . }}-dns-proxy - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "consul.name" . }} - chart: {{ template "consul.chart" . }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - component: dns-proxy - {{- if .Values.global.extraLabels }} - {{- toYaml .Values.global.extraLabels | nindent 4 }} - {{- end }} -spec: - replicas: {{ .Values.dns.proxy.replicas }} - selector: - matchLabels: - app: {{ template "consul.name" . }} - chart: {{ template "consul.chart" . }} - release: {{ .Release.Name }} - component: dns-proxy - template: - metadata: - labels: - app: {{ template "consul.name" . }} - chart: {{ template "consul.chart" . 
}} - release: {{ .Release.Name }} - component: dns-proxy - {{- if .Values.global.extraLabels }} - {{- toYaml .Values.global.extraLabels | nindent 8 }} - {{- end }} - annotations: - "consul.hashicorp.com/connect-inject": "false" - "consul.hashicorp.com/mesh-inject": "false" - {{- if (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) }} - "vault.hashicorp.com/agent-init-first": "true" - "vault.hashicorp.com/agent-inject": "true" - "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulCARole }} - "vault.hashicorp.com/agent-inject-secret-serverca.crt": {{ .Values.global.tls.caCert.secretName }} - "vault.hashicorp.com/agent-inject-template-serverca.crt": {{ template "consul.serverTLSCATemplate" . }} - {{- if and .Values.global.secretsBackend.vault.ca.secretName .Values.global.secretsBackend.vault.ca.secretKey }} - "vault.hashicorp.com/agent-extra-secret": "{{ .Values.global.secretsBackend.vault.ca.secretName }}" - "vault.hashicorp.com/ca-cert": "/vault/custom/{{ .Values.global.secretsBackend.vault.ca.secretKey }}" - {{- end }} - {{- if .Values.global.secretsBackend.vault.agentAnnotations }} - {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . | nindent 8 | trim }} - {{- end }} - {{- if (and (.Values.global.secretsBackend.vault.vaultNamespace) (not (hasKey (default "" .Values.global.secretsBackend.vault.agentAnnotations | fromYaml) "vault.hashicorp.com/namespace")))}} - "vault.hashicorp.com/namespace": "{{ .Values.global.secretsBackend.vault.vaultNamespace }}" - {{- end }} - {{- end }} - {{- if .Values.dns.annotations }} - {{- tpl .Values.dns.annotations . | nindent 8 }} - {{- end }} - spec: - terminationGracePeriodSeconds: 10 - serviceAccountName: {{ template "consul.fullname" . 
}}-dns-proxy - volumes: - - name: consul-service - emptyDir: - medium: "Memory" - {{- if .Values.global.tls.enabled }} - {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} - - name: consul-ca-cert - secret: - {{- if .Values.global.tls.caCert.secretName }} - secretName: {{ .Values.global.tls.caCert.secretName }} - {{- else }} - secretName: {{ template "consul.fullname" . }}-ca-cert - {{- end }} - items: - - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} - path: tls.crt - {{- end }} - {{- end }} - containers: - - name: dns-proxy - image: {{ .Values.global.imageConsulDataplane | quote }} - volumeMounts: - - mountPath: /consul/service - name: consul-service - readOnly: true - {{- if .Values.global.tls.enabled }} - {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} - - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true - {{- end }} - {{- end }} - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: DP_CREDENTIAL_LOGIN_META1 - value: pod=$(NAMESPACE)/$(POD_NAME) - - name: DP_CREDENTIAL_LOGIN_META2 - value: component=dns-proxy - - name: DP_SERVICE_NODE_NAME - value: $(NODE_NAME)-virtual - command: - - consul-dataplane - args: - - -consul-dns-bind-addr=0.0.0.0 - - -consul-dns-bind-port={{ .Values.dns.proxy.port }} - {{- if .Values.externalServers.enabled }} - - -addresses={{ .Values.externalServers.hosts | first }} - {{- else }} - - -addresses={{ template "consul.fullname" . 
}}-server.{{ .Release.Namespace }}.svc - {{- end }} - {{- if .Values.externalServers.enabled }} - - -grpc-port={{ .Values.externalServers.grpcPort }} - {{- else }} - - -grpc-port=8502 - {{- end }} - {{- if .Values.global.tls.enabled }} - {{- if (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) }} - {{- if .Values.global.secretsBackend.vault.enabled }} - - -ca-certs=/vault/secrets/serverca.crt - {{- else }} - - -ca-certs=/consul/tls/ca/tls.crt - {{- end }} - {{- end }} - {{- if and .Values.externalServers.enabled .Values.externalServers.tlsServerName }} - - -tls-server-name={{.Values.externalServers.tlsServerName }} - {{- else if .Values.global.cloud.enabled }} - - -tls-server-name=server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}} - {{- end }} - {{- else }} - - -tls-disabled - {{- end }} - {{- if .Values.global.acls.manageSystemACLs }} - - -credential-type=login - - -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} - - -login-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} - - -login-datacenter={{ .Values.global.federation.primaryDatacenter }} - {{- else }} - - -login-auth-method={{ template "consul.fullname" . 
}}-k8s-component-auth-method - {{- end }} - {{- if .Values.global.adminPartitions.enabled }} - - -login-partition={{ .Values.global.adminPartitions.name }} - {{- end }} - {{- end }} - {{- if .Values.global.adminPartitions.enabled }} - - -service-partition={{ .Values.global.adminPartitions.name }} - {{- end }} - - -log-level={{ default .Values.global.logLevel .Values.dns.proxy.logLevel }} - - -log-json={{ .Values.global.logJSON }} - - {{- if and .Values.externalServers.enabled .Values.externalServers.skipServerWatch }} - - -server-watch-disabled=true - {{- end }} - - -mode=dns-proxy - livenessProbe: - tcpSocket: - port: {{ .Values.dns.proxy.port }} - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - readinessProbe: - tcpSocket: - port: {{ .Values.dns.proxy.port }} - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - ports: - - containerPort: {{ .Values.dns.proxy.port }} - protocol: "TCP" - name: dns-tcp - - containerPort: {{ .Values.dns.proxy.port }} - protocol: "UDP" - name: dns-udp -{{- end }} \ No newline at end of file diff --git a/charts/consul/templates/dns-proxy-service.yaml b/charts/consul/templates/dns-proxy-service.yaml deleted file mode 100644 index f3f9624b54..0000000000 --- a/charts/consul/templates/dns-proxy-service.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if (or (and (ne (.Values.dns.proxy.enabled | toString) "-") .Values.dns.proxy.enabled) (and (eq (.Values.dns.proxy.enabled | toString) "-") .Values.global.enabled)) }} - -apiVersion: v1 -kind: Service -metadata: - name: {{ template "consul.fullname" . }}-dns-proxy - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "consul.name" . }} - chart: {{ template "consul.chart" . }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - component: dns-proxy -{{- if .Values.dns.annotations }} - annotations: - {{ tpl .Values.dns.annotations . 
| nindent 4 | trim }} -{{- end }} -spec: - selector: - app: {{ template "consul.name" . }} - release: "{{ .Release.Name }}" - component: dns-proxy -{{- if .Values.dns.type }} - type: {{ .Values.dns.type }} -{{- end }} -{{- if .Values.dns.clusterIP }} - clusterIP: {{ .Values.dns.clusterIP }} -{{- end }} - ports: - - name: dns-tcp - port: {{ .Values.dns.proxy.port }} - protocol: "TCP" - targetPort: dns-tcp - - name: dns-udp - port: {{ .Values.dns.proxy.port }} - protocol: "UDP" - targetPort: dns-udp ---- -{{- end }} diff --git a/charts/consul/templates/dns-service.yaml b/charts/consul/templates/dns-service.yaml index 740c81653d..5bb446bc19 100644 --- a/charts/consul/templates/dns-service.yaml +++ b/charts/consul/templates/dns-service.yaml @@ -1,5 +1,4 @@ {{- if (or (and (ne (.Values.dns.enabled | toString) "-") .Values.dns.enabled) (and (eq (.Values.dns.enabled | toString) "-") .Values.connectInject.transparentProxy.defaultEnabled)) }} -{{- if not .Values.dns.proxy.enabled }} # Service for Consul DNS. apiVersion: v1 kind: Service @@ -40,4 +39,3 @@ spec: {{ tpl .Values.dns.additionalSpec . | nindent 2 | trim }} {{- end }} {{- end }} -{{- end }} diff --git a/charts/consul/templates/enterprise-license-job.yaml b/charts/consul/templates/enterprise-license-job.yaml index 9dd0281978..8db9500a22 100644 --- a/charts/consul/templates/enterprise-license-job.yaml +++ b/charts/consul/templates/enterprise-license-job.yaml @@ -59,7 +59,6 @@ spec: containers: - name: apply-enterprise-license image: "{{ default .Values.global.image .Values.server.image }}" - {{ template "consul.imagePullPolicy" . }} env: - name: ENTERPRISE_LICENSE {{- if .Values.global.secretsBackend.vault.enabled }} @@ -126,7 +125,6 @@ spec: initContainers: - name: ent-license-acl-init image: {{ .Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" . 
}} command: - "/bin/sh" - "-ec" diff --git a/charts/consul/templates/gateway-cleanup-clusterrole.yaml b/charts/consul/templates/gateway-cleanup-clusterrole.yaml index c533a882f5..5518bfc390 100644 --- a/charts/consul/templates/gateway-cleanup-clusterrole.yaml +++ b/charts/consul/templates/gateway-cleanup-clusterrole.yaml @@ -24,6 +24,15 @@ rules: verbs: - get - delete + - apiGroups: + - mesh.consul.hashicorp.com + resources: + - gatewayclassconfigs + - gatewayclasses + - meshgateways + verbs: + - get + - delete {{- if .Values.global.enablePodSecurityPolicies }} - apiGroups: ["policy"] resources: ["podsecuritypolicies"] diff --git a/charts/consul/templates/gateway-cleanup-job.yaml b/charts/consul/templates/gateway-cleanup-job.yaml index 0d38f6ec8b..0d4f84272c 100644 --- a/charts/consul/templates/gateway-cleanup-job.yaml +++ b/charts/consul/templates/gateway-cleanup-job.yaml @@ -38,7 +38,6 @@ spec: containers: - name: gateway-cleanup image: {{ .Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" . }} {{- include "consul.restrictedSecurityContext" . 
| nindent 10 }} command: - consul-k8s-control-plane diff --git a/charts/consul/templates/gateway-resources-clusterrole.yaml b/charts/consul/templates/gateway-resources-clusterrole.yaml index c3bdfeb4a3..ad7082f060 100644 --- a/charts/consul/templates/gateway-resources-clusterrole.yaml +++ b/charts/consul/templates/gateway-resources-clusterrole.yaml @@ -10,8 +10,17 @@ metadata: release: {{ .Release.Name }} component: gateway-resources rules: + - apiGroups: + - mesh.consul.hashicorp.com + resources: + - meshgateways + verbs: + - get + - update + - create - apiGroups: - consul.hashicorp.com + - mesh.consul.hashicorp.com resources: - gatewayclassconfigs verbs: @@ -20,6 +29,7 @@ rules: - create - apiGroups: - gateway.networking.k8s.io + - mesh.consul.hashicorp.com resources: - gatewayclasses verbs: diff --git a/charts/consul/templates/gateway-resources-configmap.yaml b/charts/consul/templates/gateway-resources-configmap.yaml index 9b17504bdd..842ba6690d 100644 --- a/charts/consul/templates/gateway-resources-configmap.yaml +++ b/charts/consul/templates/gateway-resources-configmap.yaml @@ -21,4 +21,112 @@ data: resources.json: | {{ toJson .Values.connectInject.apiGateway.managedGatewayClass.resources }} {{- end }} + {{- if and (mustHas "resource-apis" .Values.global.experiments) .Values.meshGateway.enabled }} + config.yaml: | + gatewayClassConfigs: + - apiVersion: mesh.consul.hashicorp.com/v2beta1 + metadata: + name: consul-mesh-gateway + kind: GatewayClassConfig + spec: + labels: + set: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: mesh-gateway + deployment: + {{- if .Values.meshGateway.priorityClassName }} + priorityClassName: {{ .Values.meshGateway.priorityClassName | quote }} + {{- end }} + {{- if .Values.meshGateway.affinity }} + affinity: {{ toJson (default "{}" .Values.meshGateway.affinity) }} + {{- end }} + {{- if .Values.meshGateway.annotations }} + annotations: + set: {{ toJson .Values.meshGateway.annotations }} + {{- end }} + {{- if .Values.global.extraLabels }} + labels: + set: {{ toJson .Values.global.extraLabels }} + {{- end }} + container: + consul: + logging: + level: {{ default .Values.global.logLevel .Values.meshGateway.logLevel }} + portModifier: {{ sub .Values.meshGateway.containerPort .Values.meshGateway.service.port }} + {{- if .Values.meshGateway.hostPort }} + hostPort: {{ .Values.meshGateway.hostPort }} + {{- end }} + resources: {{ toJson .Values.meshGateway.resources }} + initContainer: + consul: + logging: + level: {{ default .Values.global.logLevel .Values.meshGateway.logLevel }} + resources: {{ toJson .Values.meshGateway.initServiceInitContainer.resources }} + {{- with .Values.meshGateway.nodeSelector }} + nodeSelector: {{ fromYaml . | toJson }} + {{- end }} + {{- with .Values.meshGateway.hostNetwork }} + hostNetwork: {{ . }} + {{- end }} + {{- with .Values.meshGateway.dnsPolicy }} + dnsPolicy: {{ . }} + {{- end }} + {{- with .Values.meshGateway.topologySpreadConstraints }} + topologySpreadConstraints: + {{ fromYamlArray . | toJson }} + {{- end }} + {{- if .Values.meshGateway.affinity }} + affinity: + {{ tpl .Values.meshGateway.affinity . 
| nindent 16 | trim }} + {{- end }} + replicas: + default: {{ .Values.meshGateway.replicas }} + min: {{ .Values.meshGateway.replicas }} + max: {{ .Values.meshGateway.replicas }} + {{- if .Values.meshGateway.tolerations }} + tolerations: {{ fromYamlArray .Values.meshGateway.tolerations | toJson }} + {{- end }} + service: + {{- if .Values.meshGateway.service.annotations }} + annotations: + set: {{ toJson .Values.meshGateway.service.annotations }} + {{- end }} + type: {{ .Values.meshGateway.service.type }} + {{- if .Values.meshGateway.serviceAccount.annotations }} + serviceAccount: + annotations: + set: {{ toJson .Values.meshGateway.serviceAccount.annotations }} + {{- end }} + meshGateways: + - apiVersion: mesh.consul.hashicorp.com/v2beta1 + kind: MeshGateway + metadata: + name: mesh-gateway + namespace: {{ .Release.Namespace }} + annotations: + # TODO are these annotations even necessary? + "consul.hashicorp.com/gateway-wan-address-source": {{ .Values.meshGateway.wanAddress.source | quote }} + "consul.hashicorp.com/gateway-wan-address-static": {{ .Values.meshGateway.wanAddress.static | quote }} + {{- if eq .Values.meshGateway.wanAddress.source "Service" }} + {{- if eq .Values.meshGateway.service.type "NodePort" }} + "consul.hashicorp.com/gateway-wan-port": {{ .Values.meshGateway.service.nodePort | quote }} + {{- else }} + "consul.hashicorp.com/gateway-wan-port": {{ .Values.meshGateway.service.port | quote }} + {{- end }} + {{- else }} + "consul.hashicorp.com/gateway-wan-port": {{ .Values.meshGateway.wanAddress.port | quote }} + {{- end }} + spec: + gatewayClassName: consul-mesh-gateway + listeners: + - name: "wan" + port: {{ .Values.meshGateway.service.port }} + protocol: "TCP" + workloads: + prefixes: + - "mesh-gateway" + {{- end }} {{- end }} diff --git a/charts/consul/templates/gateway-resources-job.yaml b/charts/consul/templates/gateway-resources-job.yaml index 5f3110479c..2a5905f304 100644 --- a/charts/consul/templates/gateway-resources-job.yaml +++ 
b/charts/consul/templates/gateway-resources-job.yaml @@ -1,4 +1,3 @@ -{{- if .Values.apiGateway}}{{fail "[DEPRECATED and REMOVED] the apiGateway stanza is no longer supported as of Consul 1.19.0. Use connectInject.apiGateway instead."}}{{- end -}} {{- if .Values.connectInject.enabled }} apiVersion: batch/v1 kind: Job @@ -39,7 +38,6 @@ spec: containers: - name: gateway-resources image: {{ .Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" . }} {{- include "consul.restrictedSecurityContext" . | nindent 10 }} command: - consul-k8s-control-plane @@ -53,6 +51,29 @@ spec: - -heritage={{ .Release.Service }} - -release-name={{ .Release.Name }} - -component=api-gateway + {{- if .Values.apiGateway.enabled }} # Override values from the old stanza. To be removed after ~1.18 (t-eckert 2023-05-19) NET-6263 + {{- if .Values.apiGateway.managedGatewayClass.deployment }} + {{- if .Values.apiGateway.managedGatewayClass.deployment.defaultInstances }} + - -deployment-default-instances={{ .Values.apiGateway.managedGatewayClass.deployment.defaultInstances }} + {{- end}} + {{- if .Values.apiGateway.managedGatewayClass.deployment.maxInstances }} + - -deployment-max-instances={{ .Values.apiGateway.managedGatewayClass.deployment.maxInstances }} + {{- end}} + {{- if .Values.apiGateway.managedGatewayClass.deployment.minInstances }} + - -deployment-min-instances={{ .Values.apiGateway.managedGatewayClass.deployment.minInstances }} + {{- end}} + {{- end}} + {{- if .Values.apiGateway.managedGatewayClass.nodeSelector }} + - -node-selector={{ .Values.apiGateway.managedGatewayClass.nodeSelector }} + {{- end }} + {{- if .Values.apiGateway.managedGatewayClass.tolerations }} + - -tolerations={{ .Values.apiGateway.managedGatewayClass.tolerations }} + {{- end }} + {{- if .Values.apiGateway.managedGatewayClass.copyAnnotations.service }} + - -service-annotations={{ .Values.apiGateway.managedGatewayClass.copyAnnotations.service.annotations }} + {{- end }} + - -service-type={{ 
.Values.apiGateway.managedGatewayClass.serviceType }} + {{- else }} # the new stanza {{- if .Values.connectInject.apiGateway.managedGatewayClass.deployment }} {{- if .Values.connectInject.apiGateway.managedGatewayClass.deployment.defaultInstances }} - -deployment-default-instances={{ .Values.connectInject.apiGateway.managedGatewayClass.deployment.defaultInstances }} @@ -89,6 +110,7 @@ spec: {{- if .Values.connectInject.apiGateway.managedGatewayClass.metrics.port }} - -metrics-port={{ .Values.connectInject.apiGateway.managedGatewayClass.metrics.port }} {{- end }} + {{- end }} {{- with .Values.connectInject.apiGateway.managedGatewayClass.resourceJob.resources }} resources: {{- toYaml . | nindent 12 }} diff --git a/charts/consul/templates/gossip-encryption-autogenerate-job.yaml b/charts/consul/templates/gossip-encryption-autogenerate-job.yaml index 485064f80f..cea13c77fe 100644 --- a/charts/consul/templates/gossip-encryption-autogenerate-job.yaml +++ b/charts/consul/templates/gossip-encryption-autogenerate-job.yaml @@ -49,7 +49,6 @@ spec: containers: - name: gossip-encryption-autogen image: "{{ .Values.global.imageK8S }}" - {{ template "consul.imagePullPolicy" . }} {{- include "consul.restrictedSecurityContext" . | nindent 10 }} command: - "/bin/sh" diff --git a/charts/consul/templates/ingress-gateways-deployment.yaml b/charts/consul/templates/ingress-gateways-deployment.yaml index c7a38bb040..508ab64eff 100644 --- a/charts/consul/templates/ingress-gateways-deployment.yaml +++ b/charts/consul/templates/ingress-gateways-deployment.yaml @@ -160,9 +160,6 @@ spec: terminationGracePeriodSeconds: {{ default $defaults.terminationGracePeriodSeconds .terminationGracePeriodSeconds }} serviceAccountName: {{ template "consul.fullname" $root }}-{{ .name }} volumes: - - name: tmp - emptyDir: - medium: "Memory" - name: consul-service emptyDir: medium: "Memory" @@ -184,7 +181,6 @@ spec: # ingress-gateway-init registers the ingress gateway service with Consul. 
- name: ingress-gateway-init image: {{ $root.Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" $root }} {{- include "consul.restrictedSecurityContext" $ | nindent 8 }} env: - name: NAMESPACE @@ -225,8 +221,6 @@ spec: -log-level={{ default $root.Values.global.logLevel $root.Values.ingressGateways.logLevel }} \ -log-json={{ $root.Values.global.logJSON }} volumeMounts: - - name: tmp - mountPath: /tmp - name: consul-service mountPath: /consul/service {{- if $root.Values.global.tls.enabled }} @@ -246,14 +240,11 @@ spec: containers: - name: ingress-gateway image: {{ $root.Values.global.imageConsulDataplane | quote }} - {{ template "consul.imagePullPolicy" $root }} {{- include "consul.restrictedSecurityContext" $ | nindent 8 }} {{- if (default $defaults.resources .resources) }} resources: {{ toYaml (default $defaults.resources .resources) | nindent 10 }} {{- end }} volumeMounts: - - name: tmp - mountPath: /tmp - name: consul-service mountPath: /consul/service readOnly: true diff --git a/charts/consul/templates/mesh-gateway-clusterrole.yaml b/charts/consul/templates/mesh-gateway-clusterrole.yaml index b951418b26..3053105105 100644 --- a/charts/consul/templates/mesh-gateway-clusterrole.yaml +++ b/charts/consul/templates/mesh-gateway-clusterrole.yaml @@ -1,4 +1,5 @@ {{- if .Values.meshGateway.enabled }} +{{- if not (mustHas "resource-apis" .Values.global.experiments) }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -32,3 +33,4 @@ rules: rules: [] {{- end }} {{- end }} +{{- end }} diff --git a/charts/consul/templates/mesh-gateway-clusterrolebinding.yaml b/charts/consul/templates/mesh-gateway-clusterrolebinding.yaml index f8150ebb53..2fb80fc04c 100644 --- a/charts/consul/templates/mesh-gateway-clusterrolebinding.yaml +++ b/charts/consul/templates/mesh-gateway-clusterrolebinding.yaml @@ -1,4 +1,5 @@ {{- if .Values.meshGateway.enabled }} +{{- if not (mustHas "resource-apis" .Values.global.experiments) }} apiVersion: 
rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -18,3 +19,4 @@ subjects: name: {{ template "consul.fullname" . }}-mesh-gateway namespace: {{ .Release.Namespace }} {{- end }} +{{- end }} diff --git a/charts/consul/templates/mesh-gateway-deployment.yaml b/charts/consul/templates/mesh-gateway-deployment.yaml index c6c33966ee..3d75d55613 100644 --- a/charts/consul/templates/mesh-gateway-deployment.yaml +++ b/charts/consul/templates/mesh-gateway-deployment.yaml @@ -1,4 +1,5 @@ {{- if .Values.meshGateway.enabled }} +{{- if not (mustHas "resource-apis" .Values.global.experiments) }} {{- if not .Values.connectInject.enabled }}{{ fail "connectInject.enabled must be true" }}{{ end -}} {{- if and .Values.global.acls.manageSystemACLs (ne .Values.meshGateway.consulServiceName "") (ne .Values.meshGateway.consulServiceName "mesh-gateway") }}{{ fail "if global.acls.manageSystemACLs is true, meshGateway.consulServiceName cannot be set" }}{{ end -}} {{- if .Values.meshGateway.globalMode }}{{ fail "meshGateway.globalMode is no longer supported; instead, you must migrate to CRDs (see www.consul.io/docs/k8s/crds/upgrade-to-crds)" }}{{ end -}} @@ -127,7 +128,6 @@ spec: initContainers: - name: mesh-gateway-init image: {{ .Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" . }} env: - name: NAMESPACE valueFrom: @@ -186,7 +186,6 @@ spec: containers: - name: mesh-gateway image: {{ .Values.global.imageConsulDataplane | quote }} - {{ template "consul.imagePullPolicy" . }} securityContext: capabilities: {{ if not .Values.meshGateway.hostNetwork}} @@ -321,3 +320,4 @@ spec: {{ tpl .Values.meshGateway.nodeSelector . 
| indent 8 | trim }} {{- end }} {{- end }} +{{- end }} diff --git a/charts/consul/templates/mesh-gateway-podsecuritypolicy.yaml b/charts/consul/templates/mesh-gateway-podsecuritypolicy.yaml index 04576fe926..56e4b7924c 100644 --- a/charts/consul/templates/mesh-gateway-podsecuritypolicy.yaml +++ b/charts/consul/templates/mesh-gateway-podsecuritypolicy.yaml @@ -1,4 +1,5 @@ {{- if and .Values.global.enablePodSecurityPolicies .Values.meshGateway.enabled }} +{{- if not (mustHas "resource-apis" .Values.global.experiments) }} apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: @@ -52,3 +53,4 @@ spec: rule: 'RunAsAny' readOnlyRootFilesystem: false {{- end }} +{{- end }} diff --git a/charts/consul/templates/mesh-gateway-service.yaml b/charts/consul/templates/mesh-gateway-service.yaml index 5fdceca8df..80f82ac897 100644 --- a/charts/consul/templates/mesh-gateway-service.yaml +++ b/charts/consul/templates/mesh-gateway-service.yaml @@ -1,4 +1,5 @@ {{- if and .Values.meshGateway.enabled }} +{{- if not (mustHas "resource-apis" .Values.global.experiments) }} apiVersion: v1 kind: Service metadata: @@ -31,3 +32,4 @@ spec: {{ tpl .Values.meshGateway.service.additionalSpec . 
| nindent 2 | trim }} {{- end }} {{- end }} +{{- end }} diff --git a/charts/consul/templates/mesh-gateway-serviceaccount.yaml b/charts/consul/templates/mesh-gateway-serviceaccount.yaml index 8c2da5ae06..b1a0661eaa 100644 --- a/charts/consul/templates/mesh-gateway-serviceaccount.yaml +++ b/charts/consul/templates/mesh-gateway-serviceaccount.yaml @@ -1,4 +1,5 @@ {{- if .Values.meshGateway.enabled }} +{{- if not (mustHas "resource-apis" .Values.global.experiments) }} apiVersion: v1 kind: ServiceAccount metadata: @@ -21,3 +22,4 @@ imagePullSecrets: {{- end }} {{- end }} {{- end }} +{{- end }} diff --git a/charts/consul/templates/partition-init-job.yaml b/charts/consul/templates/partition-init-job.yaml index 205341901c..21ad2930b8 100644 --- a/charts/consul/templates/partition-init-job.yaml +++ b/charts/consul/templates/partition-init-job.yaml @@ -85,7 +85,6 @@ spec: containers: - name: partition-init-job image: {{ .Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" . }} {{- include "consul.restrictedSecurityContext" . | nindent 10 }} env: {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 10 }} @@ -119,6 +118,9 @@ spec: {{- if .Values.global.cloud.enabled }} -tls-server-name=server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}} \ {{- end }} + {{- if and (mustHas "resource-apis" .Values.global.experiments) (mustHas "v2tenancy" .Values.global.experiments) }} + -enable-v2tenancy=true + {{- end }} resources: requests: memory: "50Mi" diff --git a/charts/consul/templates/server-acl-init-cleanup-job.yaml b/charts/consul/templates/server-acl-init-cleanup-job.yaml index 3d7d6c8120..b47e04188f 100644 --- a/charts/consul/templates/server-acl-init-cleanup-job.yaml +++ b/charts/consul/templates/server-acl-init-cleanup-job.yaml @@ -61,7 +61,6 @@ spec: containers: - name: server-acl-init-cleanup image: {{ .Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" . 
}} {{- if not .Values.server.containerSecurityContext.aclInit }} {{- include "consul.restrictedSecurityContext" . | nindent 10 }} {{- end }} diff --git a/charts/consul/templates/server-acl-init-job.yaml b/charts/consul/templates/server-acl-init-job.yaml index a6be262335..2e798a54d5 100644 --- a/charts/consul/templates/server-acl-init-job.yaml +++ b/charts/consul/templates/server-acl-init-job.yaml @@ -32,11 +32,6 @@ metadata: {{- if .Values.global.extraLabels }} {{- toYaml .Values.global.extraLabels | nindent 4 }} {{- end }} - {{- if .Values.global.argocd.enabled }} - annotations: - "argocd.argoproj.io/hook": "Sync" - "argocd.argoproj.io/hook-delete-policy": "HookSucceeded" - {{- end }} spec: template: metadata: @@ -55,7 +50,12 @@ spec: {{- if .Values.global.acls.annotations }} {{- tpl .Values.global.acls.annotations . | nindent 8 }} {{- end }} + {{- if .Values.global.argocd.enabled }} + "argocd.argoproj.io/hook": "Sync" + "argocd.argoproj.io/hook-delete-policy": "HookSucceeded" + {{- end }} {{- if .Values.global.secretsBackend.vault.enabled }} + {{- /* Run the Vault agent as both an init container and sidecar. The Vault agent sidecar is needed when server-acl-init bootstraps ACLs and writes the bootstrap token back to Vault. @@ -137,7 +137,6 @@ spec: containers: - name: server-acl-init-job image: {{ .Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" . }} {{- if not .Values.server.containerSecurityContext.aclInit }} {{- include "consul.restrictedSecurityContext" . 
| nindent 8 }} {{- end }} @@ -193,6 +192,10 @@ spec: {{- else }} -secrets-backend=kubernetes \ {{- end }} + + {{- if (mustHas "resource-apis" .Values.global.experiments) }} + -enable-resource-apis=true \ + {{- end }} {{- if .Values.global.acls.bootstrapToken.secretName }} -bootstrap-token-secret-name={{ .Values.global.acls.bootstrapToken.secretName }} \ @@ -227,9 +230,7 @@ spec: {{- if .Values.meshGateway.enabled }} -mesh-gateway=true \ {{- end }} - {{- if .Values.dns.proxy.enabled }} - -dns-proxy=true \ - {{- end }} + {{- if .Values.ingressGateways.enabled }} {{- if .Values.global.enableConsulNamespaces }} {{- $root := . }} @@ -303,6 +304,10 @@ spec: -partition-token-file=/vault/secrets/partition-token \ {{- end }} + {{- if .Values.apiGateway.enabled }} + -api-gateway-controller=true \ + {{- end }} + {{- if .Values.global.enableConsulNamespaces }} -enable-namespaces=true \ {{- /* syncCatalog must be enabled to set sync flags */}} diff --git a/charts/consul/templates/server-statefulset.yaml b/charts/consul/templates/server-statefulset.yaml index f8cb9b4def..315c8c4666 100644 --- a/charts/consul/templates/server-statefulset.yaml +++ b/charts/consul/templates/server-statefulset.yaml @@ -132,7 +132,7 @@ spec: {{- tpl .Values.server.annotations . | nindent 8 }} {{- end }} {{- if (and .Values.global.metrics.enabled .Values.global.metrics.enableAgentMetrics) }} - {{- if (or (not .Values.global.metrics.datadog.enabled) (and .Values.global.metrics.datadog.enabled (.Values.global.metrics.datadog.dogstatsd.enabled))) }} + {{- if not .Values.global.metrics.datadog.openMetricsPrometheus.enabled }} "prometheus.io/scrape": "true" {{- if not (hasKey (default "" .Values.server.annotations | fromYaml) "prometheus.io/path")}} "prometheus.io/path": "/v1/agent/metrics" @@ -156,12 +156,12 @@ spec: "instances": [ { {{- if .Values.global.tls.enabled }} - "openmetrics_endpoint": "https://{{ template "consul.fullname" . 
}}-server.{{ .Release.Namespace }}.svc:8501/v1/agent/metrics?format=prometheus", + "openmetrics_endpoint": "https://consul-server.{{ .Release.Namespace }}.svc:8501/v1/agent/metrics?format=prometheus", "tls_cert": "/etc/datadog-agent/conf.d/consul.d/certs/tls.crt", "tls_private_key": "/etc/datadog-agent/conf.d/consul.d/certs/tls.key", "tls_ca_cert": "/etc/datadog-agent/conf.d/consul.d/ca/tls.crt", {{- else }} - "openmetrics_endpoint": "http://{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:8500/v1/agent/metrics?format=prometheus", + "openmetrics_endpoint": "http://consul-server.{{ .Release.Namespace }}.svc:8500/v1/agent/metrics?format=prometheus", {{- end }} {{- if ( .Values.global.acls.manageSystemACLs) }} "headers": { @@ -182,12 +182,12 @@ spec: "instances": [ { {{- if .Values.global.tls.enabled }} - "url": "https://{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:8501", + "url": "https://consul-server.{{ .Release.Namespace }}.svc:8501", "tls_cert": "/etc/datadog-agent/conf.d/consul.d/certs/tls.crt", "tls_private_key": "/etc/datadog-agent/conf.d/consul.d/certs/tls.key", "tls_ca_cert": "/etc/datadog-agent/conf.d/consul.d/ca/tls.crt", {{- else }} - "url": "http://{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:8500", + "url": "http://consul-server.consul.svc:8500", {{- end }} "use_prometheus_endpoint": true, {{- if ( .Values.global.acls.manageSystemACLs) }} @@ -226,8 +226,6 @@ spec: {{- toYaml .Values.server.securityContext | nindent 8 }} {{- end }} volumes: - - name: tmp - emptyDir: {} - name: config configMap: name: {{ template "consul.fullname" . 
}}-server-config @@ -296,7 +294,7 @@ spec: {{- if and .Values.global.metrics.datadog.enabled .Values.global.metrics.datadog.dogstatsd.enabled (eq .Values.global.metrics.datadog.dogstatsd.socketTransportType "UDS" ) }} - name: dsdsocket hostPath: - path: {{ dir .Values.global.metrics.datadog.dogstatsd.dogstatsdAddr | trimAll "\"" }} + path: /var/run/datadog type: DirectoryOrCreate {{- end }} {{- range .Values.server.extraVolumes }} @@ -321,7 +319,6 @@ spec: initContainers: - name: locality-init image: {{ .Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" . }} env: - name: NODE_NAME valueFrom: @@ -339,7 +336,6 @@ spec: containers: - name: consul image: "{{ default .Values.global.image .Values.server.image | trimPrefix "\"" | trimSuffix "\"" }}" - {{ template "consul.imagePullPolicy" . }} imagePullPolicy: {{ .Values.global.imagePullPolicy }} env: - name: ADVERTISE_IP @@ -548,7 +544,7 @@ spec: {{- end }} {{- if and .Values.global.metrics.datadog.enabled .Values.global.metrics.datadog.dogstatsd.enabled (eq .Values.global.metrics.datadog.dogstatsd.socketTransportType "UDS" ) }} - name: dsdsocket - mountPath: {{ dir .Values.global.metrics.datadog.dogstatsd.dogstatsdAddr | trimAll "\"" }} + mountPath: /var/run/datadog readOnly: true {{- end }} {{- range .Values.server.extraVolumes }} @@ -566,9 +562,6 @@ spec: mountPath: /trusted-cas readOnly: false {{- end }} - - name: tmp - mountPath: /tmp - readOnly: false ports: {{- if (or (not .Values.global.tls.enabled) (not .Values.global.tls.httpsOnly)) }} - name: http @@ -659,7 +652,6 @@ spec: {{- if .Values.server.snapshotAgent.enabled }} - name: consul-snapshot-agent image: "{{ default .Values.global.image .Values.server.image }}" - {{ template "consul.imagePullPolicy" . 
}} env: {{- if .Values.server.snapshotAgent.caCert }} - name: SSL_CERT_DIR diff --git a/charts/consul/templates/sync-catalog-deployment.yaml b/charts/consul/templates/sync-catalog-deployment.yaml index 963e6b2485..3851f0a8e2 100644 --- a/charts/consul/templates/sync-catalog-deployment.yaml +++ b/charts/consul/templates/sync-catalog-deployment.yaml @@ -81,7 +81,6 @@ spec: containers: - name: sync-catalog image: "{{ default .Values.global.imageK8S .Values.syncCatalog.image }}" - {{ template "consul.imagePullPolicy" . }} {{- include "consul.restrictedSecurityContext" . | nindent 8 }} env: {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 8 }} diff --git a/charts/consul/templates/telemetry-collector-deployment.yaml b/charts/consul/templates/telemetry-collector-deployment.yaml index 826e072bb1..f7b6d7bd2e 100644 --- a/charts/consul/templates/telemetry-collector-deployment.yaml +++ b/charts/consul/templates/telemetry-collector-deployment.yaml @@ -1,4 +1,4 @@ -{{- if .Values.telemetryCollector.enabled }} +{{- if and .Values.telemetryCollector.enabled (not (mustHas "resource-apis" .Values.global.experiments)) }} {{- if not .Values.telemetryCollector.image}}{{ fail "telemetryCollector.image must be set to enable consul-telemetry-collector" }}{{ end }} {{- if not .Values.connectInject.enabled }}{{ fail "connectInject.enabled must be true" }}{{ end -}} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} @@ -143,7 +143,7 @@ spec: -service-name="" image: {{ .Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" . 
}} + imagePullPolicy: IfNotPresent {{- if .Values.telemetryCollector.initContainer.resources }} resources: {{- toYaml .Values.telemetryCollector.initContainer.resources | nindent 12 }} @@ -171,7 +171,7 @@ spec: containers: - name: consul-telemetry-collector image: {{ .Values.telemetryCollector.image }} - {{ template "consul.imagePullPolicy" . }} + imagePullPolicy: {{ .Values.global.imagePullPolicy }} ports: - containerPort: 9090 name: metrics @@ -256,13 +256,9 @@ spec: {{- if eq (.Values.global.metrics.datadog.otlp.protocol | lower ) "http" }} - name: CO_OTEL_HTTP_ENDPOINT value: "http://$(HOST_IP):4318" - - name: OTEL_EXPORTER_OTLP_ENDPOINT - value: "http://$(HOST_IP):4318" {{- else if eq (.Values.global.metrics.datadog.otlp.protocol | lower) "grpc" }} - name: CO_OTEL_HTTP_ENDPOINT - value: "http://$(HOST_IP):4317" - - name: OTEL_EXPORTER_OTLP_ENDPOINT - value: "http://$(HOST_IP):4317" + value: "grpc://$(HOST_IP):4317" {{- end }} {{- end }} {{- include "consul.extraEnvironmentVars" .Values.telemetryCollector | nindent 12 }} @@ -299,7 +295,7 @@ spec: # consul-dataplane container - name: consul-dataplane image: "{{ .Values.global.imageConsulDataplane }}" - {{ template "consul.imagePullPolicy" . 
}} + imagePullPolicy: IfNotPresent command: - consul-dataplane args: diff --git a/charts/consul/templates/telemetry-collector-v2-deployment.yaml b/charts/consul/templates/telemetry-collector-v2-deployment.yaml new file mode 100644 index 0000000000..09f4a2dbbc --- /dev/null +++ b/charts/consul/templates/telemetry-collector-v2-deployment.yaml @@ -0,0 +1,415 @@ +{{- if and .Values.telemetryCollector.enabled (mustHas "resource-apis" .Values.global.experiments) }} +{{- if not .Values.telemetryCollector.image}}{{ fail "telemetryCollector.image must be set to enable consul-telemetry-collector" }}{{ end }} +{{- if not .Values.connectInject.enabled }}{{ fail "connectInject.enabled must be true" }}{{ end -}} +{{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} +{{ template "consul.validateCloudSecretKeys" . }} +{{ template "consul.validateTelemetryCollectorCloud" . }} +{{ template "consul.validateTelemetryCollectorCloudSecretKeys" . }} +{{ template "consul.validateTelemetryCollectorResourceId" . }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "consul.fullname" . }}-telemetry-collector + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: consul-telemetry-collector + {{- if .Values.global.extraLabels }} + {{- toYaml .Values.global.extraLabels | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.telemetryCollector.replicas }} + selector: + matchLabels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + release: {{ .Release.Name }} + component: consul-telemetry-collector + template: + metadata: + annotations: + "consul.hashicorp.com/mesh-inject": "false" + # This annotation tells the pod controller that this pod was injected even though it wasn't. + # This ensures the pod controller will sync a workload for the pod into Consul + "consul.hashicorp.com/mesh-inject-status": "injected" + # We aren't using tproxy and we don't have an original pod. This would be simpler if we made a path similar + # to gateways + "consul.hashicorp.com/transparent-proxy": "false" + "consul.hashicorp.com/transparent-proxy-overwrite-probes": "false" + "consul.hashicorp.com/consul-k8s-version": {{ $.Chart.Version }} + {{- if .Values.telemetryCollector.customExporterConfig }} + # configmap checksum + "consul.hashicorp.com/config-checksum": {{ include (print $.Template.BasePath "/telemetry-collector-configmap.yaml") . | sha256sum }} + {{- end }} + # vault annotations + {{- if (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) }} + "vault.hashicorp.com/agent-init-first": "true" + "vault.hashicorp.com/agent-inject": "true" + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulCARole }} + "vault.hashicorp.com/agent-inject-secret-serverca.crt": {{ .Values.global.tls.caCert.secretName }} + "vault.hashicorp.com/agent-inject-template-serverca.crt": {{ template "consul.serverTLSCATemplate" . }} + {{- if and .Values.global.secretsBackend.vault.ca.secretName .Values.global.secretsBackend.vault.ca.secretKey }} + "vault.hashicorp.com/agent-extra-secret": "{{ .Values.global.secretsBackend.vault.ca.secretName }}" + "vault.hashicorp.com/ca-cert": "/vault/custom/{{ .Values.global.secretsBackend.vault.ca.secretKey }}" + {{- end }} + {{- if .Values.global.secretsBackend.vault.agentAnnotations }} + {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . 
| nindent 8 | trim }} + {{- end }} + {{- if (and (.Values.global.secretsBackend.vault.vaultNamespace) (not (hasKey (default "" .Values.global.secretsBackend.vault.agentAnnotations | fromYaml) "vault.hashicorp.com/namespace")))}} + "vault.hashicorp.com/namespace": "{{ .Values.global.secretsBackend.vault.vaultNamespace }}" + {{- end }} + {{- end }} + + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: consul-telemetry-collector + {{- if .Values.global.extraLabels }} + {{- toYaml .Values.global.extraLabels | nindent 8 }} + {{- end }} + spec: + # This needs to explicitly be consul-telemetry-collector because we look this up from each service consul-dataplane + # to forward metrics to it. + serviceAccountName: consul-telemetry-collector + initContainers: + # We're manually managing this init container instead of using the mesh injector so that we don't run into + # any race conditions on the mesh-injector deployment or upgrade + - name: consul-mesh-init + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + # acl login info + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_LOGIN_AUTH_METHOD + value: {{ template "consul.fullname" . 
}}-k8s-auth-method + - name: CONSUL_LOGIN_DATACENTER + value: {{ .Values.global.datacenter }} + - name: CONSUL_LOGIN_META + value: "component=consul-telemetry-collector,pod=$(NAMESPACE)/$(POD_NAME)" + {{- end }} + # service and login namespace + # this is attempting to replicate the behavior of webhooks in calculating namespace + # https://github.com/hashicorp/consul-k8s/blob/b84339050bb2c4b62b60cec96275f74952b0ac9d/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go#L200 + {{- if .Values.global.enableConsulNamespaces }} + {{- if .Values.connectInject.consulNamespaces.mirroringK8S }} + - name: CONSUL_NAMESPACE + value: {{ .Values.connectInject.consulNamespaces.mirroringK8SPrefix }}{{ .Release.Namespace }} + {{- else }} + - name: CONSUL_NAMESPACE + value: {{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + {{- if .Values.connectInject.consulNamespaces.mirroringK8S }} + - name: CONSUL_LOGIN_NAMESPACE + value: "default" + {{- else }} + - name: CONSUL_LOGIN_NAMESPACE + value: {{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} + {{- end }} + {{- end }} + {{- end }} + command: + - /bin/sh + - -ec + - |- + exec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ + -log-level={{ default .Values.global.logLevel .Values.telemetryCollector.logLevel }} \ + -log-json={{ .Values.global.logJSON }} + + image: {{ .Values.global.imageK8S }} + imagePullPolicy: IfNotPresent + {{- if .Values.telemetryCollector.initContainer.resources }} + resources: + {{- toYaml .Values.telemetryCollector.initContainer.resources | nindent 12 }} + {{- else }} + resources: + limits: + cpu: 50m + memory: 150Mi + requests: + cpu: 50m + memory: 25Mi + {{- end }} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /consul/mesh-inject + name: consul-mesh-inject-data + {{- if .Values.global.tls.enabled }} + {{- if not (or (and 
.Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- end }} + containers: + - name: consul-telemetry-collector + image: {{ .Values.telemetryCollector.image }} + imagePullPolicy: {{ .Values.global.imagePullPolicy }} + ports: + - containerPort: 9090 + name: metrics + protocol: TCP + - containerPort: 9356 + name: metricsserver + protocol: TCP + env: + # These are mounted as secrets so that the telemetry-collector can use them when cloud is enabled. + # - the hcp-go-sdk in consul agent will already look for HCP_CLIENT_ID, HCP_CLIENT_SECRET, HCP_AUTH_URL, + # HCP_SCADA_ADDRESS, and HCP_API_HOST. so nothing more needs to be done. + # - HCP_RESOURCE_ID is created either in the global cloud section or in telemetryCollector.cloud + {{- if .Values.telemetryCollector.cloud.resourceId.secretName }} + - name: HCP_RESOURCE_ID + valueFrom: + secretKeyRef: + name: {{ .Values.telemetryCollector.cloud.resourceId.secretName }} + key: {{ .Values.telemetryCollector.cloud.resourceId.secretKey }} + {{- else if .Values.global.cloud.resourceId.secretName }} + - name: HCP_RESOURCE_ID + valueFrom: + secretKeyRef: + name: {{ .Values.global.cloud.resourceId.secretName }} + key: {{ .Values.global.cloud.resourceId.secretKey }} + {{- end }} + {{- if .Values.telemetryCollector.cloud.clientId.secretName }} + - name: HCP_CLIENT_ID + valueFrom: + secretKeyRef: + name: {{ .Values.telemetryCollector.cloud.clientId.secretName }} + key: {{ .Values.telemetryCollector.cloud.clientId.secretKey }} + {{- else if .Values.global.cloud.clientId.secretName }} + - name: HCP_CLIENT_ID + valueFrom: + secretKeyRef: + name: {{ .Values.global.cloud.clientId.secretName }} + key: {{ .Values.global.cloud.clientId.secretKey }} + {{- end }} + {{- if .Values.telemetryCollector.cloud.clientSecret.secretName }} + - name: HCP_CLIENT_SECRET + valueFrom: + secretKeyRef: + 
name: {{ .Values.telemetryCollector.cloud.clientSecret.secretName }} + key: {{ .Values.telemetryCollector.cloud.clientSecret.secretKey }} + {{- else if .Values.global.cloud.clientSecret.secretName }} + - name: HCP_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.global.cloud.clientSecret.secretName }} + key: {{ .Values.global.cloud.clientSecret.secretKey }} + {{- end}} + {{- if .Values.global.cloud.authUrl.secretName }} + - name: HCP_AUTH_URL + valueFrom: + secretKeyRef: + name: {{ .Values.global.cloud.authUrl.secretName }} + key: {{ .Values.global.cloud.authUrl.secretKey }} + {{- end}} + {{- if .Values.global.cloud.apiHost.secretName }} + - name: HCP_API_HOST + valueFrom: + secretKeyRef: + name: {{ .Values.global.cloud.apiHost.secretName }} + key: {{ .Values.global.cloud.apiHost.secretKey }} + {{- end}} + {{- if .Values.global.cloud.scadaAddress.secretName }} + - name: HCP_SCADA_ADDRESS + valueFrom: + secretKeyRef: + name: {{ .Values.global.cloud.scadaAddress.secretName }} + key: {{ .Values.global.cloud.scadaAddress.secretKey }} + {{- end}} + {{- if .Values.global.trustedCAs }} + - name: SSL_CERT_DIR + value: "/etc/ssl/certs:/trusted-cas" + {{- end }} + {{- include "consul.extraEnvironmentVars" .Values.telemetryCollector | nindent 12 }} + command: + - "/bin/sh" + - "-ec" + - | + {{- if .Values.global.trustedCAs }} + {{- range $i, $cert := .Values.global.trustedCAs }} + cat <<EOF > /trusted-cas/custom-ca-{{$i}}.pem + {{- $cert | nindent 10 }} + EOF + {{- end }} + {{- end }} + + exec consul-telemetry-collector agent \ + {{- if .Values.telemetryCollector.customExporterConfig }} + -config-file-path /consul/config/config.json \ + {{ end }} + volumeMounts: + {{- if .Values.telemetryCollector.customExporterConfig }} + - name: config + mountPath: /consul/config + {{- end }} + {{- if .Values.global.trustedCAs }} + - name: trusted-cas + mountPath: /trusted-cas + readOnly: false + {{- end }} + resources: + {{- if .Values.telemetryCollector.resources }} + {{- toYaml
.Values.telemetryCollector.resources | nindent 12 }} + {{- end }} + # consul-dataplane container + - name: consul-dataplane + image: "{{ .Values.global.imageConsulDataplane }}" + imagePullPolicy: IfNotPresent + command: + - consul-dataplane + args: + # addresses + {{- if .Values.externalServers.enabled }} + - -addresses={{ .Values.externalServers.hosts | first }} + {{- else }} + - -addresses={{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc + {{- end }} + # grpc + {{- if .Values.externalServers.enabled }} + - -grpc-port={{ .Values.externalServers.grpcPort }} + {{- else }} + - -grpc-port=8502 + {{- end }} + # tls + {{- if .Values.global.tls.enabled }} + {{- if (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) }} + {{- if .Values.global.secretsBackend.vault.enabled }} + - -ca-certs=/vault/secrets/serverca.crt + {{- else }} + - -ca-certs=/consul/tls/ca/tls.crt + {{- end }} + {{- end }} + {{- if and .Values.externalServers.enabled .Values.externalServers.tlsServerName }} + - -tls-server-name={{.Values.externalServers.tlsServerName }} + {{- else if .Values.global.cloud.enabled }} + - -tls-server-name=server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}} + {{- end }} + {{- else }} + - -tls-disabled + {{- end }} + # credentials + {{- if .Values.global.acls.manageSystemACLs }} + - -credential-type=login + - -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + - -login-auth-method={{ template "consul.fullname" . 
}}-k8s-auth-method + {{- end }} + # service and login namespace + {{- if .Values.global.enableConsulNamespaces }} + {{- if .Values.connectInject.consulNamespaces.mirroringK8S }} + - -service-namespace={{ .Values.connectInject.consulNamespaces.mirroringK8SPrefix }}{{ .Release.Namespace }} + {{- else }} + - -service-namespace={{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + {{- if .Values.connectInject.consulNamespaces.mirroringK8S }} + - -login-namespace=default + {{- else }} + - -login-namespace={{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} + {{- end }} + {{- end }} + {{- end }} + # service and login partition + {{- if .Values.global.adminPartitions.enabled }} + - -service-partition={{ .Values.global.adminPartitions.name }} + {{- if .Values.global.acls.manageSystemACLs }} + - -login-partition={{ .Values.global.adminPartitions.name }} + {{- end }} + {{- end }} + # telemetry + {{- if .Values.global.metrics.enabled }} + - -telemetry-prom-scrape-path=/metrics + {{- end }} + - -log-level={{ default .Values.global.logLevel .Values.telemetryCollector.logLevel }} + - -log-json={{ .Values.global.logJSON }} + - -envoy-concurrency=2 + {{- if and .Values.externalServers.enabled .Values.externalServers.skipServerWatch }} + - -server-watch-disabled=true + {{- end }} + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DP_PROXY_ID + value: $(POD_NAME) + - name: DP_CREDENTIAL_LOGIN_META1 + value: pod=$(NAMESPACE)/$(POD_NAME) + - name: DP_CREDENTIAL_LOGIN_META2 + value: component=consul-telemetry-collector + - name: TMPDIR + value: /consul/mesh-inject + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 1 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: 20000 + timeoutSeconds: 1 + securityContext: + readOnlyRootFilesystem: true + 
runAsGroup: 5995 + runAsNonRoot: true + runAsUser: 5995 + # dataplane volume mounts + volumeMounts: + - mountPath: /consul/mesh-inject + name: consul-mesh-inject-data + {{- if .Values.global.tls.enabled }} + {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- end }} + + {{- if .Values.telemetryCollector.nodeSelector }} + nodeSelector: + {{ tpl .Values.telemetryCollector.nodeSelector . | indent 8 | trim }} + {{- end }} + {{- if .Values.telemetryCollector.priorityClassName }} + priorityClassName: {{ .Values.telemetryCollector.priorityClassName }} + {{- end }} + volumes: + - emptyDir: + medium: Memory + name: consul-mesh-inject-data + {{- if .Values.global.trustedCAs }} + - name: trusted-cas + emptyDir: + medium: "Memory" + {{- end }} + {{- if .Values.global.tls.enabled }} + {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- end }} + - name: config + configMap: + name: {{ template "consul.fullname" . 
}}-telemetry-collector +{{- end }} diff --git a/charts/consul/templates/terminating-gateways-deployment.yaml b/charts/consul/templates/terminating-gateways-deployment.yaml index c4970979b1..9afe938e56 100644 --- a/charts/consul/templates/terminating-gateways-deployment.yaml +++ b/charts/consul/templates/terminating-gateways-deployment.yaml @@ -129,9 +129,6 @@ spec: terminationGracePeriodSeconds: 10 serviceAccountName: {{ template "consul.fullname" $root }}-{{ .name }} volumes: - - name: tmp - emptyDir: - medium: "Memory" - name: consul-service emptyDir: medium: "Memory" @@ -169,7 +166,6 @@ spec: # terminating-gateway-init registers the terminating gateway service with Consul. - name: terminating-gateway-init image: {{ $root.Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" $root }} {{- include "consul.restrictedSecurityContext" $ | nindent 10 }} env: - name: NAMESPACE @@ -210,8 +206,6 @@ spec: -log-level={{ default $root.Values.global.logLevel $root.Values.terminatingGateways.logLevel }} \ -log-json={{ $root.Values.global.logJSON }} volumeMounts: - - name: tmp - mountPath: /tmp - name: consul-service mountPath: /consul/service {{- if $root.Values.global.tls.enabled }} @@ -231,11 +225,8 @@ spec: containers: - name: terminating-gateway image: {{ $root.Values.global.imageConsulDataplane | quote }} - {{ template "consul.imagePullPolicy" $root }} {{- include "consul.restrictedSecurityContext" $ | nindent 10 }} volumeMounts: - - name: tmp - mountPath: /tmp - name: consul-service mountPath: /consul/service readOnly: true diff --git a/charts/consul/templates/tests/test-runner.yaml b/charts/consul/templates/tests/test-runner.yaml index 4c3e81ccea..b8b078003b 100644 --- a/charts/consul/templates/tests/test-runner.yaml +++ b/charts/consul/templates/tests/test-runner.yaml @@ -37,7 +37,6 @@ spec: containers: - name: consul-test image: "{{ .Values.global.image }}" - {{ template "consul.imagePullPolicy" . 
}} env: - name: HOST_IP valueFrom: diff --git a/charts/consul/templates/tls-init-cleanup-job.yaml b/charts/consul/templates/tls-init-cleanup-job.yaml index 5ebe236df5..9500410a53 100644 --- a/charts/consul/templates/tls-init-cleanup-job.yaml +++ b/charts/consul/templates/tls-init-cleanup-job.yaml @@ -49,7 +49,6 @@ spec: containers: - name: tls-init-cleanup image: "{{ .Values.global.image }}" - {{ template "consul.imagePullPolicy" . }} {{- if not .Values.server.containerSecurityContext.tlsInit }} {{- include "consul.restrictedSecurityContext" . | nindent 10 }} {{- end }} diff --git a/charts/consul/templates/tls-init-job.yaml b/charts/consul/templates/tls-init-job.yaml index 177472c9a4..41c0c2827e 100644 --- a/charts/consul/templates/tls-init-job.yaml +++ b/charts/consul/templates/tls-init-job.yaml @@ -64,7 +64,6 @@ spec: containers: - name: tls-init image: "{{ .Values.global.imageK8S }}" - {{ template "consul.imagePullPolicy" . }} {{- if not .Values.server.containerSecurityContext.tlsInit }} {{- include "consul.restrictedSecurityContext" . | nindent 10 }} {{- end }} diff --git a/charts/consul/templates/webhook-cert-manager-deployment.yaml b/charts/consul/templates/webhook-cert-manager-deployment.yaml index 71cddcaf84..78bb50168e 100644 --- a/charts/consul/templates/webhook-cert-manager-deployment.yaml +++ b/charts/consul/templates/webhook-cert-manager-deployment.yaml @@ -51,7 +51,6 @@ spec: -deployment-name={{ template "consul.fullname" . }}-webhook-cert-manager \ -deployment-namespace={{ .Release.Namespace }} image: {{ .Values.global.imageK8S }} - {{ template "consul.imagePullPolicy" . }} name: webhook-cert-manager {{- include "consul.restrictedSecurityContext" . 
| nindent 8 }} {{- with .Values.webhookCertManager.resources }} diff --git a/charts/consul/test/unit/api-gateway-controller-clusterrole.bats b/charts/consul/test/unit/api-gateway-controller-clusterrole.bats new file mode 100644 index 0000000000..f26fdfeebd --- /dev/null +++ b/charts/consul/test/unit/api-gateway-controller-clusterrole.bats @@ -0,0 +1,45 @@ +#!/usr/bin/env bats + +load _helpers + +@test "apiGateway/ClusterRole: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-controller-clusterrole.yaml \ + . +} + +@test "apiGateway/ClusterRole: enabled with apiGateway.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-clusterrole.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/ClusterRole: can use podsecuritypolicies with apiGateway.enabled=true and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-clusterrole.yaml \ + --set 'global.enablePodSecurityPolicies=true' \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . | tee /dev/stderr | + yq '.rules[] | select((.resources[0] == "podsecuritypolicies") and (.verbs[0] == "use")) | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/ClusterRole: can create roles and rolebindings with apiGateway.enabled=true and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-clusterrole.yaml \ + --set 'global.enablePodSecurityPolicies=true' \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . 
| tee /dev/stderr | + yq '.rules[] | select((.resources[0] == "roles") and (.resources[1] == "rolebindings") and (.verbs | contains(["create","get","list","watch"]))) | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/api-gateway-controller-clusterrolebinding.bats b/charts/consul/test/unit/api-gateway-controller-clusterrolebinding.bats new file mode 100644 index 0000000000..3dfd94c36f --- /dev/null +++ b/charts/consul/test/unit/api-gateway-controller-clusterrolebinding.bats @@ -0,0 +1,22 @@ +#!/usr/bin/env bats + +load _helpers + +@test "apiGateway/ClusterRoleBinding: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-controller-clusterrolebinding.yaml \ + . +} + +@test "apiGateway/ClusterRoleBinding: enabled with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-clusterrolebinding.yaml \ + --set 'global.enabled=false' \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/api-gateway-controller-deployment.bats b/charts/consul/test/unit/api-gateway-controller-deployment.bats new file mode 100755 index 0000000000..696d5f7cbb --- /dev/null +++ b/charts/consul/test/unit/api-gateway-controller-deployment.bats @@ -0,0 +1,1754 @@ +#!/usr/bin/env bats + +load _helpers + +@test "apiGateway/Deployment: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + . +} + +@test "apiGateway/Deployment: fails if no image is set" { + cd `chart_dir` + run helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + . 
+ [ "$status" -eq 1 ] + [[ "$output" =~ "apiGateway.image must be set to enable api gateway" ]] +} + +@test "apiGateway/Deployment: disable with apiGateway.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=false' \ + . +} + +@test "apiGateway/Deployment: disable with global.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'global.enabled=false' \ + . +} + +@test "apiGateway/Deployment: enable namespaces" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | join(" ") | contains("-consul-destination-namespace=default")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: enable namespace mirroring" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | join(" ") | contains("-mirroring-k8s=true")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: enable namespace mirroring prefixes" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ + --set 'connectInject.consulNamespaces.mirroringK8SPrefix=foo' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | join(" ") | contains("-mirroring-k8s-prefix=foo")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: container image overrides" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "\"bar\"" ] +} + +@test "apiGateway/Deployment: SDS host set correctly" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | join(" ") | contains("-sds-server-host release-name-consul-api-gateway-controller.default.svc")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# nodeSelector + +@test "apiGateway/Deployment: nodeSelector is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "apiGateway/Deployment: specified nodeSelector" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'apiGateway.controller.nodeSelector=testing' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "testing" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "apiGateway/Deployment: Adds tls-ca-cert volume when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "apiGateway/Deployment: Adds tls-ca-cert volumeMounts when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "apiGateway/Deployment: can overwrite CA secret with the provided one" { + cd `chart_dir` + local ca_cert_volume=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that the volume uses the provided secret key + actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} + +#-------------------------------------------------------------------- +# global.tls.enableAutoEncrypt + +@test "apiGateway/Deployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled with clients" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'client.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: consul-ca-cert volumeMount is added when TLS with auto-encrypt is enabled without clients" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: adds both init containers when TLS with auto-encrypt and ACLs + namespaces are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers | length == 3' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo.com' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +#-------------------------------------------------------------------- +# global.acls.manageSystemACLs + +@test "apiGateway/Deployment: consul-logout preStop hook is added when ACLs are enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[1]] | any(contains("logout"))' | tee /dev/stderr) + [ "${object}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . 
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[1].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "apiGateway/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[1].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_LOGIN_DATACENTER is set when acls are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[2].name] | any(contains("CONSUL_LOGIN_DATACENTER"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: init container is created when global.acls.manageSystemACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "api-gateway-controller-acl-init" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[0].name] | any(contains("NAMESPACE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("POD_NAME"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_LOGIN_META"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("component=api-gateway-controller,pod=$(NAMESPACE)/$(POD_NAME)"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[3].name] | any(contains("CONSUL_LOGIN_DATACENTER"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '[.env[8].value] | any(contains("5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.consulAPITimeout=5s' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "api-gateway-controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "NAMESPACE") | [.valueFrom.fieldRef.fieldPath] | any(contains("metadata.namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "POD_NAME") | [.valueFrom.fieldRef.fieldPath] | any(contains("metadata.name"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_LOGIN_META") | [.value] | any(contains("component=api-gateway-controller,pod=$(NAMESPACE)/$(POD_NAME)"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_ADDRESSES") | [.value] | any(contains("release-name-consul-server.default.svc"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_GRPC_PORT") | [.value] | any(contains("8502"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_HTTP_PORT") | [.value] | any(contains("8501"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_DATACENTER") | [.value] | any(contains("dc1"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_API_TIMEOUT") | [.value] | any(contains("5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_USE_TLS") | [.value] | any(contains("true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == 
"CONSUL_CACERT_FILE") | [.value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[] | select(.name == "consul-ca-cert") | [.mountPath] | any(contains("/consul/tls/ca"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[] | select(.name == "consul-data") | [.mountPath] | any(contains("/consul/login"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "api-gateway-controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-auth-method-name=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "NAMESPACE") | [.valueFrom.fieldRef.fieldPath] | any(contains("metadata.namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "POD_NAME") | [.valueFrom.fieldRef.fieldPath] | any(contains("metadata.name"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_LOGIN_META") | [.value] | any(contains("component=api-gateway-controller,pod=$(NAMESPACE)/$(POD_NAME)"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_ADDRESSES") | [.value] | any(contains("release-name-consul-server.default.svc"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_GRPC_PORT") | [.value] | any(contains("8502"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_HTTP_PORT") | [.value] | any(contains("8501"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_DATACENTER") | [.value] | any(contains("dc1"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_API_TIMEOUT") | [.value] | any(contains("5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] 
| select(.name == "CONSUL_PARTITION") | [.value] | any(contains("default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_LOGIN_PARTITION") | [.value] | any(contains("default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_USE_TLS") | [.value] | any(contains("true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_CACERT_FILE") | [.value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[] | select(.name == "consul-ca-cert") | [.mountPath] | any(contains("/consul/tls/ca"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[] | select(.name == "consul-data") | [.mountPath] | any(contains("/consul/login"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: consul login datacenter is set to primary when when federation enabled in non-primary datacenter" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'meshGateway.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.datacenter=dc1' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc2' \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq '[.env[3].name] | any(contains("CONSUL_LOGIN_DATACENTER"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[3].value] | any(contains("dc2"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: primary-datacenter flag provided when federation enabled in non-primary datacenter" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.datacenter=dc2' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[] | select(.name == "api-gateway-controller")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-api-gateway server"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when federation enabled in non-primary datacenter" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.datacenter=dc2' \ + --set 
'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "api-gateway-controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-auth-method-name=release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[3].value] | any(contains("dc1"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "api-gateway-controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "NAMESPACE") | [.valueFrom.fieldRef.fieldPath] | any(contains("metadata.namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "POD_NAME") | [.valueFrom.fieldRef.fieldPath] | any(contains("metadata.name"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_LOGIN_META") | [.value] | any(contains("component=api-gateway-controller,pod=$(NAMESPACE)/$(POD_NAME)"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_ADDRESSES") | [.value] | any(contains("release-name-consul-server.default.svc"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_GRPC_PORT") | [.value] | any(contains("8502"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_HTTP_PORT") | [.value] | any(contains("8501"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_DATACENTER") | [.value] | any(contains("dc1"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_API_TIMEOUT") | [.value] | any(contains("5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == "CONSUL_USE_TLS") | [.value] | any(contains("true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.env[] | select(.name == 
"CONSUL_CACERT_FILE") | [.value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[] | select(.name == "consul-ca-cert") | [.mountPath] | any(contains("/consul/tls/ca"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[] | select(.name == "consul-data") | [.mountPath] | any(contains("/consul/login"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: init container for copy consul is created when global.acls.manageSystemACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "copy-consul-bin")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("cp"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.volumeMounts[0] | any(contains("consul-bin"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: volumeMount for copy consul is created on container when global.acls.manageSystemACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[0] | any(contains("consul-bin"))' | tee /dev/stderr) + + [ "${object}" = "true" ] +} + +@test "apiGateway/Deployment: volume for copy consul is created when global.acls.manageSystemACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[0] | any(contains("consul-bin"))' | tee /dev/stderr) + + [ "${object}" = "true" ] +} + +@test "apiGateway/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "get-auto-encrypt-client-ca" ] +} + +#-------------------------------------------------------------------- +# resources + +@test "apiGateway/Deployment: resources has default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + + [ $(echo "${actual}" | yq -r '.requests.memory') = "100Mi" ] + [ $(echo "${actual}" | yq -r '.requests.cpu') = "100m" ] + [ $(echo "${actual}" | yq -r '.limits.memory') = "100Mi" ] + [ $(echo "${actual}" | yq -r '.limits.cpu') = "100m" ] +} + +@test "apiGateway/Deployment: resources can be overridden" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'apiGateway.resources.foo=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# init container resources + +@test "apiGateway/Deployment: init container has default resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + [ $(echo "${actual}" | yq -r '.requests.memory') = "25Mi" ] + [ $(echo "${actual}" | yq -r '.requests.cpu') = "50m" ] + [ $(echo "${actual}" | yq -r '.limits.memory') = "150Mi" ] + [ $(echo "${actual}" | yq -r '.limits.cpu') = "50m" ] +} + +@test "apiGateway/Deployment: init container resources can be set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'apiGateway.initCopyConsulContainer.resources.requests.memory=memory' \ + --set 'apiGateway.initCopyConsulContainer.resources.requests.cpu=cpu' \ + --set 'apiGateway.initCopyConsulContainer.resources.limits.memory=memory2' \ + --set 'apiGateway.initCopyConsulContainer.resources.limits.cpu=cpu2' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] +} + +#-------------------------------------------------------------------- +# priorityClassName + +@test "apiGateway/Deployment: no priorityClassName by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.priorityClassName' | tee /dev/stderr) + + [ "${actual}" = "null" ] +} + +@test "apiGateway/Deployment: can set a priorityClassName" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'apiGateway.controller.priorityClassName=name' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.priorityClassName' | tee /dev/stderr) + + [ "${actual}" = "name" ] +} + +#-------------------------------------------------------------------- +# logLevel + +@test "apiGateway/Deployment: logLevel info by default from global" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level info"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: logLevel can be overridden" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'apiGateway.logLevel=debug' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level debug"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# replicas + +@test "apiGateway/Deployment: replicas defaults to 1" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . 
| tee /dev/stderr | + yq '.spec.replicas' | tee /dev/stderr) + + [ "${actual}" = "1" ] +} + +@test "apiGateway/Deployment: replicas can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'apiGateway.controller.replicas=3' \ + . | tee /dev/stderr | + yq '.spec.replicas' | tee /dev/stderr) + + [ "${actual}" = "3" ] +} + + +#-------------------------------------------------------------------- +# get-auto-encrypt-client-ca + +@test "apiGateway/Deployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { + cd `chart_dir` + local command=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr) + + # check server address + actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")') + [ "${actual}" = "true" ] + + # check server port + actual=$(echo $command | jq ' . | contains("-server-port=8501")') + [ "${actual}" = "true" ] + + # check server's CA cert + actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') + [ "${actual}" = "true" ] + + # check consul-api-timeout + actual=$(echo $command | jq ' . 
| contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# Vault + +@test "apiGateway/Deployment: vault CA is not configured by default" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "apiGateway/Deployment: vault CA is not configured when secretName is set but secretKey is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "apiGateway/Deployment: vault CA is not configured when secretKey is set but secretName is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "apiGateway/Deployment: vault CA is configured when both secretName and secretKey are set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-extra-secret"') + [ "${actual}" = "ca" ] + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/ca-cert"') + [ "${actual}" = "/vault/custom/tls.crt" ] +} + +@test "apiGateway/Deployment: vault tls annotations are set when tls is enabled" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'server.serverCert.secretName=pki_int/issue/test' \ + --set 'global.tls.caCert.secretName=pki_int/cert/ca' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr)" + local expected=$'{{- with secret \"pki_int/cert/ca\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr)" + [ "${actual}" = "pki_int/cert/ca" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-init-first"]' | tee /dev/stderr)" + [ "${actual}" = "true" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr)" + [ "${actual}" = "true" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr)" + [ "${actual}" = "test" ] +} + +@test "apiGateway/Deployment: vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "vns" ] +} + +@test "apiGateway/Deployment: correct vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set and agentAnnotations are set without vaultNamespace annotation" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.secretsBackend.vault.agentAnnotations=vault.hashicorp.com/agent-extra-secret: bar' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "vns" ] +} + +@test "apiGateway/Deployment: correct vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set and agentAnnotations are also set with vaultNamespace annotation" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.secretsBackend.vault.agentAnnotations="vault.hashicorp.com/namespace": bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "bar" ] +} + +@test "apiGateway/Deployment: vault agent annotations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# global.cloud + +@test "apiGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { + cd `chart_dir` + run helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientSecret.secretName=client-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-id-key' \ + --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." 
]] +} + +@test "apiGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { + cd `chart_dir` + run helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] +} + +@test "apiGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { + cd `chart_dir` + run helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + . 
+ [ "$status" -eq 1 ] + [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] +} + +@test "apiGateway/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { + cd `chart_dir` + run helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] +} + +@test "apiGateway/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." 
{ + cd `chart_dir` + run helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.authUrl.secretName=auth-url-name' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] +} + +@test "apiGateway/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { + cd `chart_dir` + run helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.authUrl.secretKey=auth-url-key' \ + . 
+ [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] +} + +@test "apiGateway/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { + cd `chart_dir` + run helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.apiHost.secretName=auth-url-name' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] +} + +@test "apiGateway/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." 
{ + cd `chart_dir` + run helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.apiHost.secretKey=auth-url-key' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] +} + +@test "apiGateway/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." { + cd `chart_dir` + run helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ + . 
+ [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] +} + +@test "apiGateway/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { + cd `chart_dir` + run helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] +} + +#-------------------------------------------------------------------- +# CONSUL_HTTP_SSL + +@test "apiGateway/Deployment: CONSUL_HTTP_SSL set correctly when not using TLS." { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[2].value' | tee /dev/stderr) + [ "${actual}" = "\"false\"" ] +} + +@test "apiGateway/Deployment: CONSUL_HTTP_SSL set correctly when using TLS." 
{ + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[3].value' | tee /dev/stderr) + [ "${actual}" = "\"true\"" ] +} + +#-------------------------------------------------------------------- +# CONSUL_HTTP_ADDR + +@test "apiGateway/Deployment: CONSUL_HTTP_ADDR set correctly with external servers, TLS, and no clients." { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.httpsPort=8501' \ + --set 'server.enabled=false' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[2].value] | any(contains("external-consul.host:8501"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_HTTP_ADDR set correctly with external servers, no TLS, and no clients" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=false' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.httpsPort=8500' \ + --set 'server.enabled=false' \ + --set 'client.enabled=false' \ + . 
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[1].value] | any(contains("external-consul.host:8500"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_HTTP_ADDR set correctly with local servers, TLS, and clients" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[2].value] | any(contains("$(HOST_IP):8501"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_HTTP_ADDR set correctly with local servers, no TLS, and clients" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=false' \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[1].value] | any(contains("$(HOST_IP):8500"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_HTTP_ADDR set correctly with local servers, TLS, and no clients" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'client.enabled=false' \ + . 
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[2].value] | any(contains("release-name-consul-server:8501"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_HTTP_ADDR set correctly with local servers, no TLS, and no clients" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=false' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[1].value] | any(contains("release-name-consul-server:8500"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# externalServers tlsServerName + +@test "apiGateway/Deployment: CONSUL_TLS_SERVER_NAME can be set for externalServers" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.httpsPort=8501' \ + --set 'externalServers.tlsServerName=hashi' \ + --set 'server.enabled=false' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[4].value == "hashi"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_TLS_SERVER_NAME will not be set for when clients are used" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.httpsPort=8501' \ + --set 'externalServers.tlsServerName=hashi' \ + --set 'client.enabled=true' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[] | select (.name == "api-gateway-controller") | .env[] | select(.name == "CONSUL_TLS_SERVER_NAME")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +#-------------------------------------------------------------------- +# Admin Partitions + +@test "apiGateway/Deployment: CONSUL_PARTITION is set when using admin partitions" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=hashi' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[3].value == "hashi"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_LOGIN_PARTITION is set when using admin partitions with ACLs" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=hashi' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[6].value == "hashi"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_DYNAMIC_SERVER_DISCOVERY is set when not using clients" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[3].value == "true"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_DYNAMIC_SERVER_DISCOVERY is not set when using clients" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'client.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[3]' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "apiGateway/Deployment: CONSUL_CACERT is set when using tls and clients even when useSystemRoots is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.useSystemRoots=true' \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[0].name == "CONSUL_CACERT"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_CACERT is set when using tls and internal servers" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[0].name == "CONSUL_CACERT"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_CACERT has correct path with Vault as secrets backend and client disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'server.enabled=true' \ + --set 'client.enabled=false' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + . 
| tee /dev/stderr| + yq '.spec.template.spec.containers[0].env[0].value == "/vault/secrets/serverca.crt"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Deployment: CONSUL_CACERT is not set when using tls and useSystemRoots" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[0].name == "CONSUL_CACERT"' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "apiGateway/Deployment: consul-ca-cert volume mount is not set when using externalServers and useSystemRoots" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "apiGateway/Deployment: consul-ca-cert volume mount is not set when using Vault as a secrets backend" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "apiGateway/Deployment: consul-ca-cert volume mount is not set on acl-init when using externalServers and useSystemRoots" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "apiGateway/Deployment: consul-ca-cert volume mount is not set on acl-init when using Vault as secrets backend" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[1].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "apiGateway/Deployment: consul-auto-encrypt-ca-cert volume mount is set when tls.enabled, client.enabled, externalServers, useSystemRoots, and autoencrypt" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'client.enabled=true' \ + --set 'server.enabled=false' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | .mountPath' | tee /dev/stderr) + [ "${actual}" = '"/consul/tls/ca"' ] +} + +#-------------------------------------------------------------------- +# extraLabels + +@test "apiGateway/Deployment: no extra labels defined by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.labels | del(."app") | del(."chart") | del(."release") | del(."component")' | tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "apiGateway/Deployment: extra global labels can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.extraLabels.foo=bar' \ + . 
| tee /dev/stderr) + local actualBar=$(echo "${actual}" | yq -r '.metadata.labels.foo' | tee /dev/stderr) + [ "${actualBar}" = "bar" ] + local actualTemplateBar=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) + [ "${actualTemplateBar}" = "bar" ] +} + +@test "apiGateway/Deployment: multiple global extra labels can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-deployment.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=bar' \ + --set 'global.extraLabels.foo=bar' \ + --set 'global.extraLabels.baz=qux' \ + . | tee /dev/stderr) + local actualFoo=$(echo "${actual}" | yq -r '.metadata.labels.foo' | tee /dev/stderr) + local actualBaz=$(echo "${actual}" | yq -r '.metadata.labels.baz' | tee /dev/stderr) + [ "${actualFoo}" = "bar" ] + [ "${actualBaz}" = "qux" ] + local actualTemplateFoo=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) + local actualTemplateBaz=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.baz' | tee /dev/stderr) + [ "${actualTemplateFoo}" = "bar" ] + [ "${actualTemplateBaz}" = "qux" ] +} diff --git a/charts/consul/test/unit/api-gateway-controller-podsecuritypolicy.bats b/charts/consul/test/unit/api-gateway-controller-podsecuritypolicy.bats new file mode 100644 index 0000000000..dfd40c793f --- /dev/null +++ b/charts/consul/test/unit/api-gateway-controller-podsecuritypolicy.bats @@ -0,0 +1,22 @@ +#!/usr/bin/env bats + +load _helpers + +@test "apiGateway/PodSecurityPolicy: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-controller-podsecuritypolicy.yaml \ + . 
+} + +@test "apiGateway/PodSecurityPolicy: enabled with apiGateway.enabled=true and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-podsecuritypolicy.yaml \ + --set 'global.enablePodSecurityPolicies=true' \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/api-gateway-controller-service.bats b/charts/consul/test/unit/api-gateway-controller-service.bats new file mode 100755 index 0000000000..47cb7ff9aa --- /dev/null +++ b/charts/consul/test/unit/api-gateway-controller-service.bats @@ -0,0 +1,30 @@ +#!/usr/bin/env bats + +load _helpers + +@test "apiGateway/Service: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-controller-service.yaml \ + . +} + +@test "apiGateway/Service: enable with apiGateway.enabled set to true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-service.yaml \ + --set 'global.enabled=false' \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/Service: disable with apiGateway.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-controller-service.yaml \ + --set 'apiGateway.enabled=false' \ + . 
+} diff --git a/charts/consul/test/unit/api-gateway-controller-serviceaccount.bats b/charts/consul/test/unit/api-gateway-controller-serviceaccount.bats new file mode 100644 index 0000000000..22486799b2 --- /dev/null +++ b/charts/consul/test/unit/api-gateway-controller-serviceaccount.bats @@ -0,0 +1,76 @@ +#!/usr/bin/env bats + +load _helpers + +@test "apiGateway/ServiceAccount: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-controller-serviceaccount.yaml \ + . +} + +@test "apiGateway/ServiceAccount: enabled with apiGateway.enabled true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-serviceaccount.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/ServiceAccount: disabled with apiGateway.enabled false" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-controller-serviceaccount.yaml \ + --set 'apiGateway.enabled=false' \ + . +} +#-------------------------------------------------------------------- +# global.imagePullSecrets + +@test "apiGateway/ServiceAccount: can set image pull secrets" { + cd `chart_dir` + local object=$(helm template \ + -s templates/api-gateway-controller-serviceaccount.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.imagePullSecrets[0].name=my-secret' \ + --set 'global.imagePullSecrets[1].name=my-secret2' \ + . 
| tee /dev/stderr) + + local actual=$(echo "$object" | + yq -r '.imagePullSecrets[0].name' | tee /dev/stderr) + [ "${actual}" = "my-secret" ] + + local actual=$(echo "$object" | + yq -r '.imagePullSecrets[1].name' | tee /dev/stderr) + [ "${actual}" = "my-secret2" ] +} + +#-------------------------------------------------------------------- +# apiGateway.serviceAccount.annotations + +@test "apiGateway/ServiceAccount: no annotations by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-serviceaccount.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . | tee /dev/stderr | + yq '.metadata.annotations | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "apiGateway/ServiceAccount: annotations when enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-controller-serviceaccount.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set "apiGateway.serviceAccount.annotations=foo: bar" \ + . | tee /dev/stderr | + yq -r '.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} diff --git a/charts/consul/test/unit/api-gateway-gatewayclass.bats b/charts/consul/test/unit/api-gateway-gatewayclass.bats new file mode 100755 index 0000000000..c79753c2f3 --- /dev/null +++ b/charts/consul/test/unit/api-gateway-gatewayclass.bats @@ -0,0 +1,48 @@ +#!/usr/bin/env bats + +load _helpers + +@test "apiGateway/GatewayClass: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-gatewayclass.yaml \ + . +} + +@test "apiGateway/GatewayClass: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-gatewayclass.yaml \ + --set 'global.enabled=false' \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/GatewayClass: disable with apiGateway.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-gatewayclass.yaml \ + --set 'apiGateway.enabled=false' \ + . +} + +@test "apiGateway/GatewayClass: disable with global.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-gatewayclass.yaml \ + --set 'global.enabled=false' \ + . +} + +@test "apiGateway/GatewayClass: disable with apiGateway.managedGatewayClass.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-gatewayclass.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'apiGateway.managedGatewayClass.enabled=false' \ + . +} diff --git a/charts/consul/test/unit/api-gateway-gatewayclassconfig.bats b/charts/consul/test/unit/api-gateway-gatewayclassconfig.bats new file mode 100644 index 0000000000..742f31afa0 --- /dev/null +++ b/charts/consul/test/unit/api-gateway-gatewayclassconfig.bats @@ -0,0 +1,186 @@ +#!/usr/bin/env bats + +load _helpers + +@test "apiGateway/GatewayClassConfig: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/api-gateway-gatewayclassconfig.yaml \ + . +} + +@test "apiGateway/GatewayClassConfig: enabled with apiGateway.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-gatewayclassconfig.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/GatewayClassConfig: deployment config disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-gatewayclassconfig.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + . 
| tee /dev/stderr | + yq '.spec | has("deployment") | not' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/GatewayClassConfig: deployment config enabled with defaultInstances=3" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-gatewayclassconfig.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'apiGateway.managedGatewayClass.deployment.defaultInstances=3' \ + . | tee /dev/stderr | + yq '.spec.deployment.defaultInstances == 3' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/GatewayClassConfig: deployment config enabled with maxInstances=3" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-gatewayclassconfig.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'apiGateway.managedGatewayClass.deployment.maxInstances=3' \ + . | tee /dev/stderr | + yq '.spec.deployment.maxInstances == 3' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/GatewayClassConfig: deployment config enabled with minInstances=3" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-gatewayclassconfig.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'apiGateway.managedGatewayClass.deployment.minInstances=3' \ + . | tee /dev/stderr | + yq '.spec.deployment.minInstances == 3' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/GatewayClassConfig: imageEnvoy can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-gatewayclassconfig.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'apiGateway.imageEnvoy=bar' \ + . 
| tee /dev/stderr | + yq '.spec.image.envoy' | tee /dev/stderr) + [ "${actual}" = "\"bar\"" ] +} + +#-------------------------------------------------------------------- +# Consul server address + +@test "apiGateway/GatewayClassConfig: Consul server address set with external servers and no clients." { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-gatewayclassconfig.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'server.enabled=false' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.consul.address == "external-consul.host"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/GatewayClassConfig: Consul server address set with external servers and clients." { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-gatewayclassconfig.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'server.enabled=false' \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.consul.address == "$(HOST_IP)"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/GatewayClassConfig: Consul server address set with local servers and no clients." { + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-gatewayclassconfig.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.consul.address == "release-name-consul-server.default.svc"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "apiGateway/GatewayClassConfig: Consul server address set with local servers and clients." 
{ + cd `chart_dir` + local actual=$(helm template \ + -s templates/api-gateway-gatewayclassconfig.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.consul.address == "$(HOST_IP)"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# externalServers ports + +@test "apiGateway/GatewayClassConfig: ports for externalServers when not using TLS." { + cd `chart_dir` + local ports=$(helm template \ + -s templates/api-gateway-gatewayclassconfig.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=false' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.grpcPort=1234' \ + --set 'externalServers.httpsPort=5678' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.consul.ports' | tee /dev/stderr) + + local actual + actual=$(echo $ports | jq -r '.grpc' | tee /dev/stderr) + [ "${actual}" = "1234" ] + + actual=$(echo $ports | jq -r '.http' | tee /dev/stderr) + [ "${actual}" = "5678" ] +} + +@test "apiGateway/GatewayClassConfig: ports for externalServers when using TLS." { + cd `chart_dir` + local ports=$(helm template \ + -s templates/api-gateway-gatewayclassconfig.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.grpcPort=1234' \ + --set 'externalServers.httpsPort=5678' \ + --set 'server.enabled=false' \ + . 
| tee /dev/stderr | + yq '.spec.consul.ports' | tee /dev/stderr) + + local actual + actual=$(echo $ports | jq -r '.grpc' | tee /dev/stderr) + [ "${actual}" = "1234" ] + + actual=$(echo $ports | jq -r '.http' | tee /dev/stderr) + [ "${actual}" = "5678" ] +} diff --git a/charts/consul/test/unit/client-daemonset.bats b/charts/consul/test/unit/client-daemonset.bats index 00fb346e26..77c23c4672 100755 --- a/charts/consul/test/unit/client-daemonset.bats +++ b/charts/consul/test/unit/client-daemonset.bats @@ -2824,6 +2824,8 @@ rollingUpdate: cd `chart_dir` run helm template \ -s templates/client-daemonset.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.datacenter=dc-foo' \ @@ -2842,6 +2844,8 @@ rollingUpdate: cd `chart_dir` run helm template \ -s templates/client-daemonset.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.datacenter=dc-foo' \ @@ -2861,6 +2865,8 @@ rollingUpdate: cd `chart_dir` run helm template \ -s templates/client-daemonset.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.datacenter=dc-foo' \ @@ -2883,6 +2889,8 @@ rollingUpdate: cd `chart_dir` run helm template \ -s templates/client-daemonset.yaml \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=foo' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.datacenter=dc-foo' \ diff --git a/charts/consul/test/unit/connect-inject-clusterrole.bats b/charts/consul/test/unit/connect-inject-clusterrole.bats index d02b9eacde..cfe64337d9 100644 --- a/charts/consul/test/unit/connect-inject-clusterrole.bats +++ b/charts/consul/test/unit/connect-inject-clusterrole.bats @@ -241,3 +241,28 @@ load _helpers yq 
'.rules[13].resourceNames | index("fakescc")' | tee /dev/stderr) [ "${object}" == 0 ] } + +#-------------------------------------------------------------------- +# resource-apis + +@test "connectInject/ClusterRole: adds permission to mesh.consul.hashicorp.com with resource-apis in global.experiments" { + cd `chart_dir` + local object=$(helm template \ + -s templates/connect-inject-clusterrole.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments={resource-apis}' \ + . | tee /dev/stderr | + yq '.rules[4].apiGroups | index("mesh.consul.hashicorp.com")' | tee /dev/stderr) + [ "${object}" == 0 ] +} + +@test "connectInject/ClusterRole: adds permission to multicluster.consul.hashicorp.com with resource-apis in global.experiments" { + cd `chart_dir` + local object=$(helm template \ + -s templates/connect-inject-clusterrole.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments={resource-apis}' \ + . | tee /dev/stderr | + yq '.rules[6].apiGroups | index("multicluster.consul.hashicorp.com")' | tee /dev/stderr) + [ "${object}" == 0 ] +} \ No newline at end of file diff --git a/charts/consul/test/unit/connect-inject-deployment.bats b/charts/consul/test/unit/connect-inject-deployment.bats index 3cc6409d08..a25bcfeee7 100755 --- a/charts/consul/test/unit/connect-inject-deployment.bats +++ b/charts/consul/test/unit/connect-inject-deployment.bats @@ -2738,3 +2738,58 @@ reservedNameTest() { jq -r '. | select( .name == "CONSUL_TLS_SERVER_NAME").value' | tee /dev/stderr) [ "${actual}" = "server.dc1.consul" ] } + +#-------------------------------------------------------------------- +# resource-apis + +@test "connectInject/Deployment: resource-apis is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-enable-resource-apis=true"))' | tee /dev/stderr) + + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: -enable-resource-apis=true is set when global.experiments contains [\"resource-apis\"] " { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-enable-resource-apis=true"))' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# v2tenancy + +@test "connectInject/Deployment: v2tenancy is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-enable-v2tenancy=true"))' | tee /dev/stderr) + + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: -enable-v2tenancy=true is set when global.experiments contains [\"resource-apis\", \"v2tenancy\"]" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'global.experiments[1]=v2tenancy' \ + --set 'ui.enabled=false' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-enable-v2tenancy=true"))' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/crd-exportedservices.bats b/charts/consul/test/unit/crd-exportedservices.bats new file mode 100644 index 0000000000..235fe6bd24 --- /dev/null +++ b/charts/consul/test/unit/crd-exportedservices.bats @@ -0,0 +1,26 @@ +#!/usr/bin/env bats + +load _helpers + +@test "exportedServices/CustomResourceDefinition: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/crd-exportedservices.yaml \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "exportedServices/CustomResourceDefinition: enabled with connectInject.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/crd-exportedservices.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + # The generated CRDs have "---" at the top which results in two objects + # being detected by yq, the first of which is null. We must therefore use + # yq -s so that length operates on both objects at once rather than + # individually, which would output false\ntrue and fail the test. + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/crd-gatewayclassconfigs.bats b/charts/consul/test/unit/crd-gatewayclassconfigs.bats new file mode 100644 index 0000000000..0228110b6b --- /dev/null +++ b/charts/consul/test/unit/crd-gatewayclassconfigs.bats @@ -0,0 +1,20 @@ +#!/usr/bin/env bats + +load _helpers + +@test "gatewayclassconfigs/CustomResourceDefinition: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/crd-gatewayclassconfigs.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "$actual" = "true" ] +} + +@test "gatewayclassconfigs/CustomResourceDefinition: disabled with connectInject.enabled=false" { + cd `chart_dir` + assert_empty helm template \ + -s templates/crd-gatewayclassconfigs.yaml \ + --set 'connectInject.enabled=false' \ + . +} diff --git a/charts/consul/test/unit/dns-proxy-clusterrole.bats b/charts/consul/test/unit/dns-proxy-clusterrole.bats deleted file mode 100644 index a1bf12ae4d..0000000000 --- a/charts/consul/test/unit/dns-proxy-clusterrole.bats +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "dnsProxy/ClusterRole: disabled by default" { - cd `chart_dir` - assert_empty helm template \ - -s templates/dns-proxy-clusterrole.yaml \ - . -} - -@test "dnsProxy/ClusterRole: dns-proxy with global.enabled false" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-clusterrole.yaml \ - --set 'global.enabled=false' \ - --set 'dns.proxy.enabled=true' \ - . | tee /dev/stderr | - yq -s 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "dnsProxy/ClusterRole: disabled with dns.proxy.enabled" { - cd `chart_dir` - assert_empty helm template \ - -s templates/dns-proxy-clusterrole.yaml \ - --set 'dns.proxy.enabled=false' \ - . -} - -##-------------------------------------------------------------------- -## rules -# -@test "dnsProxy/ClusterRole: sets get, list, and watch access to endpoints, services, namespaces and nodes in all api groups" { - cd `chart_dir` - local object=$(helm template \ - -s templates/dns-proxy-clusterrole.yaml \ - --set 'global.enabled=false' \ - --set 'dns.proxy.enabled=true' \ - . 
| tee /dev/stderr | - yq -r '.rules | length' | tee /dev/stderr) - [ "${object}" == 0 ] -} - - -@test "dnsProxy/ClusterRole: sets get access to serviceaccounts and secrets when manageSystemACLSis true" { - cd `chart_dir` - local object=$(helm template \ - -s templates/dns-proxy-clusterrole.yaml \ - --set 'global.enabled=false' \ - --set 'dns.proxy.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq -r '.rules[0]' | tee /dev/stderr) - - local actual=$(echo $object | yq -r '.resources[| index("serviceaccounts")' | tee /dev/stderr) - [ "${actual}" != null ] - - local actual=$(echo $object | yq -r '.resources[| index("secrets")' | tee /dev/stderr) - [ "${actual}" != null ] - - local actual=$(echo $object | yq -r '.apiGroups[0]' | tee /dev/stderr) - [ "${actual}" = "" ] - - local actual=$(echo $object | yq -r '.verbs | index("get")' | tee /dev/stderr) - [ "${actual}" != null ] -} - -#-------------------------------------------------------------------- -# global.enablePodSecurityPolicies -@test "dnsProxy/ClusterRole: allows podsecuritypolicies access with global.enablePodSecurityPolicies=false" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-clusterrole.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.enablePodSecurityPolicies=false' \ - . | tee /dev/stderr | - yq -r '.rules | map(select(.resources[0] == "podsecuritypolicies")) | length' | tee /dev/stderr) - [ "${actual}" = "0" ] -} - -@test "dnsProxy/ClusterRole: allows podsecuritypolicies access with global.enablePodSecurityPolicies=true" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-clusterrole.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.enablePodSecurityPolicies=true' \ - . 
| tee /dev/stderr | - yq -r '.rules | map(select(.resources[0] == "podsecuritypolicies")) | length' | tee /dev/stderr) - [ "${actual}" = "1" ] -} diff --git a/charts/consul/test/unit/dns-proxy-clusterrolebinding.bats b/charts/consul/test/unit/dns-proxy-clusterrolebinding.bats deleted file mode 100644 index c6e7005b1d..0000000000 --- a/charts/consul/test/unit/dns-proxy-clusterrolebinding.bats +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "dnsProxy/ClusterRoleBinding: disabled by default" { - cd `chart_dir` - assert_empty helm template \ - -s templates/dns-proxy-clusterrolebinding.yaml \ - . -} - -@test "dnsProxy/ClusterRoleBinding: enabled with global.enabled false" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-clusterrolebinding.yaml \ - --set 'global.enabled=false' \ - --set 'dns.proxy.enabled=true' \ - . | tee /dev/stderr | - yq -s 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "dnsProxy/ClusterRoleBinding: disabled with connectInject.enabled false" { - cd `chart_dir` - assert_empty helm template \ - -s templates/dns-proxy-clusterrolebinding.yaml \ - --set 'dns.proxy.enabled=false' \ - . -} diff --git a/charts/consul/test/unit/dns-proxy-deployment.bats b/charts/consul/test/unit/dns-proxy-deployment.bats deleted file mode 100755 index 522476cbfc..0000000000 --- a/charts/consul/test/unit/dns-proxy-deployment.bats +++ /dev/null @@ -1,418 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "dnsProxy/Deployment: disabled by default" { - cd `chart_dir` - assert_empty helm template \ - -s templates/dns-proxy-deployment.yaml \ - . -} - -@test "dnsProxy/Deployment: enable with dns.proxy.enabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - . 
| tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "dnsProxy/Deployment: disable with dns.proxy.enabled" { - cd `chart_dir` - assert_empty helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=false' \ - . -} - -@test "dnsProxy/Deployment: disable with global.enabled" { - cd `chart_dir` - assert_empty helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=-' \ - --set 'global.enabled=false' \ - . -} - -#-------------------------------------------------------------------- -# flags - -@test "dnsProxy/Deployment: default dns-proxy mode flag" { - cd `chart_dir` - local cmd=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args' | tee /dev/stderr) - - local actual=$(echo "$cmd" | - yq 'any(contains("-mode=dns-proxy"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -#-------------------------------------------------------------------- -# consul and consul-dataplane images - -@test "dnsProxy/Deployment: container image is global default" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.imageConsulDataplane=foo' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].image' | tee /dev/stderr) - [ "${actual}" = "\"foo\"" ] -} - - -#-------------------------------------------------------------------- -# nodeSelector - -@test "dnsProxy/Deployment: nodeSelector is not set by default" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - - -#-------------------------------------------------------------------- -# authMethod - -@test "dnsProxy/Deployment: -login-auth-method is not set by default" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-login-auth-method="))' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "dnsProxy/Deployment: -login-auth-method is set when global.acls.manageSystemACLs is true -login-auth-method" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("-login-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "dnsProxy/Deployment: -login-auth-method is set when global.acls.manageSystemACLs is true -credential-type" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("-credential-type=login"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "dnsProxy/Deployment: -login-auth-method is set when global.acls.manageSystemACLs is true login-bearer-token-path" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("-login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -#-------------------------------------------------------------------- -# global.tls.enabled - -@test "dnsProxy/Deployment: Adds consul-ca-cert volume when global.tls.enabled is true" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.tls.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" != "" ] -} - -@test "dnsProxy/Deployment: Adds consul-ca-cert volumeMount when global.tls.enabled is true" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.tls.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" != "" ] -} - -@test "dnsProxy/Deployment: can overwrite CA secret with the provided one" { - cd `chart_dir` - local ca_cert_volume=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.caCert.secretName=foo-ca-cert' \ - --set 'global.tls.caCert.secretKey=key' \ - --set 'global.tls.caKey.secretName=foo-ca-key' \ - --set 'global.tls.caKey.secretKey=key' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) - - # check that the provided ca cert secret is attached as a volume - local actual - actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) - [ "${actual}" = "foo-ca-cert" ] - - # check that the volume uses the provided secret key - actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) - [ "${actual}" = "key" ] -} - -@test "dnsProxy/Deployment: consul env vars when global.tls.enabled is true" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.tls.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("-ca-certs=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - - -#-------------------------------------------------------------------- -# partitions - -@test "dnsProxy/Deployment: partitions options disabled by default" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("service-partitions"))' | tee /dev/stderr) - - [ "${actual}" = "false" ] -} - -@test "dnsProxy/Deployment: partitions set with .global.adminPartitions.enabled=true" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("service-partition=default"))' | tee /dev/stderr) - - [ "${actual}" = "true" ] -} - -@test "dnsProxy/Deployment: partitions set with .global.adminPartitions.enabled=true and name set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.adminPartitions.name=ap1' \ - --set 'global.enableConsulNamespaces=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("service-partition=ap1"))' | tee /dev/stderr) - - [ "${actual}" = "true" ] -} - -#-------------------------------------------------------------------- -# acl tokens - - -#-------------------------------------------------------------------- -# global.acls.manageSystemACLs -@test "dnsProxy/Deployment: sets global auth method and primary datacenter when federation and acls" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.federation.enabled=true' \ - --set 'global.federation.primaryDatacenter=dc1' \ - --set 'global.datacenter=dc2' \ - --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("-login-datacenter=dc1"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "dnsProxy/Deployment: sets default login partition and acls and partitions are enabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("-login-partition=default"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "dnsProxy/Deployment: sets non-default login partition and acls and partitions are enabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.adminPartitions.name=foo' \ - --set 'global.enableConsulNamespaces=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("-login-partition=foo"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -#-------------------------------------------------------------------- -# extraLabels - -@test "dnsProxy/Deployment: no extra labels defined by default" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.metadata.labels | del(."app") | del(."chart") | del(."release") | del(."component")' | tee /dev/stderr) - [ "${actual}" = "{}" ] -} - -@test "dnsProxy/Deployment: can set extra global labels" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.extraLabels.foo=bar' \ - . | tee /dev/stderr) - local actualBar=$(echo "${actual}" | yq -r '.metadata.labels.foo' | tee /dev/stderr) - [ "${actualBar}" = "bar" ] - local actualTemplateBar=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) - [ "${actualTemplateBar}" = "bar" ] -} - -@test "dnsProxy/Deployment: can set multiple extra global labels" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.extraLabels.foo=bar' \ - --set 'global.extraLabels.baz=qux' \ - . 
| tee /dev/stderr) - - local actualFoo=$(echo "${actual}" | yq -r '.metadata.labels.foo' | tee /dev/stderr) - local actualBaz=$(echo "${actual}" | yq -r '.metadata.labels.baz' | tee /dev/stderr) - [ "${actualFoo}" = "bar" ] - [ "${actualBaz}" = "qux" ] - local actualTemplateFoo=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) - local actualTemplateBaz=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.baz' | tee /dev/stderr) - [ "${actualTemplateFoo}" = "bar" ] - [ "${actualTemplateBaz}" = "qux" ] -} - -#-------------------------------------------------------------------- -# annotations - -@test "dnsProxy/Deployment: no annotations defined by default" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations | - del(."consul.hashicorp.com/connect-inject") | - del(."consul.hashicorp.com/mesh-inject")' | - tee /dev/stderr) - [ "${actual}" = "{}" ] -} - -@test "dnsProxy/Deployment: default annotations connect-inject" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.template.metadata.annotations["consul.hashicorp.com/connect-inject"]' | tee /dev/stderr) - [ "${actual}" = "\"false\"" ] -} - -@test "dnsProxy/Deployment: default annotations mesh-inject" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - . 
| tee /dev/stderr | - yq '.spec.template.metadata.annotations["consul.hashicorp.com/mesh-inject"]' | tee /dev/stderr) - [ "${actual}" = "\"false\"" ] -} - -#-------------------------------------------------------------------- -# logLevel - -@test "dnsProxy/Deployment: logLevel info by default from global" { - cd `chart_dir` - local cmd=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args' | tee /dev/stderr) - - local actual=$(echo "$cmd" | - yq 'any(contains("-log-level=info"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "dnsProxy/Deployment: logLevel can be overridden" { - cd `chart_dir` - local cmd=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'dns.proxy.logLevel=debug' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args' | tee /dev/stderr) - - local actual=$(echo "$cmd" | - yq 'any(contains("-log-level=debug"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - - - -#-------------------------------------------------------------------- -# replicas - -@test "dnsProxy/Deployment: replicas defaults to 1" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.replicas' | tee /dev/stderr) - - [ "${actual}" = "1" ] -} - -@test "dnsProxy/Deployment: replicas can be set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-deployment.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'dns.proxy.replicas=3' \ - . 
| tee /dev/stderr | - yq '.spec.replicas' | tee /dev/stderr) - - [ "${actual}" = "3" ] -} \ No newline at end of file diff --git a/charts/consul/test/unit/dns-proxy-service.bats b/charts/consul/test/unit/dns-proxy-service.bats deleted file mode 100755 index 5b308f9f84..0000000000 --- a/charts/consul/test/unit/dns-proxy-service.bats +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "dnsProxy/Service: disabled by default" { - cd `chart_dir` - assert_empty helm template \ - -s templates/dns-proxy-service.yaml \ - . -} - -@test "dnsProxy/Service: enable with global.enabled false" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-service.yaml \ - --set 'global.enabled=false' \ - --set 'dns.proxy.enabled=true' \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "dnsProxy/Service: disable with connectInject.enabled" { - cd `chart_dir` - assert_empty helm template \ - -s templates/dns-proxy-service.yaml \ - --set 'dns.proxy.enabled=false' \ - . -} - -@test "dnsProxy/Service: disable with global.enabled" { - cd `chart_dir` - assert_empty helm template \ - -s templates/dns-proxy-service.yaml \ - --set 'dns.proxy.enabled=-' \ - --set 'global.enabled=false' \ - . -} diff --git a/charts/consul/test/unit/dns-proxy-serviceaccount.bats b/charts/consul/test/unit/dns-proxy-serviceaccount.bats deleted file mode 100644 index 7f0a33d4c1..0000000000 --- a/charts/consul/test/unit/dns-proxy-serviceaccount.bats +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "dnsProxy/ServiceAccount: disabled by default" { - cd `chart_dir` - assert_empty helm template \ - -s templates/dns-proxy-serviceaccount.yaml \ - . 
-} - -@test "dnsProxy/ServiceAccount: enabled with global.enabled false" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-serviceaccount.yaml \ - --set 'global.enabled=false' \ - --set 'client.enabled=true' \ - --set 'dns.proxy.enabled=true' \ - . | tee /dev/stderr | - yq -s 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "dnsProxy/ServiceAccount: disabled with connectInject.enabled false" { - cd `chart_dir` - assert_empty helm template \ - -s templates/dns-proxy-serviceaccount.yaml \ - --set 'dns.proxy.enabled=false' \ - . -} - -#-------------------------------------------------------------------- -# global.imagePullSecrets - -@test "dnsProxy/ServiceAccount: can set image pull secrets" { - cd `chart_dir` - local object=$(helm template \ - -s templates/dns-proxy-serviceaccount.yaml \ - --set 'dns.proxy.enabled=true' \ - --set 'global.imagePullSecrets[0].name=my-secret' \ - --set 'global.imagePullSecrets[1].name=my-secret2' \ - . | tee /dev/stderr) - - local actual=$(echo "$object" | - yq -r '.imagePullSecrets[0].name' | tee /dev/stderr) - [ "${actual}" = "my-secret" ] - - local actual=$(echo "$object" | - yq -r '.imagePullSecrets[1].name' | tee /dev/stderr) - [ "${actual}" = "my-secret2" ] -} - -#-------------------------------------------------------------------- -# connectInject.serviceAccount.annotations - -@test "dnsProxy/ServiceAccount: no annotations by default" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/dns-proxy-serviceaccount.yaml \ - --set 'dns.proxy.enabled=true' \ - . | tee /dev/stderr | - yq '.metadata.annotations | length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} diff --git a/charts/consul/test/unit/dns-service.bats b/charts/consul/test/unit/dns-service.bats index 780b5d0f57..bc5777ac53 100755 --- a/charts/consul/test/unit/dns-service.bats +++ b/charts/consul/test/unit/dns-service.bats @@ -38,15 +38,6 @@ load _helpers . 
} -@test "dns/Service: disable with dns.proxy.enabled set to true" { - cd `chart_dir` - assert_empty helm template \ - -s templates/dns-service.yaml \ - --set 'dns.enabled=true' \ - --set 'dns.proxy.enabled=true' \ - . -} - #-------------------------------------------------------------------- # annotations diff --git a/charts/consul/test/unit/gateway-resources-configmap.bats b/charts/consul/test/unit/gateway-resources-configmap.bats index 3ab11e4bb6..e827644792 100644 --- a/charts/consul/test/unit/gateway-resources-configmap.bats +++ b/charts/consul/test/unit/gateway-resources-configmap.bats @@ -47,7 +47,7 @@ target=templates/gateway-resources-configmap.yaml [ $actual = '220m' ] } -@test "gateway-resources/ConfigMap: does not contain config.yaml resources" { +@test "gateway-resources/ConfigMap: does not contain config.yaml resources without .global.experiments equal to resource-apis" { cd `chart_dir` local resources=$(helm template \ -s $target \ @@ -59,5 +59,200 @@ target=templates/gateway-resources-configmap.yaml } +@test "gateway-resources/ConfigMap: contains config.yaml resources with .global.experiments equal to resource-apis" { + cd `chart_dir` + local resources=$(helm template \ + -s $target \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + . | tee /dev/stderr | + yq '.data["config.yaml"]' | tee /dev/stderr) + + [ "$resources" != null ] +} + +#-------------------------------------------------------------------- +# Mesh Gateway logLevel configuration + +@test "gateway-resources/ConfigMap: Mesh Gateway logLevel default configuration" { + cd `chart_dir` + local config=$(helm template \ + -s $target \ + --set 'meshGateway.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + . 
| tee /dev/stderr | + yq -r '.data["config.yaml"]' | yq -r '.gatewayClassConfigs[0].spec.deployment' | tee /dev/stderr) + + local actual=$(echo "$config" | yq -r '.container.consul.logging.level') + [ "${actual}" = 'info' ] + + local actual=$(echo "$config" | yq -r '.initContainer.consul.logging.level') + [ "${actual}" = 'info' ] +} + + +@test "gateway-resources/ConfigMap: Mesh Gateway logLevel custom global configuration" { + cd `chart_dir` + local config=$(helm template \ + -s $target \ + --set 'meshGateway.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'global.logLevel=debug' \ + . | tee /dev/stderr | + yq -r '.data["config.yaml"]' | yq -r '.gatewayClassConfigs[0].spec.deployment' | tee /dev/stderr) + + local actual=$(echo "$config" | yq -r '.container.consul.logging.level') + [ "${actual}" = 'debug' ] + + local actual=$(echo "$config" | yq -r '.initContainer.consul.logging.level') + [ "${actual}" = 'debug' ] +} + +@test "gateway-resources/ConfigMap: Mesh Gateway logLevel custom meshGateway configuration" { + cd `chart_dir` + local config=$(helm template \ + -s $target \ + --set 'meshGateway.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'meshGateway.logLevel=debug' \ + . 
| tee /dev/stderr | + yq -r '.data["config.yaml"]' | yq -r '.gatewayClassConfigs[0].spec.deployment' | tee /dev/stderr) + + local actual=$(echo "$config" | yq -r '.container.consul.logging.level') + [ "${actual}" = 'debug' ] + + local actual=$(echo "$config" | yq -r '.initContainer.consul.logging.level') + [ "${actual}" = 'debug' ] +} + +@test "gateway-resources/ConfigMap: Mesh Gateway logLevel custom meshGateway configuration overrides global configuration" { + cd `chart_dir` + local config=$(helm template \ + -s $target \ + --set 'meshGateway.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'global.logLevel=error' \ + --set 'meshGateway.logLevel=debug' \ + . | tee /dev/stderr | + yq -r '.data["config.yaml"]' | yq -r '.gatewayClassConfigs[0].spec.deployment' | tee /dev/stderr) + + local actual=$(echo "$config" | yq -r '.container.consul.logging.level') + [ "${actual}" = 'debug' ] + + local actual=$(echo "$config" | yq -r '.initContainer.consul.logging.level') + [ "${actual}" = 'debug' ] +} + +#-------------------------------------------------------------------- +# Mesh Gateway Extra Labels configuration + +@test "gateway-resources/ConfigMap: Mesh Gateway gets Extra Labels when set" { + cd `chart_dir` + local actual=$(helm template \ + -s $target \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'global.extraLabels.foo'='bar' \ + . 
| tee /dev/stderr | + yq -r '.data["config.yaml"]' | yq -r '.gatewayClassConfigs[0].spec.deployment.labels.set.foo' | tee /dev/stderr + ) + [ "$actual" = 'bar' ] +} + +#-------------------------------------------------------------------- +# Mesh Gateway annotations configuration + +@test "gateway-resources/ConfigMap: Mesh Gateway gets annotations when set" { + cd `chart_dir` + local actual=$(helm template \ + -s $target \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'meshGateway.annotations.foo'='bar' \ + . | tee /dev/stderr | + yq -r '.data["config.yaml"]' | yq -r '.gatewayClassConfigs[0].spec.deployment.annotations.set.foo' | tee /dev/stderr + ) + [ "$actual" = 'bar' ] +} + #-------------------------------------------------------------------- -# TODO openShiftSSCName +# Mesh Gateway WAN Address configuration + +@test "gateway-resources/ConfigMap: Mesh Gateway WAN Address default annotations" { + cd `chart_dir` + local annotations=$(helm template \ + -s $target \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + . 
| tee /dev/stderr | + yq -r '.data["config.yaml"]' | yq -r '.meshGateways[0].metadata.annotations' | tee /dev/stderr) + + local actual=$(echo "$annotations" | jq -r '.["consul.hashicorp.com/gateway-wan-address-source"]') + [ "${actual}" = 'Service' ] + + local actual=$(echo "$annotations" | jq -r '.["consul.hashicorp.com/gateway-wan-port"]') + [ "${actual}" = '443' ] + + local actual=$(echo "$annotations" | jq -r '.["consul.hashicorp.com/gateway-wan-address-static"]') + [ "${actual}" = '' ] +} + +@test "gateway-resources/ConfigMap: Mesh Gateway WAN Address NodePort annotations" { + cd `chart_dir` + local annotations=$(helm template \ + -s $target \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'meshGateway.wanAddress.source=Service' \ + --set 'meshGateway.service.type=NodePort' \ + --set 'meshGateway.service.nodePort=30000' \ + . | tee /dev/stderr | + yq -r '.data["config.yaml"]' | yq -r '.meshGateways[0].metadata.annotations' | tee /dev/stderr) + + local actual=$(echo "$annotations" | jq -r '.["consul.hashicorp.com/gateway-wan-address-source"]') + [ "${actual}" = 'Service' ] + + local actual=$(echo "$annotations" | jq -r '.["consul.hashicorp.com/gateway-wan-port"]') + [ "${actual}" = '30000' ] + + local actual=$(echo "$annotations" | jq -r '.["consul.hashicorp.com/gateway-wan-address-static"]') + [ "${actual}" = '' ] +} + +@test "gateway-resources/ConfigMap: Mesh Gateway WAN Address static configuration" { + cd `chart_dir` + local annotations=$(helm template \ + -s $target \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'meshGateway.wanAddress.source=Static' \ + --set 'meshGateway.wanAddress.static=127.0.0.1' \ + . 
| tee /dev/stderr | + yq -r '.data["config.yaml"]' | yq -r '.meshGateways[0].metadata.annotations' | tee /dev/stderr) + + local actual=$(echo "$annotations" | jq -r '.["consul.hashicorp.com/gateway-wan-address-source"]') + [ "${actual}" = 'Static' ] + + local actual=$(echo "$annotations" | jq -r '.["consul.hashicorp.com/gateway-wan-port"]') + [ "${actual}" = '443' ] + + local actual=$(echo "$annotations" | jq -r '.["consul.hashicorp.com/gateway-wan-address-static"]') + [ "${actual}" = '127.0.0.1' ] +} + diff --git a/charts/consul/test/unit/gateway-resources-job.bats b/charts/consul/test/unit/gateway-resources-job.bats index 32173838fe..e38397231b 100644 --- a/charts/consul/test/unit/gateway-resources-job.bats +++ b/charts/consul/test/unit/gateway-resources-job.bats @@ -4,15 +4,6 @@ load _helpers target=templates/gateway-resources-job.yaml -@test "gatewayresources/Job: fails if .values.apiGateway is set" { - cd `chart_dir` - run helm template \ - -s templates/tests/test-runner.yaml \ - --set 'apiGateway.enabled=true' . - [ "$status" -eq 1 ] - [[ "$output" =~ "[DEPRECATED and REMOVED] the apiGateway stanza is no longer supported as of Consul 1.19.0. Use connectInject.apiGateway instead." 
]] -} - @test "gatewayresources/Job: enabled by default" { cd `chart_dir` local actual=$(helm template \ @@ -40,6 +31,33 @@ target=templates/gateway-resources-job.yaml [ "$actual" = "true" ] } +#-------------------------------------------------------------------- +# fallback configuration +# to be removed in 1.17 (t-eckert 2023-05-23) + +@test "gatewayresources/Job: fallback configuration is used when apiGateway.enabled is true" { + cd `chart_dir` + local spec=$(helm template \ + -s $target \ + --set 'apiGateway.enabled=true' \ + --set 'apiGateway.image=testing' \ + --set 'apiGateway.managedGatewayClass.nodeSelector=foo: bar' \ + --set 'apiGateway.managedGatewayClass.tolerations=- key: bar' \ + --set 'apiGateway.managedGatewayClass.copyAnnotations.service.annotations=- bingo' \ + --set 'apiGateway.managedGatewayClass.serviceType=LoadBalancer' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].args' | tee /dev/stderr) + + local actual=$(echo "$spec" | jq '.[9] | ."-node-selector=foo"') + [ "${actual}" = "\"bar\"" ] + + local actual=$(echo "$spec" | jq '.[10] | ."-tolerations=- key"') + [ "${actual}" = "\"bar\"" ] + + local actual=$(echo "$spec" | jq '.[11]') + [ "${actual}" = "\"-service-annotations=- bingo\"" ] +} + #-------------------------------------------------------------------- # configuration diff --git a/charts/consul/test/unit/helpers.bats b/charts/consul/test/unit/helpers.bats index 9edc8ddf18..20772788f8 100644 --- a/charts/consul/test/unit/helpers.bats +++ b/charts/consul/test/unit/helpers.bats @@ -328,55 +328,141 @@ load _helpers [ "${actual}" = "" ] } - #-------------------------------------------------------------------- -# consul.imagePullPolicy -# These tests use test-runner.yaml to "unit test" the imagePullPolicy function +# consul.validateResourceAPIs +# These tests use test-runner.yaml to test the +# consul.validateResourceAPIs helper since we need an existing template -@test "helper/consul.imagePullPolicy: bad input" { +@test 
"connectInject/Deployment: fails if resource-apis is set and peering is enabled" { cd `chart_dir` run helm template \ -s templates/tests/test-runner.yaml \ - --set 'global.imagePullPolicy=Garbage' . - [ "$status" -eq 1 ] - [[ "$output" =~ "imagePullPolicy can only be IfNotPresent, Always, Never, or empty" ]] + --set 'connectInject.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'global.tls.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.peering.enabled=true' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When the value global.experiments.resourceAPIs is set, global.peering.enabled is currently unsupported." ]] } -@test "helper/consul.imagePullPolicy: empty input" { +@test "connectInject/Deployment: fails if resource-apis is set, v2tenancy is unset, and admin partitions are enabled" { cd `chart_dir` - local output=$(helm template \ + run helm template \ -s templates/tests/test-runner.yaml \ - . | tee /dev/stderr | - yq -r '.spec.containers[0].imagePullPolicy' | tee /dev/stderr) - [ "${output}" = null ] + --set 'connectInject.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When the value global.experiments.resourceAPIs is set, global.experiments.v2tenancy must also be set to support global.adminPartitions.enabled." ]] } -@test "helper/consul.imagePullPolicy: IfNotPresent" { +@test "connectInject/Deployment: fails if resource-apis is set and federation is enabled" { cd `chart_dir` - local output=$(helm template \ + run helm template \ -s templates/tests/test-runner.yaml \ - --set 'global.imagePullPolicy=IfNotPresent' \ - . 
| tee /dev/stderr | - yq -r '.spec.containers[0].imagePullPolicy' | tee /dev/stderr) - [ "${output}" = "IfNotPresent" ] + --set 'connectInject.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'global.tls.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.federation.enabled=true' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When the value global.experiments.resourceAPIs is set, global.federation.enabled is currently unsupported." ]] } -@test "helper/consul.imagePullPolicy: Always" { +@test "connectInject/Deployment: fails if resource-apis is set and cloud is enabled" { cd `chart_dir` - local output=$(helm template \ + run helm template \ -s templates/tests/test-runner.yaml \ - --set 'global.imagePullPolicy=Always' \ - . | tee /dev/stderr | - yq -r '.spec.containers[0].imagePullPolicy' | tee /dev/stderr) - [ "${output}" = "Always" ] + --set 'connectInject.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.resourceId.secretName=hello' \ + --set 'global.cloud.resourceId.secretKey=hello' \ + --set 'global.cloud.clientId.secretName=hello' \ + --set 'global.cloud.clientId.secretKey=hello' \ + --set 'global.cloud.clientSecret.secretName=hello' \ + --set 'global.cloud.clientSecret.secretKey=hello' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When the value global.experiments.resourceAPIs is set, global.cloud.enabled is currently unsupported." ]] } -@test "helper/consul.imagePullPolicy: Never" { +@test "connectInject/Deployment: fails if resource-apis is set and client is enabled" { cd `chart_dir` - local output=$(helm template \ + run helm template \ -s templates/tests/test-runner.yaml \ - --set 'global.imagePullPolicy=Never' \ - . 
| tee /dev/stderr | - yq -r '.spec.containers[0].imagePullPolicy' | tee /dev/stderr) - [ "${output}" = "Never" ] + --set 'connectInject.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'client.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "When the value global.experiments.resourceAPIs is set, client.enabled is currently unsupported." ]] +} + +@test "connectInject/Deployment: fails if resource-apis is set and ui is enabled" { + cd `chart_dir` + run helm template \ + -s templates/tests/test-runner.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When the value global.experiments.resourceAPIs is set, ui.enabled is currently unsupported." ]] +} + +@test "connectInject/Deployment: fails if resource-apis is set and syncCatalog is enabled" { + cd `chart_dir` + run helm template \ + -s templates/tests/test-runner.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'syncCatalog.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "When the value global.experiments.resourceAPIs is set, syncCatalog.enabled is currently unsupported." ]] +} + +@test "connectInject/Deployment: fails if resource-apis is set and ingressGateways is enabled" { + cd `chart_dir` + run helm template \ + -s templates/tests/test-runner.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'ingressGateways.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "When the value global.experiments.resourceAPIs is set, ingressGateways.enabled is currently unsupported." 
]] +} + +@test "connectInject/Deployment: fails if resource-apis is set and terminatingGateways is enabled" { + cd `chart_dir` + run helm template \ + -s templates/tests/test-runner.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'terminatingGateways.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "When the value global.experiments.resourceAPIs is set, terminatingGateways.enabled is currently unsupported." ]] +} + +@test "connectInject/Deployment: fails if resource-apis is set and apiGateway is enabled" { + cd `chart_dir` + run helm template \ + -s templates/tests/test-runner.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'apiGateway.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "When the value global.experiments.resourceAPIs is set, apiGateway.enabled is currently unsupported." ]] } diff --git a/charts/consul/test/unit/partition-init-job.bats b/charts/consul/test/unit/partition-init-job.bats index a682855fcf..745e23adfe 100644 --- a/charts/consul/test/unit/partition-init-job.bats +++ b/charts/consul/test/unit/partition-init-job.bats @@ -109,6 +109,27 @@ load _helpers [ "${actual}" = "5s" ] } +#-------------------------------------------------------------------- +# v2tenancy experiment + +@test "partitionInit/Job: -enable-v2tenancy=true is set when global.experiments contains [\"resource-apis\", \"v2tenancy\"]" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'server.enabled=false' \ + --set 'global.adminPartitions.name=bar' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'global.experiments[1]=v2tenancy' \ + --set 'ui.enabled=false' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-enable-v2tenancy=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # global.tls.enabled @@ -1023,4 +1044,4 @@ reservedNameTest() { local actualTemplateBaz=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.baz' | tee /dev/stderr) [ "${actualTemplateFoo}" = "bar" ] [ "${actualTemplateBaz}" = "qux" ] -} +} \ No newline at end of file diff --git a/charts/consul/test/unit/server-acl-init-job.bats b/charts/consul/test/unit/server-acl-init-job.bats index f5b255c5ee..99fc6b9a9e 100644 --- a/charts/consul/test/unit/server-acl-init-job.bats +++ b/charts/consul/test/unit/server-acl-init-job.bats @@ -2383,14 +2383,14 @@ load _helpers --set 'global.acls.manageSystemACLs=true' \ --set 'global.argocd.enabled=true' \ . | tee /dev/stderr | - yq -r '.metadata.annotations["argocd.argoproj.io/hook"]' | tee /dev/stderr) + yq -r '.spec.template.metadata.annotations["argocd.argoproj.io/hook"]' | tee /dev/stderr) [ "${actual}" = "Sync" ] local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.argocd.enabled=true' \ . | tee /dev/stderr | - yq -r '.metadata.annotations["argocd.argoproj.io/hook-delete-policy"]' | tee /dev/stderr) + yq -r '.spec.template.metadata.annotations["argocd.argoproj.io/hook-delete-policy"]' | tee /dev/stderr) [ "${actual}" = "HookSucceeded" ] } @@ -2401,17 +2401,50 @@ load _helpers --set 'global.acls.manageSystemACLs=true' \ --set 'global.argocd.enabled=false' \ . | tee /dev/stderr | - yq -r '.metadata.annotations["argocd.argoproj.io/hook"]' | tee /dev/stderr) + yq -r '.spec.template.metadata.annotations["argocd.argoproj.io/hook"]' | tee /dev/stderr) [ "${actual}" = null ] local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.argocd.enabled=false' \ . 
| tee /dev/stderr | - yq -r '.metadata.annotations["argocd.argoproj.io/hook-delete-policy"]' | tee /dev/stderr) + yq -r '.spec.template.metadata.annotations["argocd.argoproj.io/hook-delete-policy"]' | tee /dev/stderr) [ "${actual}" = null ] } +#-------------------------------------------------------------------- +# resource-apis + +@test "serverACLInit/Job: resource-apis is not set by default" { + cd `chart_dir` + local object=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("-enable-resource-apis"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: -enable-resource-apis=true is set when global.experiments contains [\"resource-apis\"] " { + cd `chart_dir` + local object=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("-enable-resource-apis=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # global.metrics.datadog @@ -2459,4 +2492,4 @@ load _helpers local actual=$( echo "$command" | yq 'any(contains("-create-dd-agent-token"))' | tee /dev/stderr) [ "${actual}" = "false" ] -} +} \ No newline at end of file diff --git a/charts/consul/test/unit/server-statefulset.bats b/charts/consul/test/unit/server-statefulset.bats index 96b5501e79..0f62803ec0 100755 --- a/charts/consul/test/unit/server-statefulset.bats +++ b/charts/consul/test/unit/server-statefulset.bats @@ -823,7 +823,7 @@ load _helpers local actual="$( echo "$consul_checks" | \ jq -r .consul.instances | jq -r .[0].url | tee /dev/stderr)" - [ "${actual}" = http://release-name-consul-server.default.svc:8500 ] + [ "${actual}" = "http://consul-server.consul.svc:8500" ] local actual="$( echo "$consul_checks" | \ jq -r .consul.instances | jq -r .[0].new_leader_checks | tee /dev/stderr)" @@ -866,7 +866,7 @@ load _helpers local actual="$( echo "$consul_checks" | \ jq -r .consul.instances | jq -r .[0].url | tee /dev/stderr)" - [ "${actual}" = "https://release-name-consul-server.default.svc:8501" ] + [ "${actual}" = "https://consul-server.default.svc:8501" ] local actual="$( echo "$consul_checks" | \ jq -r .consul.instances | jq -r .[0].tls_cert | tee /dev/stderr)" @@ -933,7 +933,7 @@ load _helpers local actual="$( echo "$consul_checks" | \ jq -r .openmetrics.instances | jq -r .[0].openmetrics_endpoint | tee /dev/stderr)" - [ "${actual}" = "http://release-name-consul-server.default.svc:8500/v1/agent/metrics?format=prometheus" ] + [ "${actual}" = "http://consul-server.default.svc:8500/v1/agent/metrics?format=prometheus" ] local actual="$( echo "$consul_checks" | \ jq -r .openmetrics.instances | jq -r .[0].headers | tee 
/dev/stderr)" @@ -971,7 +971,7 @@ load _helpers local actual="$( echo "$consul_checks" | \ jq -r .openmetrics.instances | jq -r .[0].openmetrics_endpoint | tee /dev/stderr)" - [ "${actual}" = "https://release-name-consul-server.default.svc:8501/v1/agent/metrics?format=prometheus" ] + [ "${actual}" = "https://consul-server.default.svc:8501/v1/agent/metrics?format=prometheus" ] local actual="$( echo "$consul_checks" | \ jq -r .openmetrics.instances | jq -r .[0].headers | tee /dev/stderr)" @@ -1020,7 +1020,7 @@ load _helpers local actual="$( echo "$consul_checks" | \ jq -r .openmetrics.instances | jq -r .[0].openmetrics_endpoint | tee /dev/stderr)" - [ "${actual}" = "http://release-name-consul-server.default.svc:8500/v1/agent/metrics?format=prometheus" ] + [ "${actual}" = "http://consul-server.default.svc:8500/v1/agent/metrics?format=prometheus" ] local actual="$( echo "$consul_checks" | \ jq -r .openmetrics.instances | jq -r '.[0].headers["X-Consul-Token"]' | tee /dev/stderr)" @@ -1083,37 +1083,6 @@ load _helpers [ "${actual}" = "consul-server" ] } -@test "server/StatefulSet: datadog unix socket path name rendering for hostPath volume and volumeMount using default" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'global.metrics.enabled=true' \ - --set 'telemetryCollector.enabled=true' \ - --set 'global.metrics.enableAgentMetrics=true' \ - --set 'global.metrics.datadog.enabled=true' \ - --set 'global.metrics.datadog.dogstatsd.enabled=true' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.volumes[] | select(.name=="dsdsocket") | .hostPath.path' | tee /dev/stderr) - - [ "${actual}" = "/var/run/datadog" ] -} - -@test "server/StatefulSet: datadog unix socket path name rendering for hostPath volume and volumeMount using non default" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'global.metrics.enabled=true' \ - --set 'telemetryCollector.enabled=true' \ - --set 'global.metrics.enableAgentMetrics=true' \ - --set 'global.metrics.datadog.enabled=true' \ - --set 'global.metrics.datadog.dogstatsd.enabled=true' \ - --set 'global.metrics.datadog.dogstatsd.dogstatsdAddr="/this/otherpath/datadog/dsd.socket"' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.volumes[] | select(.name=="dsdsocket") | .hostPath.path' | tee /dev/stderr) - - [ "${actual}" = "/this/otherpath/datadog" ] -} - #-------------------------------------------------------------------- # config-configmap @@ -1385,7 +1354,6 @@ load _helpers "drop": ["ALL"], "add": ["NET_BIND_SERVICE"] }, - "readOnlyRootFilesystem": true, "runAsNonRoot": true, "seccompProfile": { "type": "RuntimeDefault" @@ -1418,7 +1386,6 @@ load _helpers "drop": ["ALL"], "add": ["NET_BIND_SERVICE"] }, - "readOnlyRootFilesystem": true, "runAsNonRoot": true, "seccompProfile": { "type": "RuntimeDefault" @@ -3498,3 +3465,30 @@ MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ [ "${actual}" = "true" ] } +#-------------------------------------------------------------------- +# global.experiments=["resource-apis"] + +@test "server/StatefulSet: experiments=[\"resource-apis\"] is not set in command when global.experiments is empty" { + cd `chart_dir` + local object=$(helm template \ + -s templates/server-statefulset.yaml \ + . | tee /dev/stderr) + + # Test the flag is set. 
+ local actual=$(echo "$object" | + yq '.spec.template.spec.containers[] | select(.name == "consul") | .command | any(contains("-hcl=\"experiments=[\\\"resource-apis\\\"]\""))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/StatefulSet: experiments=[\"resource-apis\"] is set in command when global.experiments contains \"resource-apis\"" { + cd `chart_dir` + local object=$(helm template \ + -s templates/server-statefulset.yaml \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + . | tee /dev/stderr) + + local actual=$(echo "$object" | + yq '.spec.template.spec.containers[] | select(.name == "consul") | .command | any(contains("-hcl=\"experiments=[\\\"resource-apis\\\"]\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} \ No newline at end of file diff --git a/charts/consul/test/unit/telemetry-collector-deployment.bats b/charts/consul/test/unit/telemetry-collector-deployment.bats index dbd2a87804..71f10d3934 100755 --- a/charts/consul/test/unit/telemetry-collector-deployment.bats +++ b/charts/consul/test/unit/telemetry-collector-deployment.bats @@ -1315,6 +1315,20 @@ MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ [ "${actual}" = "true" ] } +#-------------------------------------------------------------------- +# global.experiments=["resource-apis"] + +@test "telemetryCollector/Deployment: disabled when V2 is enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/telemetry-collector-deployment.yaml \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + . 
+} + #-------------------------------------------------------------------- # Namespaces @@ -1415,7 +1429,7 @@ MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ local actual=$(echo "$object" | yq -r '.[] | select(.name=="CO_OTEL_HTTP_ENDPOINT").value' | tee /dev/stderr) - [ "${actual}" = 'http://$(HOST_IP):4317' ] + [ "${actual}" = 'grpc://$(HOST_IP):4317' ] } @test "telemetryCollector/Deployment: DataDog OTLP Collector gRPC protocol verification, case-insensitive" { @@ -1434,5 +1448,5 @@ MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ local actual=$(echo "$object" | yq -r '.[] | select(.name=="CO_OTEL_HTTP_ENDPOINT").value' | tee /dev/stderr) - [ "${actual}" = 'http://$(HOST_IP):4317' ] -} + [ "${actual}" = 'grpc://$(HOST_IP):4317' ] +} \ No newline at end of file diff --git a/charts/consul/test/unit/telemetry-collector-v2-deployment.bats b/charts/consul/test/unit/telemetry-collector-v2-deployment.bats new file mode 100755 index 0000000000..5cfdab96cf --- /dev/null +++ b/charts/consul/test/unit/telemetry-collector-v2-deployment.bats @@ -0,0 +1,1406 @@ +#!/usr/bin/env bats + +load _helpers + +@test "telemetryCollector/Deployment(V2): disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + . +} + +@test "telemetryCollector/Deployment(V2): fails if no image is set" { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=null' \ + . 
+ [ "$status" -eq 1 ] + [[ "$output" =~ "telemetryCollector.image must be set to enable consul-telemetry-collector" ]] +} + +@test "telemetryCollector/Deployment(V2): disable with telemetry-collector.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=false' \ + . +} + +@test "telemetryCollector/Deployment(V2): disable with global.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'global.enabled=false' \ + . +} + +@test "telemetryCollector/Deployment(V2): container image overrides" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "\"bar\"" ] +} + +#-------------------------------------------------------------------- +# nodeSelector + +@test "telemetryCollector/Deployment(V2): nodeSelector is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "telemetryCollector/Deployment(V2): specified nodeSelector" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.nodeSelector=testing' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "testing" ] +} + +#-------------------------------------------------------------------- +# consul.name + +@test "telemetryCollector/Deployment(V2): name is constant regardless of consul name" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'consul.name=foobar' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].name' | tee /dev/stderr) + [ "${actual}" = "consul-telemetry-collector" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "telemetryCollector/Deployment(V2): Adds tls-ca-cert volume when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "telemetryCollector/Deployment(V2): Adds tls-ca-cert volumeMounts when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "telemetryCollector/Deployment(V2): can overwrite CA secret with the provided one" { + cd `chart_dir` + local ca_cert_volume=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that the volume uses the provided secret key + actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} + +#-------------------------------------------------------------------- +# global.tls.enableAutoEncrypt + +@test "telemetryCollector/Deployment(V2): consul-ca-cert volumeMount is added when TLS with auto-encrypt is enabled without clients" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "consul-ca-cert") | length > 0' | tee + /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo.com' \ + --set 'externalServers.useSystemRoots=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +#-------------------------------------------------------------------- +# resources + +@test "telemetryCollector/Deployment(V2): resources has default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + + [ $(echo "${actual}" | yq -r '.requests.memory') = "512Mi" ] + [ $(echo "${actual}" | yq -r '.requests.cpu') = "1000m" ] + [ $(echo "${actual}" | yq -r '.limits.memory') = "512Mi" ] + [ $(echo "${actual}" | yq -r '.limits.cpu') = "1000m" ] +} + +@test "telemetryCollector/Deployment(V2): resources can be overridden" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'telemetryCollector.resources.foo=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# init container resources + +@test "telemetryCollector/Deployment(V2): init container has default resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + [ $(echo "${actual}" | yq -r '.requests.memory') = "25Mi" ] + [ $(echo "${actual}" | yq -r '.requests.cpu') = "50m" ] + [ $(echo "${actual}" | yq -r '.limits.memory') = "150Mi" ] + [ $(echo "${actual}" | yq -r '.limits.cpu') = "50m" ] +} + +@test "telemetryCollector/Deployment(V2): init container resources can be set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'telemetryCollector.initContainer.resources.requests.memory=memory' \ + --set 'telemetryCollector.initContainer.resources.requests.cpu=cpu' \ + --set 'telemetryCollector.initContainer.resources.limits.memory=memory2' \ + --set 'telemetryCollector.initContainer.resources.limits.cpu=cpu2' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] +} + +#-------------------------------------------------------------------- +# priorityClassName + +@test "telemetryCollector/Deployment(V2): no priorityClassName by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.priorityClassName' | tee /dev/stderr) + + [ "${actual}" = "null" ] +} + +@test "telemetryCollector/Deployment(V2): can set a priorityClassName" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'telemetryCollector.priorityClassName=name' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.priorityClassName' | tee /dev/stderr) + + [ "${actual}" = "name" ] +} + +#-------------------------------------------------------------------- +# replicas + +@test "telemetryCollector/Deployment(V2): replicas defaults to 1" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + . | tee /dev/stderr | + yq '.spec.replicas' | tee /dev/stderr) + + [ "${actual}" = "1" ] +} + +@test "telemetryCollector/Deployment(V2): replicas can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'telemetryCollector.replicas=3' \ + . 
| tee /dev/stderr | + yq '.spec.replicas' | tee /dev/stderr) + + [ "${actual}" = "3" ] +} + +#-------------------------------------------------------------------- +# Vault + +@test "telemetryCollector/Deployment(V2): vault CA is not configured by default" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "telemetryCollector/Deployment(V2): vault CA is not configured when secretName is set but secretKey is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 
'global.secretsBackend.vault.ca.secretName=ca' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "telemetryCollector/Deployment(V2): vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "vns" ] +} + +@test "telemetryCollector/Deployment(V2): correct vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set and agentAnnotations are also set without vaultNamespace annotation" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.secretsBackend.vault.agentAnnotations=vault.hashicorp.com/agent-extra-secret: bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "vns" ] +} + +@test "telemetryCollector/Deployment(V2): correct vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set and agentAnnotations are also set with vaultNamespace annotation" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.secretsBackend.vault.agentAnnotations=vault.hashicorp.com/namespace: bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "bar" ] +} + +@test "telemetryCollector/Deployment(V2): vault CA is not configured when secretKey is set but secretName is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "telemetryCollector/Deployment(V2): vault CA is configured when both secretName and secretKey are set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-extra-secret"') + [ "${actual}" = "ca" ] + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/ca-cert"') + [ "${actual}" = "/vault/custom/tls.crt" ] +} + +@test "telemetryCollector/Deployment(V2): vault tls annotations are set when tls is enabled" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'server.serverCert.secretName=pki_int/issue/test' \ + --set 'global.tls.caCert.secretName=pki_int/cert/ca' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr)" + local expected=$'{{- with secret \"pki_int/cert/ca\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr)" + [ "${actual}" = "pki_int/cert/ca" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-init-first"]' | tee /dev/stderr)" + [ "${actual}" = "true" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr)" + [ "${actual}" = "true" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr)" + [ "${actual}" = "test" ] +} + +@test "telemetryCollector/Deployment(V2): vault agent annotations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# telemetryCollector.cloud + +@test "telemetryCollector/Deployment(V2): success with all cloud bits set" { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientSecret.secretName=client-secret-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-key' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ + . +} + +@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientId is set and global.cloud.resourceId is not set or global.cloud.clientSecret.secretName is not set" { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientSecret.secretName=client-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-id-key' \ + --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ + . 
+ [ "$status" -eq 1 ] + [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." 
]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." 
{ + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.authUrl.secretName=auth-url-name' \ + . 
+ [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.authUrl.secretKey=auth-url-key' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." 
{ + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.apiHost.secretName=auth-url-name' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." 
{ + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.apiHost.secretKey=auth-url-key' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." 
{ + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." 
{ + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientId.secretName is set but telemetryCollector.cloud.clientId.secretKey is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.cloud.clientId.secretName=client-id-name' \ + --set 'telemetryCollector.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'telemetryCollector.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + . 
+ [ "$status" -eq 1 ] + + echo "$output" > /dev/stderr + + [[ "$output" =~ "When either telemetryCollector.cloud.clientId.secretName or telemetryCollector.cloud.clientId.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientId.secretKey is set but telemetryCollector.cloud.clientId.secretName is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.cloud.clientId.secretName=client-id-name' \ + --set 'telemetryCollector.cloud.clientId.secretKey=client-id-key' \ + --set 'telemetryCollector.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + . + [ "$status" -eq 1 ] + + echo "$output" > /dev/stderr + + [[ "$output" =~ "When either telemetryCollector.cloud.clientSecret.secretName or telemetryCollector.cloud.clientSecret.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientSecret.secretName is set but telemetryCollector.cloud.clientId.secretName is not set." 
{ + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.cloud.clientId.secretKey=client-id-key' \ + --set 'telemetryCollector.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'telemetryCollector.cloud.clientSecret.secretKey=client-secret-key-name' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + . + [ "$status" -eq 1 ] + + echo "$output" > /dev/stderr + + [[ "$output" =~ "When telemetryCollector.cloud.clientSecret.secretName is set, telemetryCollector.cloud.clientId.secretName must also be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientId.secretName is set but telemetry.cloud.clientId.secretKey is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.cloud.clientId.secretName=client-id-name' \ + --set 'telemetryCollector.cloud.clientSecret.secretName=client-secret-name' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either telemetryCollector.cloud.clientId.secretName or telemetryCollector.cloud.clientId.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientSecret.secretName is set but telemetry.cloud.clientSecret.secretKey is not set." 
{ + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.cloud.clientId.secretName=client-id-name' \ + --set 'telemetryCollector.cloud.clientId.secretKey=client-id-key' \ + --set 'telemetryCollector.cloud.clientSecret.secretName=client-secret-name' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either telemetryCollector.cloud.clientSecret.secretName or telemetryCollector.cloud.clientSecret.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientId and telemetryCollector.cloud.clientSecret is set but global.cloud.resourceId.secretKey is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.cloud.clientId.secretName=client-id-name' \ + --set 'telemetryCollector.cloud.clientId.secretKey=client-id-key' \ + --set 'telemetryCollector.cloud.clientSecret.secretName=client-secret-name' \ + --set 'telemetryCollector.cloud.clientSecret.secretKey=client-secret-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + . + [ "$status" -eq 1 ] + + echo "$output" > /dev/stderr + + [[ "$output" =~ "When telemetryCollector has clientId and clientSecret, telemetryCollector.cloud.resourceId.secretKey or global.cloud.resourceId.secretKey must be set" ]] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "telemetryCollector/Deployment(V2): sets -tls-disabled args when when not using TLS." 
{ + cd `chart_dir` + + local flags=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=false' \ + . | yq -r .spec.template.spec.containers[1].args) + + local actual=$(echo $flags | yq -r '. | any(contains("-tls-disabled"))') + [ "${actual}" = 'true' ] + +} + +@test "telemetryCollector/Deployment(V2): -ca-certs set correctly when using TLS." { + cd `chart_dir` + local flags=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[1].args' | tee /dev/stderr) + + local actual=$(echo $flags | yq -r '. | any(contains("-ca-certs=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + [ "${actual}" = 'true' ] +} + +#-------------------------------------------------------------------- +# External Server + +@test "telemetryCollector/Deployment(V2): sets external server args when global.tls.enabled and externalServers.enabled" { + cd `chart_dir` + local flags=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.httpsPort=8501' \ + --set 'externalServers.tlsServerName=foo.tls.server' \ + --set 'externalServers.useSystemRoots=true' \ + --set 'server.enabled=false' \ + --set 'client.enabled=false' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[1].args' | tee /dev/stderr) + + local actual=$(echo $flags | yq -r '. | any(contains("-ca-certs=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + [ "${actual}" = 'false' ] + + local actual=$(echo $flags | yq -r '. | any(contains("-tls-server-name=foo.tls.server"))' | tee /dev/stderr) + [ "${actual}" = 'true' ] + + local actual=$(echo $flags | jq -r '. | any(contains("-addresses=external-consul.host"))' | tee /dev/stderr) + [ "${actual}" = 'true' ] +} + +#-------------------------------------------------------------------- +# Admin Partitions +# TODO: re-enable this test when V2 supports admin partitions. + +# @test "telemetryCollector/Deployment: partition flags are set when using admin partitions" { +# cd `chart_dir` +# local flags=$(helm template \ +# -s templates/telemetry-collector-deployment.yaml \ +# --set 'ui.enabled=false' \ +# --set 'global.experiments[0]=resource-apis' \ +# --set 'telemetryCollector.enabled=true' \ +# --set 'telemetryCollector.image=bar' \ +# --set 'global.enableConsulNamespaces=true' \ +# --set 'global.adminPartitions.enabled=true' \ +# --set 'global.adminPartitions.name=hashi' \ +# --set 'global.acls.manageSystemACLs=true' \ +# . | tee /dev/stderr | +# yq '.spec.template.spec.containers[1].args' | tee /dev/stderr) +# +# local actual=$(echo $flags | jq -r '. | any(contains("-login-partition=hashi"))' | tee /dev/stderr) +# [ "${actual}" = 'true' ] +# +# local actual=$(echo $flags | jq -r '. 
| any(contains("-service-partition=hashi"))' | tee /dev/stderr) +# [ "${actual}" = "true" ] +# } + +@test "telemetryCollector/Deployment(V2): consul-ca-cert volume mount is not set when using externalServers and useSystemRoots" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "telemetryCollector/Deployment(V2): config volume mount is set when config exists" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.customExporterConfig="foo"' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "config") | .name' | tee /dev/stderr) + [ "${actual}" = "config" ] +} + +@test "telemetryCollector/Deployment(V2): config flag is set when config exists" { + cd `chart_dir` + local flags=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.customExporterConfig="foo"' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command') + + local actual=$(echo $flags | yq -r '. | any(contains("-config-file-path /consul/config/config.json"))') + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): consul-ca-cert volume mount is not set on acl-init when using externalServers and useSystemRoots" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} +#-------------------------------------------------------------------- +# trustedCAs + +@test "telemetryCollector/Deployment(V2): trustedCAs: if trustedCAs is set command is modified correctly" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'global.trustedCAs[0]=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2] | contains("cat < /trusted-cas/custom-ca-0.pem")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): trustedCAs: if multiple Trusted cas were set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'global.trustedCAs[0]=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + --set 'global.trustedCAs[1]=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0]' | tee /dev/stderr) + + + local actual=$(echo $object | jq '.command[2] | contains("cat < /trusted-cas/custom-ca-0.pem")' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual=$(echo $object | jq '.command[2] | contains("cat < /trusted-cas/custom-ca-1.pem")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): trustedCAs: if trustedCAs is set /trusted-cas volumeMount is added" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'global.trustedCAs[0]=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . 
| tee /dev/stderr | yq -r '.spec.template.spec' | tee /dev/stderr) + local actual=$(echo $object | jq -r '.volumes[] | select(.name == "trusted-cas") | .name' | tee /dev/stderr) + [ "${actual}" = "trusted-cas" ] +} + + +@test "telemetryCollector/Deployment(V2): trustedCAs: if trustedCAs is set SSL_CERT_DIR env var is set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'global.trustedCAs[0]=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . | tee /dev/stderr | yq -r '.spec.template.spec.containers[0].env[] | select(.name == "SSL_CERT_DIR")' | tee /dev/stderr) + + local actual=$(echo $object | jq -r '.name' | tee /dev/stderr) + [ "${actual}" = "SSL_CERT_DIR" ] + local actual=$(echo $object | jq -r '.value' | tee /dev/stderr) + [ "${actual}" = "/etc/ssl/certs:/trusted-cas" ] +} + +#-------------------------------------------------------------------- +# extraLabels + +@test "telemetryCollector/Deployment(V2): no extra labels defined by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata.labels | del(."app") | del(."chart") | del(."release") | del(."component") | del(."consul.hashicorp.com/connect-inject-managed-by")' \ + | tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "telemetryCollector/Deployment(V2): extra global labels can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.extraLabels.foo=bar' \ + . | tee /dev/stderr) + local actualBar=$(echo "${actual}" | yq -r '.metadata.labels.foo' | tee /dev/stderr) + [ "${actualBar}" = "bar" ] + local actualTemplateBar=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) + [ "${actualTemplateBar}" = "bar" ] +} + +@test "telemetryCollector/Deployment(V2): multiple global extra labels can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.extraLabels.foo=bar' \ + --set 'global.extraLabels.baz=qux' \ + . 
| tee /dev/stderr) + local actualFoo=$(echo "${actual}" | yq -r '.metadata.labels.foo' | tee /dev/stderr) + local actualBaz=$(echo "${actual}" | yq -r '.metadata.labels.baz' | tee /dev/stderr) + [ "${actualFoo}" = "bar" ] + [ "${actualBaz}" = "qux" ] + local actualTemplateFoo=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) + local actualTemplateBaz=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.baz' | tee /dev/stderr) + [ "${actualTemplateFoo}" = "bar" ] + [ "${actualTemplateBaz}" = "qux" ] +} + +#-------------------------------------------------------------------- +# extraEnvironmentVariables + +@test "telemetryCollector/Deployment(V2): extra environment variables" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.extraEnvironmentVars.HCP_AUTH_TLS=insecure' \ + --set 'telemetryCollector.extraEnvironmentVars.foo=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r 'map(select(.name == "HCP_AUTH_TLS")) | .[0].value' | tee /dev/stderr) + [ "${actual}" = "insecure" ] + + local actual=$(echo $object | + yq -r 'map(select(.name == "foo")) | .[0].value' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# logLevel + +@test "telemetryCollector/Deployment(V2): use global.logLevel by default" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level=info"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): override global.logLevel when telemetryCollector.logLevel is set" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.logLevel=warn' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level=warn"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): use global.logLevel by default for dataplane container" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[1].args' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level=info"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): override global.logLevel when telemetryCollector.logLevel is set for dataplane container" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.logLevel=debug' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[1].args' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level=debug"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.experiments=["resource-apis"] + +@test "telemetryCollector/Deployment(V2): disabled when V2 is disabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + . +} + +#-------------------------------------------------------------------- +# Namespaces + +@test "telemetryCollector/Deployment(V2): namespace flags when mirroringK8S" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ + --namespace 'test-namespace' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec' | tee /dev/stderr) + + local actual=$(echo $object | jq -r '.containers[1].args | any(contains("-login-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = 'true' ] + + local actual=$(echo $object | jq -r '.containers[1].args | any(contains("-service-namespace=test-namespace"))' | tee /dev/stderr) + [ "${actual}" = 'true' ] +} + +@test "telemetryCollector/Deployment(V2): namespace flags when not mirroringK8S" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'connectInject.consulNamespaces.mirroringK8S=false' \ + --set 'connectInject.consulNamespaces.consulDestinationNamespace=fakenamespace' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers' | tee /dev/stderr) + + local actual=$(echo $object | jq -r '.[1].args | any(contains("-login-namespace=fakenamespace"))' | tee /dev/stderr) + [ "${actual}" = 'true' ] + + local actual=$(echo $object | jq -r '.[1].args | any(contains("-service-namespace=fakenamespace"))' | tee /dev/stderr) + [ "${actual}" = 'true' ] +} diff --git a/charts/consul/values.yaml b/charts/consul/values.yaml index 6f49cb67c8..117108f2fd 100644 --- a/charts/consul/values.yaml +++ b/charts/consul/values.yaml @@ -66,7 +66,7 @@ global: # image: "hashicorp/consul-enterprise:1.10.0-ent" # ``` # @default: hashicorp/consul: - image: docker.mirror.hashicorp.services/hashicorppreview/consul:1.20-dev + image: docker.mirror.hashicorp.services/hashicorppreview/consul:1.18-dev # Array of objects containing image pull secret names that will be applied to each service account. 
# This can be used to reference image pull secrets if using a custom consul or consul-k8s-control-plane Docker image. @@ -86,12 +86,7 @@ global: # image that is used for functionality such as catalog sync. # This can be overridden per component. # @default: hashicorp/consul-k8s-control-plane: - imageK8S: docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.6-dev - - # The image pull policy used globally for images controlled by Consul (consul, consul-dataplane, consul-k8s, consul-telemetry-collector). - # One of "IfNotPresent", "Always", "Never", and "". Refer to https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy - # @default: "" - imagePullPolicy: "" + imageK8S: docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.4-dev # The name of the datacenter that the agents should # register as. This can't be changed once the Consul cluster is up and running @@ -793,7 +788,7 @@ global: # The name (and tag) of the consul-dataplane Docker image used for the # connect-injected sidecar proxies and mesh, terminating, and ingress gateways. # @default: hashicorp/consul-dataplane: - imageConsulDataplane: docker.mirror.hashicorp.services/hashicorppreview/consul-dataplane:1.6-dev + imageConsulDataplane: docker.mirror.hashicorp.services/hashicorppreview/consul-dataplane:1.4-dev # Configuration for running this Helm chart on the Red Hat OpenShift platform. # This Helm chart currently supports OpenShift v4.x+. @@ -907,15 +902,25 @@ global: # Consul feature flags that will be enabled across components. # Supported feature flags: - # - # - `v1dns`: - # When this flag is set, Consul agents use the legacy DNS implementation. - # This setting exists in the case a DNS bug is found after the refactoring introduced in v1.19.0. + # - `resource-apis`: + # _**Warning**_! This feature is under active development. It is not + # recommended for production use. 
Setting this flag during an + # upgrade could risk breaking your Consul cluster. + # If this flag is set, Consul components will use the + # V2 resources APIs for all operations. + # - `v2tenancy`: + # _**Warning**_! This feature is under active development. It is not + # recommended for production use. Setting this flag during an + # upgrade could risk breaking your Consul cluster. + # If this flag is set, Consul V2 resources (catalog, mesh, auth, etc) + # will use V2 implementations for tenancy (partitions and namesapces) + # instead of bridging to the existing V1 implementations. The + # `resource-apis` feature flag must also be set. # # Example: # # ```yaml - # experiments: [ "v1dns" ] + # experiments: [ "resource-apis" ] # ``` # @type: array experiments: [] @@ -1914,17 +1919,6 @@ dns: # @type: string additionalSpec: null - # Configures dns-proxy deployment. - proxy: - # True if you want to enable dns-proxy - enabled: false - - # The number of deployment replicas. - replicas: 1 - - # Port number to be used by DNS proxy - port: 53 - # Values that configure the Consul UI. ui: # If true, the UI will be enabled. This will @@ -2317,8 +2311,6 @@ syncCatalog: # @type: string annotations: null - - # Configures the automatic Connect sidecar injector. connectInject: # True if you want to enable connect injection. Set to "-" to inherit from @@ -3487,6 +3479,175 @@ terminatingGateways: gateways: - name: terminating-gateway +# [DEPRECATED] Use connectInject.apiGateway instead. +# Configuration settings for the Consul API Gateway integration +apiGateway: + # When true the helm chart will install the Consul API Gateway controller + enabled: false + + # Image to use for the api-gateway-controller pods and gateway instances + # + # ~> **Note:** Using API Gateway <= 0.4 with external servers requires setting `client.enabled: true`. + # @type: string + image: null + + # The name (and tag) of the Envoy Docker image used for the + # apiGateway. 
For other Consul compoenents, imageEnvoy has been replaced with Consul Dataplane. + # @default: envoyproxy/envoy: + imageEnvoy: "envoyproxy/envoy:v1.25.11" + + # Override global log verbosity level for api-gateway-controller pods. One of "debug", "info", "warn", or "error". + # @type: string + logLevel: info + + # Configuration settings for the optional GatewayClass installed by consul-k8s (enabled by default) + managedGatewayClass: + # When true a GatewayClass is configured to automatically work with Consul as installed by helm. + enabled: true + + # This value defines [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) + # labels for gateway pod assignment, formatted as a multi-line string. + # + # Example: + # + # ```yaml + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + # ``` + # + # @type: string + nodeSelector: null + + # Toleration settings for gateway pods created with the managed gateway class. + # This should be a multi-line string matching the + # [Tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) array in a Pod spec. + # + # @type: string + tolerations: null + + # This value defines the type of service created for gateways (e.g. LoadBalancer, ClusterIP) + serviceType: LoadBalancer + + # This value toggles if the gateway ports should be mapped to host ports + useHostPorts: false + + # Configuration settings for annotations to be copied from the Gateway to other child resources. + copyAnnotations: + # This value defines a list of annotations to be copied from the Gateway to the Service created, formatted as a multi-line string. 
+ # + # Example: + # + # ```yaml + # service: + # annotations: | + # - external-dns.alpha.kubernetes.io/hostname + # ``` + # + # @type: string + service: null + + # This value defines the number of pods to deploy for each Gateway as well as a min and max number of pods for all Gateways + # + # Example: + # + # ```yaml + # deployment: + # defaultInstances: 3 + # maxInstances: 8 + # minInstances: 1 + # ``` + # + # @type: map + deployment: null + + # Configuration for the ServiceAccount created for the api-gateway component + serviceAccount: + # This value defines additional annotations for the client service account. This should be formatted as a multi-line + # string. + # + # ```yaml + # annotations: | + # "sample/annotation1": "foo" + # "sample/annotation2": "bar" + # ``` + # + # @type: string + annotations: null + + # Configuration for the api-gateway controller component + controller: + # This value sets the number of controller replicas to deploy. + replicas: 1 + + # Annotations to apply to the api-gateway-controller pods. + # + # ```yaml + # annotations: | + # "annotation-key": "annotation-value" + # ``` + # + # @type: string + annotations: null + + # This value references an existing + # Kubernetes [`priorityClassName`](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#pod-priority) + # that can be assigned to api-gateway-controller pods. + priorityClassName: "" + + # This value defines [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) + # labels for api-gateway-controller pod assignment, formatted as a multi-line string. + # + # Example: + # + # ```yaml + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + # ``` + # + # @type: string + nodeSelector: null + + # This value defines the tolerations for api-gateway-controller pod, this should be a multi-line string matching the + # [Tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) array in a Pod spec. 
+ # + # @type: string + tolerations: null + + # Configuration for the Service created for the api-gateway-controller + service: + # Annotations to apply to the api-gateway-controller service. + # + # ```yaml + # annotations: | + # "annotation-key": "annotation-value" + # ``` + # + # @type: string + annotations: null + + # The resource settings for api gateway pods. + # @recurse: false + # @type: map + resources: + requests: + memory: "100Mi" + cpu: "100m" + limits: + memory: "100Mi" + cpu: "100m" + + # The resource settings for the `copy-consul-bin` init container. + # @recurse: false + # @type: map + initCopyConsulContainer: + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "150Mi" + cpu: "50m" + # Configuration settings for the webhook-cert-manager # `webhook-cert-manager` ensures that cert bundles are up to date for the mutating webhook. webhookCertManager: diff --git a/cli/go.mod b/cli/go.mod index 7067d7c9b0..4b1f7bef19 100644 --- a/cli/go.mod +++ b/cli/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/hashicorp/consul-k8s/charts v0.0.0-00010101000000-000000000000 github.com/hashicorp/consul-k8s/version v0.0.0 - github.com/hashicorp/consul/troubleshoot v0.6.1 + github.com/hashicorp/consul/troubleshoot v0.6.0 github.com/hashicorp/go-hclog v1.5.0 github.com/hashicorp/hcp-sdk-go v0.62.1-0.20230913154003-cf69c0370c54 github.com/kr/text v0.2.0 @@ -63,7 +63,7 @@ require ( github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/envoyproxy/go-control-plane v0.12.0 // indirect + github.com/envoyproxy/go-control-plane v0.11.1 // indirect github.com/envoyproxy/go-control-plane/xdsmatcher v0.0.0-20230524161521-aaaacbfbe53e // indirect github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect github.com/evanphx/json-patch v5.7.0+incompatible // indirect @@ -96,8 +96,8 @@ require ( github.com/gorilla/websocket 
v1.5.0 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect - github.com/hashicorp/consul/api v1.29.1 // indirect - github.com/hashicorp/consul/envoyextensions v0.7.0 // indirect + github.com/hashicorp/consul/api v1.28.2 // indirect + github.com/hashicorp/consul/envoyextensions v0.6.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect @@ -143,7 +143,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/rubenv/sql-migrate v1.5.2 // indirect diff --git a/cli/go.sum b/cli/go.sum index 492c6ce2eb..aa2b2188fe 100644 --- a/cli/go.sum +++ b/cli/go.sum @@ -126,8 +126,8 @@ github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxER github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/go-control-plane/xdsmatcher 
v0.0.0-20230524161521-aaaacbfbe53e h1:g8euodkL4GdSpVAjfzhssb07KgVmOUqyF4QOmwFumTs= github.com/envoyproxy/go-control-plane/xdsmatcher v0.0.0-20230524161521-aaaacbfbe53e/go.mod h1:/NGEcKqwNq3HAS2vCqHfsPx9sJZbkiNQ6dGx9gTE/NA= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -294,16 +294,14 @@ github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= -github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= -github.com/hashicorp/consul/envoyextensions v0.7.0 h1:DiZcA2tCgwD3tAoixBML3pYAPCKWLnOrKzzt843YTrU= -github.com/hashicorp/consul/envoyextensions v0.7.0/go.mod h1:oZlopILhl2oaJhcs2szKlFcdVYBWzjqEYaG4SSQdBjY= -github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg= -github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= -github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= -github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= -github.com/hashicorp/consul/troubleshoot v0.6.1 h1:Nmk0fXjpgmMhEEzeBdV6+OcoD3bUJtKCP1ONo4vZPaw= -github.com/hashicorp/consul/troubleshoot v0.6.1/go.mod h1:Yenla7oy9UpI9vZr7puDLnfIFwYcmd1XBy4q2nAhea8= +github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8= +github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE= +github.com/hashicorp/consul/envoyextensions v0.6.0 
h1:PtbJUVKBMSGKXnTdSIw1EGyjWvMrmFvXcZuVeIT79Ls= +github.com/hashicorp/consul/envoyextensions v0.6.0/go.mod h1:MwSQg5WUuAle1bGsJWS6/z/RNvs8Ob0sNvFZ98l/+Mc= +github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8= +github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= +github.com/hashicorp/consul/troubleshoot v0.6.0 h1:5PKcTBrx/XDHZBPhpLqldJFZYNiO+LDXdi26Aa1J6BI= +github.com/hashicorp/consul/troubleshoot v0.6.0/go.mod h1:7fSd1Nn89vPxnZ8BiQTVS4fTygS42crtfm0psMoJJms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -523,8 +521,8 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1: github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= diff --git a/control-plane/Dockerfile b/control-plane/Dockerfile index 42a36e872f..26f4b90ef0 
100644 --- a/control-plane/Dockerfile +++ b/control-plane/Dockerfile @@ -32,7 +32,6 @@ ARG CNI_BIN_NAME=consul-cni ARG VERSION ARG TARGETARCH ARG TARGETOS -ENV PRODUCT_NAME=$BIN_NAME LABEL name=${BIN_NAME} \ maintainer="Team Consul Kubernetes " \ @@ -43,6 +42,7 @@ LABEL name=${BIN_NAME} \ description="consul-k8s-control-plane provides first-class integrations between Consul and Kubernetes." \ org.opencontainers.image.licenses="MPL-2.0" +RUN mkdir -p /usr/share/doc/$PRODUCT_NAME COPY LICENSE /usr/share/doc/$PRODUCT_NAME/LICENSE.txt # Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD @@ -84,7 +84,6 @@ FROM alpine:3.19 AS release-default ARG BIN_NAME=consul-k8s-control-plane ARG CNI_BIN_NAME=consul-cni ARG PRODUCT_VERSION -ENV PRODUCT_NAME=$BIN_NAME LABEL name=${BIN_NAME} \ maintainer="Team Consul Kubernetes " \ @@ -95,6 +94,7 @@ LABEL name=${BIN_NAME} \ description="consul-k8s-control-plane provides first-class integrations between Consul and Kubernetes." \ org.opencontainers.image.licenses="MPL-2.0" +RUN mkdir -p /usr/share/doc/$PRODUCT_NAME COPY LICENSE /usr/share/doc/$PRODUCT_NAME/LICENSE.txt # Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD @@ -149,7 +149,8 @@ ARG VERSION # and the version to download. Example: PRODUCT_NAME=consul PRODUCT_VERSION=1.2.3. ENV BIN_NAME=$BIN_NAME ENV PRODUCT_VERSION=$PRODUCT_VERSION -ENV PRODUCT_NAME=$BIN_NAME + +ARG PRODUCT_NAME=$BIN_NAME LABEL name=$PRODUCT_NAME \ maintainer="Team Consul Kubernetes " \ @@ -160,6 +161,7 @@ LABEL name=$PRODUCT_NAME \ description="consul-k8s-control-plane provides first-class integrations between Consul and Kubernetes." 
\ org.opencontainers.image.licenses="MPL-2.0" +RUN mkdir -p /usr/share/doc/$PRODUCT_NAME COPY LICENSE /usr/share/doc/$PRODUCT_NAME/LICENSE.txt # Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD diff --git a/control-plane/api-gateway/binding/binder.go b/control-plane/api-gateway/binding/binder.go index cbd46dd0e8..c704a6b04e 100644 --- a/control-plane/api-gateway/binding/binder.go +++ b/control-plane/api-gateway/binding/binder.go @@ -172,7 +172,10 @@ func (b *Binder) Snapshot() *Snapshot { for secret := range gatewaySecrets.Iter() { // ignore the error if the certificate cannot be processed and just don't add it into the final // sync set - b.config.Resources.TranslateFileSystemCertificate(secret.(types.NamespacedName)) + if err := b.config.Resources.TranslateInlineCertificate(secret.(types.NamespacedName)); err != nil { + b.config.Logger.Error(err, "error parsing referenced secret, ignoring") + continue + } } } diff --git a/control-plane/api-gateway/binding/binder_test.go b/control-plane/api-gateway/binding/binder_test.go index c975a6e791..b4a274c8fa 100644 --- a/control-plane/api-gateway/binding/binder_test.go +++ b/control-plane/api-gateway/binding/binder_test.go @@ -55,19 +55,19 @@ var ( ) type resourceMapResources struct { - grants []gwv1beta1.ReferenceGrant - secrets []corev1.Secret - gateways []gwv1beta1.Gateway - httpRoutes []gwv1beta1.HTTPRoute - tcpRoutes []gwv1alpha2.TCPRoute - meshServices []v1alpha1.MeshService - services []types.NamespacedName - jwtProviders []*v1alpha1.JWTProvider - gatewayPolicies []*v1alpha1.GatewayPolicy - externalAuthFilters []*v1alpha1.RouteAuthFilter - consulFileSystemCertificates []api.FileSystemCertificateConfigEntry - consulHTTPRoutes []api.HTTPRouteConfigEntry - consulTCPRoutes []api.TCPRouteConfigEntry + grants []gwv1beta1.ReferenceGrant + secrets []corev1.Secret + gateways []gwv1beta1.Gateway + httpRoutes []gwv1beta1.HTTPRoute + tcpRoutes []gwv1alpha2.TCPRoute + meshServices []v1alpha1.MeshService + services 
[]types.NamespacedName + jwtProviders []*v1alpha1.JWTProvider + gatewayPolicies []*v1alpha1.GatewayPolicy + externalAuthFilters []*v1alpha1.RouteAuthFilter + consulInlineCertificates []api.InlineCertificateConfigEntry + consulHTTPRoutes []api.HTTPRouteConfigEntry + consulTCPRoutes []api.TCPRouteConfigEntry } func newTestResourceMap(t *testing.T, resources resourceMapResources) *common.ResourceMap { @@ -282,7 +282,7 @@ func TestBinder_Lifecycle(t *testing.T) { Protocol: "http", TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{{ - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "secret-one", }}, }, @@ -644,7 +644,7 @@ func TestBinder_Lifecycle(t *testing.T) { }, }, }, - consulFileSystemCertificates: []api.FileSystemCertificateConfigEntry{ + consulInlineCertificates: []api.InlineCertificateConfigEntry{ *certificateOne, *certificateTwo, }, @@ -771,7 +771,7 @@ func TestBinder_Lifecycle(t *testing.T) { expectedConsulDeletions: []api.ResourceReference{ {Kind: api.HTTPRoute, Name: "http-route-one"}, {Kind: api.TCPRoute, Name: "tcp-route-one"}, - {Kind: api.FileSystemCertificate, Name: "secret-two"}, + {Kind: api.InlineCertificate, Name: "secret-two"}, {Kind: api.APIGateway, Name: "gateway-deleted"}, }, }, @@ -3133,7 +3133,7 @@ func controlledBinder(config BinderConfig) BinderConfig { return config } -func generateTestCertificate(t *testing.T, namespace, name string) (*api.FileSystemCertificateConfigEntry, corev1.Secret) { +func generateTestCertificate(t *testing.T, namespace, name string) (*api.InlineCertificateConfigEntry, corev1.Secret) { privateKey, err := rsa.GenerateKey(rand.Reader, common.MinKeyLength) require.NoError(t, err) @@ -3180,7 +3180,8 @@ func generateTestCertificate(t *testing.T, namespace, name string) (*api.FileSys }, } - certificate := (common.ResourceTranslator{}).ToFileSystemCertificate(secret) + certificate, err := (common.ResourceTranslator{}).ToInlineCertificate(secret) + require.NoError(t, err) return 
certificate, secret } diff --git a/control-plane/api-gateway/binding/cleanup.go b/control-plane/api-gateway/binding/cleanup.go index 4fe11382d0..3f944718e8 100644 --- a/control-plane/api-gateway/binding/cleanup.go +++ b/control-plane/api-gateway/binding/cleanup.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package binding import ( @@ -52,12 +49,7 @@ func (c Cleaner) Run(ctx context.Context) { c.Logger.Error(err, "failed to cleanup old ACL role and policy") } - inlineCertsAllCleanedUp, err := c.cleanupInlineCerts(client) - if err != nil { - c.Logger.Error(err, "failed to cleanup inline-certificate configuration entries") - } - - if aclsCleanedUp && inlineCertsAllCleanedUp { + if aclsCleanedUp { c.Logger.Info("Cleanup complete") return } @@ -142,59 +134,6 @@ func (c Cleaner) cleanupACLRoleAndPolicy(client *api.Client) (bool, error) { return true, nil } -// cleanupInlineCerts deletes all inline certs that are not used by any gateway. -func (c Cleaner) cleanupInlineCerts(client *api.Client) (bool, error) { - certs, _, err := client.ConfigEntries().List(api.InlineCertificate, &api.QueryOptions{}) - if err != nil { - return false, fmt.Errorf("failed to list the inline certs: %w", err) - } - - gateways, _, err := client.ConfigEntries().List(api.APIGateway, &api.QueryOptions{}) - if err != nil { - return false, fmt.Errorf("failed to list the gateways: %w", err) - } - - if len(certs) == 0 { - return true, nil - } - - certSet := mapset.NewSet[string]() - certsToKeep := mapset.NewSet[string]() - for _, cert := range certs { - certSet.Add(cert.GetName()) - } - - for _, gateway := range gateways { - gtw := gateway.(*api.APIGatewayConfigEntry) - for _, listener := range gtw.Listeners { - if len(listener.TLS.Certificates) == 0 { - continue - } - - for _, cert := range listener.TLS.Certificates { - if cert.Kind == api.InlineCertificate && certSet.Contains(cert.Name) { - certsToKeep.Add(cert.Name) - } - } - } - } - - certsToDelete := 
certSet.Difference(certsToKeep) - var mErr error - deletedCerts := 0 - for cert := range certsToDelete.Iter() { - _, err := client.ConfigEntries().Delete(api.InlineCertificate, cert, &api.WriteOptions{}) - if err != nil { - mErr = errors.Join(mErr, fmt.Errorf("failed to delete inline-certificate %s: %w", cert, err)) - continue - } - c.Logger.Info("Deleted unused inline-certificate", "name", cert) - deletedCerts++ - } - - return certSet.Cardinality() == deletedCerts, mErr -} - func ignoreNotFoundError(err error) error { if err == nil { return nil diff --git a/control-plane/api-gateway/binding/cleanup_test.go b/control-plane/api-gateway/binding/cleanup_test.go index 76fcd60ef9..379944ef38 100644 --- a/control-plane/api-gateway/binding/cleanup_test.go +++ b/control-plane/api-gateway/binding/cleanup_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package binding import ( @@ -32,9 +29,6 @@ func TestCleaner_Run(t *testing.T) { expectedDeletedACLRoleIDs mapset.Set[string] aclPolicy *api.ACLPolicy expectedDeletedACLPolicyIDs mapset.Set[string] - inlineCerts []*api.InlineCertificateConfigEntry - expxectedDeletedCertsName mapset.Set[string] - apiGateways []*api.APIGatewayConfigEntry }{ // add binding rules that match on selector and name to be cleaned up "all old roles/policies/bindingrules and inline certs get cleaned up": { @@ -66,32 +60,6 @@ func TestCleaner_Run(t *testing.T) { Name: oldACLPolicyName, }, expectedDeletedACLPolicyIDs: mapset.NewSet("defg"), - inlineCerts: []*api.InlineCertificateConfigEntry{ - { - Kind: api.InlineCertificate, - Name: "my-inline-cert", - }, - }, - expxectedDeletedCertsName: mapset.NewSet("my-inline-cert"), - apiGateways: []*api.APIGatewayConfigEntry{ - { - Kind: api.APIGateway, - Name: "my-api-gateway", - Listeners: []api.APIGatewayListener{ - { - Name: "listener", - TLS: api.APIGatewayTLSConfiguration{ - Certificates: []api.ResourceReference{ - { - Kind: api.FileSystemCertificate, - Name: 
"cert", - }, - }, - }, - }, - }, - }, - }, }, "acl roles/policies/binding-rules do not get cleaned up because they are still being referenced": { bindingRules: []*api.ACLBindingRule{ @@ -117,32 +85,6 @@ func TestCleaner_Run(t *testing.T) { Name: oldACLPolicyName, }, expectedDeletedACLPolicyIDs: mapset.NewSet[string](), - inlineCerts: []*api.InlineCertificateConfigEntry{ - { - Kind: api.InlineCertificate, - Name: "my-inline-cert", - }, - }, - expxectedDeletedCertsName: mapset.NewSet("my-inline-cert"), - apiGateways: []*api.APIGatewayConfigEntry{ - { - Kind: api.APIGateway, - Name: "my-api-gateway", - Listeners: []api.APIGatewayListener{ - { - Name: "listener", - TLS: api.APIGatewayTLSConfiguration{ - Certificates: []api.ResourceReference{ - { - Kind: api.FileSystemCertificate, - Name: "cert", - }, - }, - }, - }, - }, - }, - }, }, "acl roles/policies aren't deleted because one binding-rule still references them": { bindingRules: []*api.ACLBindingRule{ @@ -178,32 +120,6 @@ func TestCleaner_Run(t *testing.T) { Name: oldACLPolicyName, }, expectedDeletedACLPolicyIDs: mapset.NewSet[string](), - inlineCerts: []*api.InlineCertificateConfigEntry{ - { - Kind: api.InlineCertificate, - Name: "my-inline-cert", - }, - }, - expxectedDeletedCertsName: mapset.NewSet("my-inline-cert"), - apiGateways: []*api.APIGatewayConfigEntry{ - { - Kind: api.APIGateway, - Name: "my-api-gateway", - Listeners: []api.APIGatewayListener{ - { - Name: "listener", - TLS: api.APIGatewayTLSConfiguration{ - Certificates: []api.ResourceReference{ - { - Kind: api.FileSystemCertificate, - Name: "cert", - }, - }, - }, - }, - }, - }, - }, }, "inline cert does not get cleaned up because it is still being referenced": { bindingRules: []*api.ACLBindingRule{ @@ -234,55 +150,11 @@ func TestCleaner_Run(t *testing.T) { Name: oldACLPolicyName, }, expectedDeletedACLPolicyIDs: mapset.NewSet("defg"), - inlineCerts: []*api.InlineCertificateConfigEntry{ - { - Kind: api.InlineCertificate, - Name: "my-inline-cert", - }, - }, 
- expxectedDeletedCertsName: mapset.NewSet[string](), - apiGateways: []*api.APIGatewayConfigEntry{ - { - Kind: api.APIGateway, - Name: "my-api-gateway", - Listeners: []api.APIGatewayListener{ - { - Name: "listener", - TLS: api.APIGatewayTLSConfiguration{ - Certificates: []api.ResourceReference{ - { - Kind: api.FileSystemCertificate, - Name: "cert", - }, - }, - }, - }, - }, - }, - { - Kind: api.APIGateway, - Name: "my-api-gateway-2", - Listeners: []api.APIGatewayListener{ - { - Name: "listener", - TLS: api.APIGatewayTLSConfiguration{ - Certificates: []api.ResourceReference{ - { - Kind: api.InlineCertificate, - Name: "my-inline-cert", - }, - }, - }, - }, - }, - }, - }, }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { - deletedCertsName := mapset.NewSet[string]() deletedACLPolicyIDs := mapset.NewSet[string]() deletedACLRoleIDs := mapset.NewSet[string]() deletedACLBindingRuleIDs := mapset.NewSet[string]() @@ -309,16 +181,6 @@ func TestCleaner_Run(t *testing.T) { fmt.Fprintln(w, string(val)) case strings.HasPrefix(path, "/v1/acl/policy/") && method == "DELETE": deletedACLPolicyIDs.Add(strings.TrimPrefix(path, "/v1/acl/policy/")) - case path == "/v1/config/inline-certificate" && method == "GET": - val, err := json.Marshal(tc.inlineCerts) - require.NoError(t, err) - fmt.Fprintln(w, string(val)) - case path == "/v1/config/api-gateway": - val, err := json.Marshal(tc.apiGateways) - require.NoError(t, err) - fmt.Fprintln(w, string(val)) - case strings.HasPrefix(path, "/v1/config/inline-certificate/") && method == "DELETE": - deletedCertsName.Add(strings.TrimPrefix(path, "/v1/config/inline-certificate/")) default: w.WriteHeader(500) fmt.Fprintln(w, "Mock Server not configured for this route: "+r.URL.Path) @@ -352,7 +214,6 @@ func TestCleaner_Run(t *testing.T) { require.ElementsMatch(t, mapset.Sorted(tc.expectedDeletedACLBindingRuleIDs), mapset.Sorted(deletedACLBindingRuleIDs)) require.ElementsMatch(t, mapset.Sorted(tc.expectedDeletedACLRoleIDs), 
mapset.Sorted(deletedACLRoleIDs)) require.ElementsMatch(t, mapset.Sorted(tc.expectedDeletedACLPolicyIDs), mapset.Sorted(deletedACLPolicyIDs)) - require.ElementsMatch(t, mapset.Sorted(tc.expxectedDeletedCertsName), mapset.Sorted(deletedCertsName)) }) } } diff --git a/control-plane/api-gateway/binding/validation.go b/control-plane/api-gateway/binding/validation.go index de61569eb1..02ca210294 100644 --- a/control-plane/api-gateway/binding/validation.go +++ b/control-plane/api-gateway/binding/validation.go @@ -4,7 +4,6 @@ package binding import ( - "errors" "fmt" "strings" @@ -323,18 +322,9 @@ func validateJWT(gateway gwv1beta1.Gateway, listener gwv1beta1.Listener, resourc func validateCertificateRefs(gateway gwv1beta1.Gateway, refs []gwv1beta1.SecretObjectReference, resources *common.ResourceMap) error { for _, cert := range refs { - var mErr error // Verify that the reference has a group and kind that we support - if !common.NilOrEqual(cert.Group, "") { - mErr = errors.Join(mErr, fmt.Errorf("group is not supported: %q, supported Groups are \"\"", common.DerefStringOr(cert.Group, ""))) - } - - if !common.NilOrEqual(cert.Kind, common.KindSecret) { - mErr = errors.Join(mErr, fmt.Errorf("kind is not supported: %q, supported Kinds are %q", common.DerefStringOr(cert.Kind, ""), common.KindSecret)) - } - - if mErr != nil { - return fmt.Errorf("%w: %s", errListenerInvalidCertificateRef_NotSupported, mErr.Error()) + if !common.NilOrEqual(cert.Group, "") || !common.NilOrEqual(cert.Kind, common.KindSecret) { + return errListenerInvalidCertificateRef_NotSupported } // Verify that the reference is within the namespace or, diff --git a/control-plane/api-gateway/binding/validation_test.go b/control-plane/api-gateway/binding/validation_test.go index b784e514e1..c1c9e250ed 100644 --- a/control-plane/api-gateway/binding/validation_test.go +++ b/control-plane/api-gateway/binding/validation_test.go @@ -563,8 +563,8 @@ func TestValidateTLS(t *testing.T) { } actualAcceptedError, 
actualResolvedRefsError := validateTLS(tt.gateway, tt.tls, resources) - require.ErrorIs(t, actualResolvedRefsError, tt.expectedResolvedRefsErr) - require.ErrorIs(t, actualAcceptedError, tt.expectedAcceptedErr) + require.Equal(t, tt.expectedResolvedRefsErr, actualResolvedRefsError) + require.Equal(t, tt.expectedAcceptedErr, actualAcceptedError) }) } } diff --git a/control-plane/api-gateway/cache/consul.go b/control-plane/api-gateway/cache/consul.go index 21c8dcd3ec..8d2f5de9c9 100644 --- a/control-plane/api-gateway/cache/consul.go +++ b/control-plane/api-gateway/cache/consul.go @@ -60,7 +60,7 @@ const ( apiTimeout = 5 * time.Minute ) -var Kinds = []string{api.APIGateway, api.HTTPRoute, api.TCPRoute, api.FileSystemCertificate, api.JWTProvider} +var Kinds = []string{api.APIGateway, api.HTTPRoute, api.TCPRoute, api.InlineCertificate, api.JWTProvider} type Config struct { ConsulClientConfig *consul.Config @@ -758,12 +758,6 @@ func ignoreACLsDisabled(err error) error { return err } -// isPolicyExistsErr returns true if err is due to trying to call the -// policy create API when the policy already exists. -func isPolicyExistsErr(err error, policyName string) bool { - return isExistsErr(err, "Policy", policyName) -} - // isExistsErr returns true if err is due to trying to call an API for a given type and it already exists. func isExistsErr(err error, typeName, name string) bool { return err != nil && @@ -776,3 +770,9 @@ func isExistsErr(err error, typeName, name string) bool { func isRoleExistsErr(err error, roleName string) bool { return isExistsErr(err, "Role", roleName) } + +// isPolicyExistsErr returns true if err is due to trying to call the +// policy create API when the policy already exists. 
+func isPolicyExistsErr(err error, policyName string) bool { + return isExistsErr(err, "Policy", policyName) +} diff --git a/control-plane/api-gateway/cache/consul_test.go b/control-plane/api-gateway/cache/consul_test.go index 10ea7fc1ab..6ff8661498 100644 --- a/control-plane/api-gateway/cache/consul_test.go +++ b/control-plane/api-gateway/cache/consul_test.go @@ -1539,9 +1539,9 @@ func Test_Run(t *testing.T) { tcpRoute := setupTCPRoute() tcpRoutes := []*api.TCPRouteConfigEntry{tcpRoute} - // setup file-system certs - fileSystemCert := setupFileSystemCertificate() - certs := []*api.FileSystemCertificateConfigEntry{fileSystemCert} + // setup inline certs + inlineCert := setupInlineCertificate() + certs := []*api.InlineCertificateConfigEntry{inlineCert} // setup jwt providers jwtProvider := setupJWTProvider() @@ -1573,7 +1573,7 @@ func Test_Run(t *testing.T) { return } fmt.Fprintln(w, string(val)) - case "/v1/config/file-system-certificate": + case "/v1/config/inline-certificate": val, err := json.Marshal(certs) if err != nil { w.WriteHeader(500) @@ -1627,7 +1627,7 @@ func Test_Run(t *testing.T) { } expectedCache := loadedReferenceMaps([]api.ConfigEntry{ - gw, tcpRoute, httpRouteOne, httpRouteTwo, fileSystemCert, jwtProvider, + gw, tcpRoute, httpRouteOne, httpRouteTwo, inlineCert, jwtProvider, }) ctx, cancelFn := context.WithCancel(context.Background()) @@ -1677,11 +1677,11 @@ func Test_Run(t *testing.T) { }) certNsn := types.NamespacedName{ - Name: fileSystemCert.Name, - Namespace: fileSystemCert.Namespace, + Name: inlineCert.Name, + Namespace: inlineCert.Namespace, } - certSubscriber := c.Subscribe(ctx, api.FileSystemCertificate, func(cfe api.ConfigEntry) []types.NamespacedName { + certSubscriber := c.Subscribe(ctx, api.InlineCertificate, func(cfe api.ConfigEntry) []types.NamespacedName { return []types.NamespacedName{ {Name: cfe.GetName(), Namespace: cfe.GetNamespace()}, } @@ -1968,10 +1968,10 @@ func setupTCPRoute() *api.TCPRouteConfigEntry { } } -func 
setupFileSystemCertificate() *api.FileSystemCertificateConfigEntry { - return &api.FileSystemCertificateConfigEntry{ - Kind: api.FileSystemCertificate, - Name: "file-system-cert", +func setupInlineCertificate() *api.InlineCertificateConfigEntry { + return &api.InlineCertificateConfigEntry{ + Kind: api.InlineCertificate, + Name: "inline-cert", Certificate: "cert", PrivateKey: "super secret", Meta: map[string]string{ diff --git a/control-plane/api-gateway/common/diff.go b/control-plane/api-gateway/common/diff.go index 6f6b20b4cc..7db86807b7 100644 --- a/control-plane/api-gateway/common/diff.go +++ b/control-plane/api-gateway/common/diff.go @@ -70,8 +70,8 @@ func EntriesEqual(a, b api.ConfigEntry) bool { if bCast, ok := b.(*api.TCPRouteConfigEntry); ok { return tcpRoutesEqual(aCast, bCast) } - case *api.FileSystemCertificateConfigEntry: - if bCast, ok := b.(*api.FileSystemCertificateConfigEntry); ok { + case *api.InlineCertificateConfigEntry: + if bCast, ok := b.(*api.InlineCertificateConfigEntry); ok { return certificatesEqual(aCast, bCast) } } @@ -323,7 +323,7 @@ func (e entryComparator) tcpRouteServicesEqual(a, b api.TCPService) bool { orDefault(a.Partition, e.partitionA) == orDefault(b.Partition, e.partitionB) } -func certificatesEqual(a, b *api.FileSystemCertificateConfigEntry) bool { +func certificatesEqual(a, b *api.InlineCertificateConfigEntry) bool { if a == nil || b == nil { return false } @@ -336,7 +336,7 @@ func certificatesEqual(a, b *api.FileSystemCertificateConfigEntry) bool { }).certificatesEqual(*a, *b) } -func (e entryComparator) certificatesEqual(a, b api.FileSystemCertificateConfigEntry) bool { +func (e entryComparator) certificatesEqual(a, b api.InlineCertificateConfigEntry) bool { return a.Kind == b.Kind && a.Name == b.Name && e.namespaceA == e.namespaceB && diff --git a/control-plane/api-gateway/common/diff_test.go b/control-plane/api-gateway/common/diff_test.go index ae256a1b6b..04312c8162 100644 --- 
a/control-plane/api-gateway/common/diff_test.go +++ b/control-plane/api-gateway/common/diff_test.go @@ -32,7 +32,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -93,7 +93,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -157,7 +157,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -218,7 +218,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -282,7 +282,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -343,7 +343,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -407,7 +407,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -468,7 +468,7 @@ func TestEntriesEqual(t *testing.T) 
{ TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -532,7 +532,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -593,7 +593,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -657,7 +657,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -718,7 +718,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -782,7 +782,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -843,7 +843,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -907,7 +907,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: 
"cert", SectionName: "section", Partition: "partition", @@ -968,7 +968,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1032,7 +1032,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1093,7 +1093,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1157,7 +1157,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1218,7 +1218,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1282,7 +1282,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert-2", SectionName: "section", Partition: "partition", @@ -1343,7 +1343,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1407,7 +1407,7 @@ func TestEntriesEqual(t *testing.T) { TLS: 
api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1468,7 +1468,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1532,7 +1532,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1593,7 +1593,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1657,7 +1657,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1718,7 +1718,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1782,7 +1782,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1843,7 +1843,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: 
api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1907,7 +1907,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -1968,7 +1968,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -2032,7 +2032,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", @@ -2093,7 +2093,7 @@ func TestEntriesEqual(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: "cert", SectionName: "section", Partition: "partition", diff --git a/control-plane/api-gateway/common/helm_config.go b/control-plane/api-gateway/common/helm_config.go index d551757c5b..ecf245c04c 100644 --- a/control-plane/api-gateway/common/helm_config.go +++ b/control-plane/api-gateway/common/helm_config.go @@ -18,9 +18,7 @@ type HelmConfig struct { // ImageDataplane is the Consul Dataplane image to use in gateway deployments. ImageDataplane string // ImageConsulK8S is the Consul Kubernetes Control Plane image to use in gateway deployments. - ImageConsulK8S string - // GlobalImagePullPolicy is the pull policy to use for all images used in gateway deployments. 
- GlobalImagePullPolicy string + ImageConsulK8S string ConsulDestinationNamespace string NamespaceMirroringPrefix string EnableNamespaces bool diff --git a/control-plane/api-gateway/common/resources.go b/control-plane/api-gateway/common/resources.go index 514bb3c92a..051c914ae7 100644 --- a/control-plane/api-gateway/common/resources.go +++ b/control-plane/api-gateway/common/resources.go @@ -191,7 +191,7 @@ func (s *ResourceMap) Certificate(key types.NamespacedName) *corev1.Secret { if !s.certificates.Contains(key) { return nil } - consulKey := NormalizeMeta(s.toConsulReference(api.FileSystemCertificate, key)) + consulKey := NormalizeMeta(s.toConsulReference(api.InlineCertificate, key)) if secret, ok := s.certificateGateways[consulKey]; ok { return secret.secret } @@ -201,7 +201,7 @@ func (s *ResourceMap) Certificate(key types.NamespacedName) *corev1.Secret { func (s *ResourceMap) ReferenceCountCertificate(secret corev1.Secret) { key := client.ObjectKeyFromObject(&secret) s.certificates.Add(key) - consulKey := NormalizeMeta(s.toConsulReference(api.FileSystemCertificate, key)) + consulKey := NormalizeMeta(s.toConsulReference(api.InlineCertificate, key)) if _, ok := s.certificateGateways[consulKey]; !ok { s.certificateGateways[consulKey] = &certificate{ secret: &secret, @@ -231,7 +231,7 @@ func (s *ResourceMap) ReferenceCountGateway(gateway gwv1beta1.Gateway) { set.certificates.Add(certificateKey) - consulCertificateKey := s.toConsulReference(api.FileSystemCertificate, certificateKey) + consulCertificateKey := s.toConsulReference(api.InlineCertificate, certificateKey) certificate, ok := s.certificateGateways[NormalizeMeta(consulCertificateKey)] if ok { certificate.gateways.Add(key) @@ -270,7 +270,7 @@ func (s *ResourceMap) ResourcesToGC(key types.NamespacedName) []api.ResourceRefe // the route altogether toGC = append(toGC, id) } - case api.FileSystemCertificate: + case api.InlineCertificate: if s.processedCertificates.Contains(id) { continue } @@ -323,7 +323,7 @@ 
func (s *ResourceMap) ReferenceCountConsulTCPRoute(route api.TCPRouteConfigEntry s.consulTCPRoutes[NormalizeMeta(key)] = set } -func (s *ResourceMap) ReferenceCountConsulCertificate(cert api.FileSystemCertificateConfigEntry) { +func (s *ResourceMap) ReferenceCountConsulCertificate(cert api.InlineCertificateConfigEntry) { key := s.objectReference(&cert) var referenced *certificate @@ -644,29 +644,36 @@ func (s *ResourceMap) CanGCTCPRouteOnUnbind(id api.ResourceReference) bool { return true } -func (s *ResourceMap) TranslateFileSystemCertificate(key types.NamespacedName) { - consulKey := s.toConsulReference(api.FileSystemCertificate, key) +func (s *ResourceMap) TranslateInlineCertificate(key types.NamespacedName) error { + consulKey := s.toConsulReference(api.InlineCertificate, key) certificate, ok := s.certificateGateways[NormalizeMeta(consulKey)] if !ok { - return + return nil } if certificate.secret == nil { - return + return nil } - // add to the processed set so that we don't GC it. + consulCertificate, err := s.translator.ToInlineCertificate(*certificate.secret) + if err != nil { + return err + } + + // add to the processed set so we don't GC it. s.processedCertificates.Add(consulKey) s.consulMutations = append(s.consulMutations, &ConsulUpdateOperation{ - Entry: s.translator.ToFileSystemCertificate(*certificate.secret), + Entry: consulCertificate, // just swallow the error and log it since we can't propagate status back on a certificate. 
- OnUpdate: func(err error) { + OnUpdate: func(error) { if err != nil { s.logger.Error(err, "error syncing certificate to Consul") } }, }) + + return nil } func (s *ResourceMap) Mutations() []*ConsulUpdateOperation { diff --git a/control-plane/api-gateway/common/translation.go b/control-plane/api-gateway/common/translation.go index d955b0212d..5161e6b033 100644 --- a/control-plane/api-gateway/common/translation.go +++ b/control-plane/api-gateway/common/translation.go @@ -4,7 +4,6 @@ package common import ( - "fmt" "strings" corev1 "k8s.io/api/core/v1" @@ -110,7 +109,7 @@ func (t ResourceTranslator) toAPIGatewayListener(gateway gwv1beta1.Gateway, list ref := IndexedNamespacedNameWithDefault(ref.Name, ref.Namespace, namespace) if resources.Certificate(ref) != nil { - certificates = append(certificates, t.NonNormalizedConfigEntryReference(api.FileSystemCertificate, ref)) + certificates = append(certificates, t.NonNormalizedConfigEntryReference(api.InlineCertificate, ref)) } } } @@ -530,19 +529,31 @@ func (t ResourceTranslator) translateTCPRouteRule(route gwv1alpha2.TCPRoute, ref return api.TCPService{}, false } -func (t ResourceTranslator) ToFileSystemCertificate(secret corev1.Secret) *api.FileSystemCertificateConfigEntry { - return &api.FileSystemCertificateConfigEntry{ - Kind: api.FileSystemCertificate, +func (t ResourceTranslator) ToInlineCertificate(secret corev1.Secret) (*api.InlineCertificateConfigEntry, error) { + certificate, privateKey, err := ParseCertificateData(secret) + if err != nil { + return nil, err + } + + err = ValidateKeyLength(privateKey) + if err != nil { + return nil, err + } + + namespace := t.Namespace(secret.Namespace) + + return &api.InlineCertificateConfigEntry{ + Kind: api.InlineCertificate, Name: secret.Name, - Namespace: t.Namespace(secret.Namespace), + Namespace: namespace, Partition: t.ConsulPartition, - Certificate: fmt.Sprintf("/consul/gateway-certificates/%s_%s_tls.crt", secret.Namespace, secret.Name), - PrivateKey: 
fmt.Sprintf("/consul/gateway-certificates/%s_%s_tls.key", secret.Namespace, secret.Name), + Certificate: strings.TrimSpace(certificate), + PrivateKey: strings.TrimSpace(privateKey), Meta: t.addDatacenterToMeta(map[string]string{ constants.MetaKeyKubeNS: secret.Namespace, constants.MetaKeyKubeName: secret.Name, }), - } + }, nil } func EntryToNamespacedName(entry api.ConfigEntry) types.NamespacedName { diff --git a/control-plane/api-gateway/common/translation_test.go b/control-plane/api-gateway/common/translation_test.go index e841464b9a..4331e2b77a 100644 --- a/control-plane/api-gateway/common/translation_test.go +++ b/control-plane/api-gateway/common/translation_test.go @@ -303,7 +303,7 @@ func TestTranslator_ToAPIGateway(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: listenerOneCertName, Namespace: listenerOneCertConsulNamespace, }, @@ -321,7 +321,7 @@ func TestTranslator_ToAPIGateway(t *testing.T) { TLS: api.APIGatewayTLSConfiguration{ Certificates: []api.ResourceReference{ { - Kind: api.FileSystemCertificate, + Kind: api.InlineCertificate, Name: listenerTwoCertName, Namespace: listenerTwoCertConsulNamespace, }, diff --git a/control-plane/api-gateway/controllers/gateway_controller.go b/control-plane/api-gateway/controllers/gateway_controller.go index 004b2bc1b1..7cffc85924 100644 --- a/control-plane/api-gateway/controllers/gateway_controller.go +++ b/control-plane/api-gateway/controllers/gateway_controller.go @@ -136,10 +136,10 @@ func (r *GatewayController) Reconcile(ctx context.Context, req ctrl.Request) (ct return ctrl.Result{}, err } - // fetch our file-system-certificates from cache, this needs to happen + // fetch our inline certificates from cache, this needs to happen // here since the certificates need to be reference counted before // the gateways. 
- r.fetchConsulFileSystemCertificates(resources) + r.fetchConsulInlineCertificates(resources) // add our current gateway even if it's not controlled by us so we // can garbage collect any resources for it. @@ -365,7 +365,10 @@ func configEntriesTo[T api.ConfigEntry](entries []api.ConfigEntry) []T { func (r *GatewayController) deleteGatekeeperResources(ctx context.Context, log logr.Logger, gw *gwv1beta1.Gateway) error { gk := gatekeeper.New(log, r.Client) - err := gk.Delete(ctx, *gw) + err := gk.Delete(ctx, types.NamespacedName{ + Namespace: gw.Namespace, + Name: gw.Name, + }) if err != nil { return err } @@ -481,8 +484,8 @@ func SetupGatewayControllerWithManager(ctx context.Context, mgr ctrl.Manager, co &handler.EnqueueRequestForObject{}, ). WatchesRawSource( - // Subscribe to changes from Consul for FileSystemCertificates - &source.Channel{Source: c.Subscribe(ctx, api.FileSystemCertificate, r.transformConsulFileSystemCertificate(ctx)).Events()}, + // Subscribe to changes from Consul for InlineCertificates + &source.Channel{Source: c.Subscribe(ctx, api.InlineCertificate, r.transformConsulInlineCertificate(ctx)).Events()}, &handler.EnqueueRequestForObject{}, ). 
WatchesRawSource( @@ -664,7 +667,7 @@ func (r *GatewayController) transformConsulTCPRoute(ctx context.Context) func(en } } -func (r *GatewayController) transformConsulFileSystemCertificate(ctx context.Context) func(entry api.ConfigEntry) []types.NamespacedName { +func (r *GatewayController) transformConsulInlineCertificate(ctx context.Context) func(entry api.ConfigEntry) []types.NamespacedName { return func(entry api.ConfigEntry) []types.NamespacedName { certificateKey := api.ResourceReference{ Kind: entry.GetKind(), @@ -1216,8 +1219,8 @@ func (c *GatewayController) fetchConsulTCPRoutes(ref api.ResourceReference, reso } } -func (c *GatewayController) fetchConsulFileSystemCertificates(resources *common.ResourceMap) { - for _, cert := range configEntriesTo[*api.FileSystemCertificateConfigEntry](c.cache.List(api.FileSystemCertificate)) { +func (c *GatewayController) fetchConsulInlineCertificates(resources *common.ResourceMap) { + for _, cert := range configEntriesTo[*api.InlineCertificateConfigEntry](c.cache.List(api.InlineCertificate)) { resources.ReferenceCountConsulCertificate(*cert) } } diff --git a/control-plane/api-gateway/controllers/gateway_controller_integration_test.go b/control-plane/api-gateway/controllers/gateway_controller_integration_test.go index b9a3f9cc9e..3c64f488c9 100644 --- a/control-plane/api-gateway/controllers/gateway_controller_integration_test.go +++ b/control-plane/api-gateway/controllers/gateway_controller_integration_test.go @@ -153,7 +153,7 @@ func TestControllerDoesNotInfinitelyReconcile(t *testing.T) { gwSub := resourceCache.Subscribe(ctx, api.APIGateway, gwCtrl.transformConsulGateway) httpRouteSub := resourceCache.Subscribe(ctx, api.HTTPRoute, gwCtrl.transformConsulHTTPRoute(ctx)) tcpRouteSub := resourceCache.Subscribe(ctx, api.TCPRoute, gwCtrl.transformConsulTCPRoute(ctx)) - fileSystemCertSub := resourceCache.Subscribe(ctx, api.FileSystemCertificate, gwCtrl.transformConsulFileSystemCertificate(ctx)) + inlineCertSub := 
resourceCache.Subscribe(ctx, api.InlineCertificate, gwCtrl.transformConsulInlineCertificate(ctx)) cert := tc.certFn(t, ctx, k8sClient, tc.namespace) k8sGWObj := tc.gwFn(t, ctx, k8sClient, tc.namespace) @@ -227,7 +227,7 @@ func TestControllerDoesNotInfinitelyReconcile(t *testing.T) { tcpRouteDone = true w.Done() } - case <-fileSystemCertSub.Events(): + case <-inlineCertSub.Events(): } } }(wg) @@ -257,7 +257,7 @@ func TestControllerDoesNotInfinitelyReconcile(t *testing.T) { gwRef := gwCtrl.Translator.ConfigEntryReference(api.APIGateway, gwNamespaceName) httpRouteRef := gwCtrl.Translator.ConfigEntryReference(api.HTTPRoute, httpRouteNamespaceName) tcpRouteRef := gwCtrl.Translator.ConfigEntryReference(api.TCPRoute, tcpRouteNamespaceName) - certRef := gwCtrl.Translator.ConfigEntryReference(api.FileSystemCertificate, certNamespaceName) + certRef := gwCtrl.Translator.ConfigEntryReference(api.InlineCertificate, certNamespaceName) curGWModifyIndex := resourceCache.Get(gwRef).GetModifyIndex() curHTTPRouteModifyIndex := resourceCache.Get(httpRouteRef).GetModifyIndex() diff --git a/control-plane/api-gateway/gatekeeper/dataplane.go b/control-plane/api-gateway/gatekeeper/dataplane.go index f4b6e25a88..16839cbb09 100644 --- a/control-plane/api-gateway/gatekeeper/dataplane.go +++ b/control-plane/api-gateway/gatekeeper/dataplane.go @@ -23,11 +23,10 @@ const ( consulDataplaneDNSBindHost = "127.0.0.1" consulDataplaneDNSBindPort = 8600 defaultEnvoyProxyConcurrency = 1 - volumeNameForConnectInject = "consul-connect-inject-data" - volumeNameForTLSCerts = "consul-gateway-tls-certificates" + volumeName = "consul-connect-inject-data" ) -func consulDataplaneContainer(metrics common.MetricsConfig, config common.HelmConfig, gcc v1alpha1.GatewayClassConfig, name, namespace string, mounts []corev1.VolumeMount) (corev1.Container, error) { +func consulDataplaneContainer(metrics common.MetricsConfig, config common.HelmConfig, gcc v1alpha1.GatewayClassConfig, name, namespace string) 
(corev1.Container, error) { // Extract the service account token's volume mount. var ( err error @@ -54,9 +53,8 @@ func consulDataplaneContainer(metrics common.MetricsConfig, config common.HelmCo } container := corev1.Container{ - Name: name, - Image: config.ImageDataplane, - ImagePullPolicy: corev1.PullPolicy(config.GlobalImagePullPolicy), + Name: name, + Image: config.ImageDataplane, // We need to set tmp dir to an ephemeral volume that we're mounting so that // consul-dataplane can write files to it. Otherwise, it wouldn't be able to @@ -79,7 +77,12 @@ func consulDataplaneContainer(metrics common.MetricsConfig, config common.HelmCo Value: "$(NODE_NAME)-virtual", }, }, - VolumeMounts: mounts, + VolumeMounts: []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/connect-inject", + }, + }, Args: args, ReadinessProbe: probe, } diff --git a/control-plane/api-gateway/gatekeeper/deployment.go b/control-plane/api-gateway/gatekeeper/deployment.go index 9519a42d74..511d1ebb29 100644 --- a/control-plane/api-gateway/gatekeeper/deployment.go +++ b/control-plane/api-gateway/gatekeeper/deployment.go @@ -107,9 +107,7 @@ func (g *Gatekeeper) deployment(gateway gwv1beta1.Gateway, gcc v1alpha1.GatewayC annotations[constants.AnnotationPrometheusPort] = strconv.Itoa(metrics.Port) } - volumes, mounts := volumesAndMounts(gateway) - - container, err := consulDataplaneContainer(metrics, config, gcc, gateway.Name, gateway.Namespace, mounts) + container, err := consulDataplaneContainer(metrics, config, gcc, gateway.Name, gateway.Namespace) if err != nil { return nil, err } @@ -131,7 +129,14 @@ func (g *Gatekeeper) deployment(gateway gwv1beta1.Gateway, gcc v1alpha1.GatewayC Annotations: annotations, }, Spec: corev1.PodSpec{ - Volumes: volumes, + Volumes: []corev1.Volume{ + { + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}, + }, + }, + }, InitContainers: []corev1.Container{ initContainer, }, 
diff --git a/control-plane/api-gateway/gatekeeper/gatekeeper.go b/control-plane/api-gateway/gatekeeper/gatekeeper.go index 538444303f..6cb7170fc8 100644 --- a/control-plane/api-gateway/gatekeeper/gatekeeper.go +++ b/control-plane/api-gateway/gatekeeper/gatekeeper.go @@ -8,12 +8,11 @@ import ( "fmt" "github.com/go-logr/logr" + "github.com/hashicorp/consul-k8s/control-plane/api-gateway/common" + "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" - - "github.com/hashicorp/consul-k8s/control-plane/api-gateway/common" - "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" ) // Gatekeeper is used to manage the lifecycle of Gateway deployments and services. @@ -51,10 +50,6 @@ func (g *Gatekeeper) Upsert(ctx context.Context, gateway gwv1beta1.Gateway, gcc return err } - if err := g.upsertSecret(ctx, gateway); err != nil { - return err - } - if err := g.upsertDeployment(ctx, gateway, gcc, config); err != nil { return err } @@ -64,18 +59,13 @@ func (g *Gatekeeper) Upsert(ctx context.Context, gateway gwv1beta1.Gateway, gcc // Delete removes the resources for handling routing of network traffic. // This is done in the reverse order of Upsert due to dependencies between resources. 
-func (g *Gatekeeper) Delete(ctx context.Context, gateway gwv1beta1.Gateway) error { - gatewayName := g.namespacedName(gateway) +func (g *Gatekeeper) Delete(ctx context.Context, gatewayName types.NamespacedName) error { g.Log.V(1).Info(fmt.Sprintf("Delete Gateway Deployment %s/%s", gatewayName.Namespace, gatewayName.Name)) if err := g.deleteDeployment(ctx, gatewayName); err != nil { return err } - if err := g.deleteSecret(ctx, gateway); err != nil { - return err - } - if err := g.deleteService(ctx, gatewayName); err != nil { return err } diff --git a/control-plane/api-gateway/gatekeeper/gatekeeper_test.go b/control-plane/api-gateway/gatekeeper/gatekeeper_test.go index f6342ec725..81f219a9ae 100644 --- a/control-plane/api-gateway/gatekeeper/gatekeeper_test.go +++ b/control-plane/api-gateway/gatekeeper/gatekeeper_test.go @@ -103,7 +103,6 @@ type resources struct { namespaces []*corev1.Namespace roles []*rbac.Role roleBindings []*rbac.RoleBinding - secrets []*corev1.Secret services []*corev1.Service serviceAccounts []*corev1.ServiceAccount } @@ -155,7 +154,6 @@ func TestUpsert(t *testing.T) { configureDeployment(name, namespace, labels, 3, nil, nil, "", "1"), }, roles: []*rbac.Role{}, - secrets: []*corev1.Secret{}, services: []*corev1.Service{}, serviceAccounts: []*corev1.ServiceAccount{}, }, @@ -205,9 +203,6 @@ func TestUpsert(t *testing.T) { configureDeployment(name, namespace, labels, 3, nil, nil, "", "1"), }, roles: []*rbac.Role{}, - secrets: []*corev1.Secret{ - configureSecret(name, namespace, labels, "1", nil), - }, services: []*corev1.Service{ configureService(name, namespace, labels, nil, (corev1.ServiceType)("NodePort"), []corev1.ServicePort{ { @@ -260,9 +255,6 @@ func TestUpsert(t *testing.T) { configureDeployment(name, namespace, labels, 3, nil, nil, "", "1"), }, roles: []*rbac.Role{}, - secrets: []*corev1.Secret{ - configureSecret(name, namespace, labels, "1", nil), - }, services: []*corev1.Service{ configureService(name, namespace, labels, nil, 
(corev1.ServiceType)("NodePort"), []corev1.ServicePort{ { @@ -321,9 +313,6 @@ func TestUpsert(t *testing.T) { roleBindings: []*rbac.RoleBinding{ configureRoleBinding(name, namespace, labels, "1"), }, - secrets: []*corev1.Secret{ - configureSecret(name, namespace, labels, "1", nil), - }, services: []*corev1.Service{ configureService(name, namespace, labels, nil, (corev1.ServiceType)("NodePort"), []corev1.ServicePort{ { @@ -378,7 +367,6 @@ func TestUpsert(t *testing.T) { configureDeployment(name, namespace, labels, 5, nil, nil, "", "1"), }, roles: []*rbac.Role{}, - secrets: []*corev1.Secret{}, services: []*corev1.Service{}, serviceAccounts: []*corev1.ServiceAccount{}, }, @@ -416,7 +404,6 @@ func TestUpsert(t *testing.T) { configureDeployment(name, namespace, labels, 2, nil, nil, "", "1"), }, roles: []*rbac.Role{}, - secrets: []*corev1.Secret{}, services: []*corev1.Service{}, serviceAccounts: []*corev1.ServiceAccount{}, }, @@ -459,9 +446,6 @@ func TestUpsert(t *testing.T) { roleBindings: []*rbac.RoleBinding{ configureRoleBinding(name, namespace, labels, "1"), }, - secrets: []*corev1.Secret{ - configureSecret(name, namespace, labels, "1", nil), - }, services: []*corev1.Service{ configureService(name, namespace, labels, nil, (corev1.ServiceType)("NodePort"), []corev1.ServicePort{ { @@ -485,9 +469,6 @@ func TestUpsert(t *testing.T) { roleBindings: []*rbac.RoleBinding{ configureRoleBinding(name, namespace, labels, "1"), }, - secrets: []*corev1.Secret{ - configureSecret(name, namespace, labels, "1", nil), - }, services: []*corev1.Service{ configureService(name, namespace, labels, nil, (corev1.ServiceType)("NodePort"), []corev1.ServicePort{ { @@ -550,9 +531,6 @@ func TestUpsert(t *testing.T) { roleBindings: []*rbac.RoleBinding{ configureRoleBinding(name, namespace, labels, "1"), }, - secrets: []*corev1.Secret{ - configureSecret(name, namespace, labels, "1", nil), - }, services: []*corev1.Service{ configureService(name, namespace, labels, nil, 
(corev1.ServiceType)("NodePort"), []corev1.ServicePort{ { @@ -581,9 +559,6 @@ func TestUpsert(t *testing.T) { roleBindings: []*rbac.RoleBinding{ configureRoleBinding(name, namespace, labels, "1"), }, - secrets: []*corev1.Secret{ - configureSecret(name, namespace, labels, "1", nil), - }, services: []*corev1.Service{ configureService(name, namespace, labels, nil, (corev1.ServiceType)("NodePort"), []corev1.ServicePort{ { @@ -637,7 +612,6 @@ func TestUpsert(t *testing.T) { configureDeployment(name, namespace, labels, 5, nil, nil, "", "1"), }, roles: []*rbac.Role{}, - secrets: []*corev1.Secret{}, services: []*corev1.Service{}, serviceAccounts: []*corev1.ServiceAccount{}, }, @@ -691,7 +665,6 @@ func TestUpsert(t *testing.T) { finalResources: resources{ deployments: []*appsv1.Deployment{}, roles: []*rbac.Role{}, - secrets: []*corev1.Secret{}, services: []*corev1.Service{ configureService(name, namespace, labels, externalAndCopyAnnotations, (corev1.ServiceType)("NodePort"), []corev1.ServicePort{ { @@ -761,7 +734,6 @@ func TestUpsert(t *testing.T) { finalResources: resources{ deployments: []*appsv1.Deployment{}, roles: []*rbac.Role{}, - secrets: []*corev1.Secret{}, services: []*corev1.Service{ configureService(name, namespace, labels, copyAnnotations, (corev1.ServiceType)("NodePort"), []corev1.ServicePort{ { @@ -819,7 +791,6 @@ func TestUpsert(t *testing.T) { configureDeployment(name, namespace, labels, 8, nil, nil, "", "1"), }, roles: []*rbac.Role{}, - secrets: []*corev1.Secret{}, services: []*corev1.Service{}, serviceAccounts: []*corev1.ServiceAccount{}, }, @@ -861,7 +832,6 @@ func TestUpsert(t *testing.T) { configureDeployment(name, namespace, labels, 2, nil, nil, "", "1"), }, roles: []*rbac.Role{}, - secrets: []*corev1.Secret{}, services: []*corev1.Service{}, serviceAccounts: []*corev1.ServiceAccount{}, }, @@ -903,7 +873,6 @@ func TestUpsert(t *testing.T) { configureDeployment(name, namespace, labels, 5, nil, nil, "", "1"), }, roles: []*rbac.Role{}, - secrets: 
[]*corev1.Secret{}, services: []*corev1.Service{}, serviceAccounts: []*corev1.ServiceAccount{}, }, @@ -963,178 +932,12 @@ func TestUpsert(t *testing.T) { roleBindings: []*rbac.RoleBinding{ configureRoleBinding(name, namespace, labels, "1"), }, - secrets: []*corev1.Secret{}, services: []*corev1.Service{}, serviceAccounts: []*corev1.ServiceAccount{ configureServiceAccount(name, namespace, labels, "1"), }, }, }, - "create a new gateway with TLS certificate reference in the same namespace": { - gateway: gwv1beta1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: gwv1beta1.GatewaySpec{ - Listeners: []gwv1beta1.Listener{ - { - Name: "Listener 1", - Port: 443, - Protocol: "TCP", - TLS: &gwv1beta1.GatewayTLSConfig{ - CertificateRefs: []gwv1beta1.SecretObjectReference{ - { - Namespace: common.PointerTo(gwv1beta1.Namespace(namespace)), - Name: "tls-cert", - }, - }, - }, - }, - }, - }, - }, - gatewayClassConfig: v1alpha1.GatewayClassConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "consul-gatewayclassconfig", - }, - Spec: v1alpha1.GatewayClassConfigSpec{ - DeploymentSpec: v1alpha1.DeploymentSpec{ - DefaultInstances: common.PointerTo(int32(3)), - MaxInstances: common.PointerTo(int32(3)), - MinInstances: common.PointerTo(int32(1)), - }, - CopyAnnotations: v1alpha1.CopyAnnotationsSpec{}, - OpenshiftSCCName: "test-api-gateway", - }, - }, - helmConfig: common.HelmConfig{ - EnableOpenShift: false, - ImageDataplane: "hashicorp/consul-dataplane", - }, - initialResources: resources{ - secrets: []*corev1.Secret{ - { - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Secret", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "tls-cert", - Namespace: namespace, - }, - Data: map[string][]byte{ - corev1.TLSCertKey: []byte("cert"), - corev1.TLSPrivateKeyKey: []byte("key"), - }, - Type: corev1.SecretTypeTLS, - }, - }, - }, - finalResources: resources{ - deployments: []*appsv1.Deployment{ - configureDeployment(name, namespace, labels, 3, nil, nil, 
"", "1"), - }, - roles: []*rbac.Role{}, - roleBindings: []*rbac.RoleBinding{}, - secrets: []*corev1.Secret{ - configureSecret(name, namespace, labels, "1", map[string][]byte{ - "default_tls-cert_tls.crt": []byte("cert"), - "default_tls-cert_tls.key": []byte("key"), - }), - }, - services: []*corev1.Service{}, - serviceAccounts: []*corev1.ServiceAccount{}, - }, - }, - "create a new gateway with TLS certificate reference in a different namespace": { - gateway: gwv1beta1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: gwv1beta1.GatewaySpec{ - Listeners: []gwv1beta1.Listener{ - { - Name: "Listener 1", - Port: 443, - Protocol: "TCP", - TLS: &gwv1beta1.GatewayTLSConfig{ - CertificateRefs: []gwv1beta1.SecretObjectReference{ - { - Namespace: common.PointerTo(gwv1beta1.Namespace("non-default")), - Name: "tls-cert", - }, - }, - }, - }, - }, - }, - }, - gatewayClassConfig: v1alpha1.GatewayClassConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "consul-gatewayclassconfig", - }, - Spec: v1alpha1.GatewayClassConfigSpec{ - DeploymentSpec: v1alpha1.DeploymentSpec{ - DefaultInstances: common.PointerTo(int32(3)), - MaxInstances: common.PointerTo(int32(3)), - MinInstances: common.PointerTo(int32(1)), - }, - CopyAnnotations: v1alpha1.CopyAnnotationsSpec{}, - OpenshiftSCCName: "test-api-gateway", - }, - }, - helmConfig: common.HelmConfig{ - EnableOpenShift: false, - ImageDataplane: "hashicorp/consul-dataplane", - }, - initialResources: resources{ - namespaces: []*corev1.Namespace{ - { - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Namespace", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "non-default", - }, - }, - }, - secrets: []*corev1.Secret{ - { - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Secret", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "tls-cert", - Namespace: "non-default", - }, - Data: map[string][]byte{ - corev1.TLSCertKey: []byte("cert"), - corev1.TLSPrivateKeyKey: []byte("key"), - }, - Type: 
corev1.SecretTypeTLS, - }, - }, - }, - finalResources: resources{ - deployments: []*appsv1.Deployment{ - configureDeployment(name, namespace, labels, 3, nil, nil, "", "1"), - }, - roles: []*rbac.Role{}, - roleBindings: []*rbac.RoleBinding{}, - secrets: []*corev1.Secret{ - configureSecret(name, namespace, labels, "1", map[string][]byte{ - "non-default_tls-cert_tls.crt": []byte("cert"), - "non-default_tls-cert_tls.key": []byte("key"), - }), - }, - services: []*corev1.Service{}, - serviceAccounts: []*corev1.ServiceAccount{}, - }, - }, } for name, tc := range cases { @@ -1321,73 +1124,6 @@ func TestDelete(t *testing.T) { serviceAccounts: []*corev1.ServiceAccount{}, }, }, - "delete a gateway deployment with a Secret": { - gateway: gwv1beta1.Gateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: gwv1beta1.GatewaySpec{ - Listeners: listeners, - }, - }, - gatewayClassConfig: v1alpha1.GatewayClassConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "consul-gatewayclassconfig", - }, - Spec: v1alpha1.GatewayClassConfigSpec{ - DeploymentSpec: v1alpha1.DeploymentSpec{ - DefaultInstances: common.PointerTo(int32(3)), - MaxInstances: common.PointerTo(int32(3)), - MinInstances: common.PointerTo(int32(1)), - }, - CopyAnnotations: v1alpha1.CopyAnnotationsSpec{}, - ServiceType: (*corev1.ServiceType)(common.PointerTo("NodePort")), - }, - }, - helmConfig: common.HelmConfig{ - AuthMethod: "method", - ImageDataplane: dataplaneImage, - }, - initialResources: resources{ - deployments: []*appsv1.Deployment{ - configureDeployment(name, namespace, labels, 3, nil, nil, "", "1"), - }, - roles: []*rbac.Role{ - configureRole(name, namespace, labels, "1", false), - }, - roleBindings: []*rbac.RoleBinding{ - configureRoleBinding(name, namespace, labels, "1"), - }, - secrets: []*corev1.Secret{ - configureSecret(name, namespace, labels, "1", nil), - }, - services: []*corev1.Service{ - configureService(name, namespace, labels, nil, (corev1.ServiceType)("NodePort"), 
[]corev1.ServicePort{ - { - Name: "Listener 1", - Protocol: "TCP", - Port: 8080, - }, - { - Name: "Listener 2", - Protocol: "TCP", - Port: 8081, - }, - }, "1", true, false), - }, - serviceAccounts: []*corev1.ServiceAccount{ - configureServiceAccount(name, namespace, labels, "1"), - }, - }, - finalResources: resources{ - deployments: []*appsv1.Deployment{}, - roles: []*rbac.Role{}, - secrets: []*corev1.Secret{}, - services: []*corev1.Service{}, - serviceAccounts: []*corev1.ServiceAccount{}, - }, - }, } for name, tc := range cases { @@ -1406,7 +1142,10 @@ func TestDelete(t *testing.T) { gatekeeper := New(log, client) - err := gatekeeper.Delete(context.Background(), tc.gateway) + err := gatekeeper.Delete(context.Background(), types.NamespacedName{ + Namespace: tc.gateway.Namespace, + Name: tc.gateway.Name, + }) require.NoError(t, err) require.NoError(t, validateResourcesExist(t, client, tc.helmConfig, tc.finalResources, false)) require.NoError(t, validateResourcesAreDeleted(t, client, tc.initialResources)) @@ -1431,10 +1170,6 @@ func joinResources(resources resources) (objs []client.Object) { objs = append(objs, roleBinding) } - for _, secret := range resources.secrets { - objs = append(objs, secret) - } - for _, service := range resources.services { objs = append(objs, service) } @@ -1530,22 +1265,6 @@ func validateResourcesExist(t *testing.T, client client.Client, helmConfig commo require.Equal(t, namespace, actual) } - for _, expected := range resources.secrets { - actual := &corev1.Secret{} - err := client.Get(context.Background(), types.NamespacedName{ - Name: expected.Name, - Namespace: expected.Namespace, - }, actual) - if err != nil { - return err - } - - // Patch the createdAt label - actual.Labels[createdAtLabelKey] = createdAtLabelValue - - require.Equal(t, expected, actual) - } - for _, expected := range resources.roles { actual := &rbac.Role{} err := client.Get(context.Background(), types.NamespacedName{ @@ -1744,31 +1463,6 @@ func 
configureDeployment(name, namespace string, labels map[string]string, repli } } -func configureSecret(name, namespace string, labels map[string]string, resourceVersion string, data map[string][]byte) *corev1.Secret { - return &corev1.Secret{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Secret", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: labels, - ResourceVersion: resourceVersion, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "gateway.networking.k8s.io/v1beta1", - Kind: "Gateway", - Name: name, - Controller: common.PointerTo(true), - BlockOwnerDeletion: common.PointerTo(true), - }, - }, - }, - Data: data, - } -} - func configureRole(name, namespace string, labels map[string]string, resourceVersion string, openshiftEnabled bool) *rbac.Role { rules := []rbac.PolicyRule{} diff --git a/control-plane/api-gateway/gatekeeper/init.go b/control-plane/api-gateway/gatekeeper/init.go index 875e15dff3..aefded0735 100644 --- a/control-plane/api-gateway/gatekeeper/init.go +++ b/control-plane/api-gateway/gatekeeper/init.go @@ -50,7 +50,7 @@ func (g Gatekeeper) initContainer(config common.HelmConfig, name, namespace stri // Create expected volume mounts volMounts := []corev1.VolumeMount{ { - Name: volumeNameForConnectInject, + Name: volumeName, MountPath: "/consul/connect-inject", }, } @@ -72,9 +72,8 @@ func (g Gatekeeper) initContainer(config common.HelmConfig, name, namespace stri initContainerName := injectInitContainerName container := corev1.Container{ - Name: initContainerName, - Image: config.ImageConsulK8S, - ImagePullPolicy: corev1.PullPolicy(config.GlobalImagePullPolicy), + Name: initContainerName, + Image: config.ImageConsulK8S, Env: []corev1.EnvVar{ { diff --git a/control-plane/api-gateway/gatekeeper/ownership.go b/control-plane/api-gateway/gatekeeper/ownership.go deleted file mode 100644 index 9822dc226a..0000000000 --- a/control-plane/api-gateway/gatekeeper/ownership.go +++ /dev/null @@ -1,19 +0,0 
@@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package gatekeeper - -import ( - "sigs.k8s.io/controller-runtime/pkg/client" - gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" -) - -func isOwnedByGateway(o client.Object, gateway gwv1beta1.Gateway) bool { - for _, ref := range o.GetOwnerReferences() { - if ref.UID == gateway.GetUID() && ref.Name == gateway.GetName() { - // We found our gateway! - return true - } - } - return false -} diff --git a/control-plane/api-gateway/gatekeeper/secret.go b/control-plane/api-gateway/gatekeeper/secret.go deleted file mode 100644 index 65ee4c0a8b..0000000000 --- a/control-plane/api-gateway/gatekeeper/secret.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package gatekeeper - -import ( - "context" - "fmt" - - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - controllerruntime "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" - - "github.com/hashicorp/consul-k8s/control-plane/api-gateway/common" -) - -func (g *Gatekeeper) upsertSecret(ctx context.Context, gateway gwv1beta1.Gateway) error { - desiredSecret, err := g.secret(ctx, gateway) - if err != nil { - return fmt.Errorf("failed to create certificate secret for gateway %s/%s: %w", gateway.Namespace, gateway.Name, err) - } - - // If the Secret already exists, ensure that we own the Secret - existingSecret := &corev1.Secret{ObjectMeta: desiredSecret.ObjectMeta} - err = g.Client.Get(ctx, g.namespacedName(gateway), existingSecret) - if err != nil && !k8serrors.IsNotFound(err) { - return fmt.Errorf("failed to fetch existing Secret %s/%s: %w", gateway.Namespace, gateway.Name, err) - } else if !k8serrors.IsNotFound(err) { - if 
!isOwnedByGateway(existingSecret, gateway) { - return fmt.Errorf("existing Secret %s/%s is not owned by Gateway %s/%s", existingSecret.Namespace, existingSecret.Name, gateway.Namespace, gateway.Name) - } - } - - mutator := newSecretMutator(existingSecret, desiredSecret, gateway, g.Client.Scheme()) - - result, err := controllerruntime.CreateOrUpdate(ctx, g.Client, existingSecret, mutator) - if err != nil { - return err - } - - switch result { - case controllerutil.OperationResultCreated: - g.Log.V(1).Info("Created Secret") - case controllerutil.OperationResultUpdated: - g.Log.V(1).Info("Updated Secret") - case controllerutil.OperationResultNone: - g.Log.V(1).Info("No change to Secret") - } - - return nil -} - -func (g *Gatekeeper) deleteSecret(ctx context.Context, gw gwv1beta1.Gateway) error { - secret := &corev1.Secret{} - if err := g.Client.Get(ctx, g.namespacedName(gw), secret); err != nil { - if k8serrors.IsNotFound(err) { - return nil - } - return err - } - - if !isOwnedByGateway(secret, gw) { - return fmt.Errorf("existing Secret %s/%s is not owned by Gateway %s/%s", secret.Namespace, secret.Name, gw.Namespace, gw.Name) - } - - if err := g.Client.Delete(ctx, secret); err != nil { - if k8serrors.IsNotFound(err) { - return nil - } - return err - } - - return nil -} - -func (g *Gatekeeper) secret(ctx context.Context, gateway gwv1beta1.Gateway) (*corev1.Secret, error) { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: gateway.Namespace, - Name: gateway.Name, - Labels: common.LabelsForGateway(&gateway), - }, - Data: map[string][]byte{}, - Type: corev1.SecretTypeOpaque, - } - - for _, listener := range gateway.Spec.Listeners { - if listener.TLS == nil { - continue - } - - for _, ref := range listener.TLS.CertificateRefs { - // Only take action on Secret references - if !common.NilOrEqual(ref.Group, "") || !common.NilOrEqual(ref.Kind, common.KindSecret) { - continue - } - - key := types.NamespacedName{ - Namespace: 
common.ValueOr(ref.Namespace, gateway.Namespace), - Name: string(ref.Name), - } - - referencedSecret := &corev1.Secret{} - if err := g.Client.Get(ctx, key, referencedSecret); err != nil && k8serrors.IsNotFound(err) { - // If the referenced Secret is not found, log a message and continue. - // The issue will be raised on the Gateway status by the validation process. - g.Log.V(1).Info(fmt.Sprintf("Referenced certificate secret %s/%s not found", key.Namespace, key.Name)) - } else if err != nil { - return nil, fmt.Errorf("failed to fetch certificate secret %s/%s: %w", key.Namespace, key.Name, err) - } - - prefix := fmt.Sprintf("%s_%s_", key.Namespace, key.Name) - for k, v := range referencedSecret.Data { - secret.Data[prefix+k] = v - } - } - } - - return secret, nil -} - -func newSecretMutator(existing, desired *corev1.Secret, gateway gwv1beta1.Gateway, scheme *runtime.Scheme) resourceMutator { - return func() error { - existing.Data = desired.Data - return controllerruntime.SetControllerReference(&gateway, existing, scheme) - } -} diff --git a/control-plane/api-gateway/gatekeeper/volumes.go b/control-plane/api-gateway/gatekeeper/volumes.go deleted file mode 100644 index ef8c414575..0000000000 --- a/control-plane/api-gateway/gatekeeper/volumes.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package gatekeeper - -import ( - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/gateway-api/apis/v1beta1" - - "github.com/hashicorp/consul-k8s/control-plane/api-gateway/common" -) - -// volumesAndMounts generates the list of volumes for the Deployment and the list of volume -// mounts for the primary container in the Deployment. There are two volumes that are created: -// - one empty volume for holding connect-inject data -// - one volume holding all TLS certificates referenced by the Gateway. 
-func volumesAndMounts(gateway v1beta1.Gateway) ([]corev1.Volume, []corev1.VolumeMount) { - volumes := []corev1.Volume{ - { - Name: volumeNameForConnectInject, - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}, - }, - }, - { - Name: volumeNameForTLSCerts, - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: gateway.Name, - DefaultMode: common.PointerTo(int32(444)), - Optional: common.PointerTo(false), - }, - }, - }, - } - - mounts := []corev1.VolumeMount{ - { - Name: volumeNameForConnectInject, - MountPath: "/consul/connect-inject", - }, - { - Name: volumeNameForTLSCerts, - MountPath: "/consul/gateway-certificates", - }, - } - - return volumes, mounts -} diff --git a/control-plane/api/auth/v2beta1/auth_groupversion_info.go b/control-plane/api/auth/v2beta1/auth_groupversion_info.go new file mode 100644 index 0000000000..3329d86855 --- /dev/null +++ b/control-plane/api/auth/v2beta1/auth_groupversion_info.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package v2beta1 contains API Schema definitions for the consul.hashicorp.com v2beta1 API group +// +kubebuilder:object:generate=true +// +groupName=auth.consul.hashicorp.com +package v2beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + + // AuthGroup is a collection of auth resources. + AuthGroup = "auth.consul.hashicorp.com" + + // AuthGroupVersion is group version used to register these objects. + AuthGroupVersion = schema.GroupVersion{Group: AuthGroup, Version: "v2beta1"} + + // AuthSchemeBuilder is used to add go types to the GroupVersionKind scheme. + AuthSchemeBuilder = &scheme.Builder{GroupVersion: AuthGroupVersion} + + // AddAuthToScheme adds the types in this group-version to the given scheme. 
+ AddAuthToScheme = AuthSchemeBuilder.AddToScheme +) diff --git a/control-plane/api/auth/v2beta1/shared_types.go b/control-plane/api/auth/v2beta1/shared_types.go new file mode 100644 index 0000000000..a5225afb71 --- /dev/null +++ b/control-plane/api/auth/v2beta1/shared_types.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + "github.com/hashicorp/consul-k8s/control-plane/api/common" +) + +func meshConfigMeta() map[string]string { + return map[string]string{ + common.SourceKey: common.SourceValue, + } +} diff --git a/control-plane/api/auth/v2beta1/status.go b/control-plane/api/auth/v2beta1/status.go new file mode 100644 index 0000000000..cc75a1cd82 --- /dev/null +++ b/control-plane/api/auth/v2beta1/status.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Conditions is the schema for the conditions portion of the payload. +type Conditions []Condition + +// ConditionType is a camel-cased condition type. +type ConditionType string + +const ( + // ConditionSynced specifies that the resource has been synced with Consul. + ConditionSynced ConditionType = "Synced" +) + +// Conditions define a readiness condition for a Consul resource. +// See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +// +k8s:deepcopy-gen=true +// +k8s:openapi-gen=true +type Condition struct { + // Type of condition. + // +required + Type ConditionType `json:"type" description:"type of status condition"` + + // Status of the condition, one of True, False, Unknown. + // +required + Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + + // LastTransitionTime is the last time the condition transitioned from one status to another. 
+ // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" description:"last time the condition transitioned from one status to another"` + + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` +} + +// IsTrue is true if the condition is True. +func (c *Condition) IsTrue() bool { + if c == nil { + return false + } + return c.Status == corev1.ConditionTrue +} + +// IsFalse is true if the condition is False. +func (c *Condition) IsFalse() bool { + if c == nil { + return false + } + return c.Status == corev1.ConditionFalse +} + +// IsUnknown is true if the condition is Unknown. +func (c *Condition) IsUnknown() bool { + if c == nil { + return true + } + return c.Status == corev1.ConditionUnknown +} + +// +k8s:deepcopy-gen=true +// +k8s:openapi-gen=true +type Status struct { + // Conditions indicate the latest available observations of a resource's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions Conditions `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // LastSyncedTime is the last time the resource successfully synced with Consul. 
+ // +optional + LastSyncedTime *metav1.Time `json:"lastSyncedTime,omitempty" description:"last time the condition transitioned from one status to another"` +} + +func (s *Status) GetCondition(t ConditionType) *Condition { + for _, cond := range s.Conditions { + if cond.Type == t { + return &cond + } + } + return nil +} diff --git a/control-plane/api/auth/v2beta1/traffic_permissions_types.go b/control-plane/api/auth/v2beta1/traffic_permissions_types.go new file mode 100644 index 0000000000..f237fd3e12 --- /dev/null +++ b/control-plane/api/auth/v2beta1/traffic_permissions_types.go @@ -0,0 +1,237 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "google.golang.org/protobuf/testing/protocmp" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" +) + +const ( + trafficpermissionsKubeKind = "trafficpermissions" +) + +func init() { + AuthSchemeBuilder.Register(&TrafficPermissions{}, &TrafficPermissionsList{}) +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// TrafficPermissions is the Schema for the traffic-permissions API +// +kubebuilder:printcolumn:name="Synced",type="string",JSONPath=".status.conditions[?(@.type==\"Synced\")].status",description="The sync status of the resource with Consul" +// +kubebuilder:printcolumn:name="Last Synced",type="date",JSONPath=".status.lastSyncedTime",description="The last successful synced time of the resource with Consul" +// 
+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the resource" +// +kubebuilder:resource:shortName="traffic-permissions" +type TrafficPermissions struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec pbauth.TrafficPermissions `json:"spec,omitempty"` + Status `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TrafficPermissionsList contains a list of TrafficPermissions. +type TrafficPermissionsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []*TrafficPermissions `json:"items"` +} + +func (in *TrafficPermissions) ResourceID(namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: in.Name, + Type: pbauth.TrafficPermissionsType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + } +} + +func (in *TrafficPermissions) Resource(namespace, partition string) *pbresource.Resource { + return &pbresource.Resource{ + Id: in.ResourceID(namespace, partition), + Data: inject.ToProtoAny(&in.Spec), + Metadata: meshConfigMeta(), + } +} + +func (in *TrafficPermissions) MatchesConsul(candidate *pbresource.Resource, namespace, partition string) bool { + return cmp.Equal( + in.Resource(namespace, partition), + candidate, + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + protocmp.Transform(), + cmpopts.SortSlices(func(a, b any) bool { return fmt.Sprintf("%v", a) < fmt.Sprintf("%v", b) }), + ) +} + +func (in *TrafficPermissions) AddFinalizer(f string) { + in.ObjectMeta.Finalizers = append(in.Finalizers(), f) +} + +func (in *TrafficPermissions) RemoveFinalizer(f string) { + var newFinalizers []string + for _, oldF := range in.Finalizers() { + if oldF != f { + newFinalizers = append(newFinalizers, oldF) + } + } + in.ObjectMeta.Finalizers = newFinalizers +} + +func (in 
*TrafficPermissions) Finalizers() []string { + return in.ObjectMeta.Finalizers +} + +func (in *TrafficPermissions) KubeKind() string { + return trafficpermissionsKubeKind +} + +func (in *TrafficPermissions) KubernetesName() string { + return in.ObjectMeta.Name +} + +func (in *TrafficPermissions) SetSyncedCondition(status corev1.ConditionStatus, reason, message string) { + in.Status.Conditions = Conditions{ + { + Type: ConditionSynced, + Status: status, + LastTransitionTime: metav1.Now(), + Reason: reason, + Message: message, + }, + } +} + +func (in *TrafficPermissions) SetLastSyncedTime(time *metav1.Time) { + in.Status.LastSyncedTime = time +} + +func (in *TrafficPermissions) SyncedCondition() (status corev1.ConditionStatus, reason, message string) { + cond := in.Status.GetCondition(ConditionSynced) + if cond == nil { + return corev1.ConditionUnknown, "", "" + } + return cond.Status, cond.Reason, cond.Message +} + +func (in *TrafficPermissions) SyncedConditionStatus() corev1.ConditionStatus { + condition := in.Status.GetCondition(ConditionSynced) + if condition == nil { + return corev1.ConditionUnknown + } + return condition.Status +} + +func (in *TrafficPermissions) Validate(tenancy common.ConsulTenancyConfig) error { + var errs field.ErrorList + path := field.NewPath("spec") + var tp pbauth.TrafficPermissions + res := in.Resource(tenancy.ConsulDestinationNamespace, tenancy.ConsulPartition) + if err := res.Data.UnmarshalTo(&tp); err != nil { + return fmt.Errorf("error parsing resource data as type %q: %s", &tp, err) + } + + switch tp.Action { + case pbauth.Action_ACTION_ALLOW: + case pbauth.Action_ACTION_DENY: + case pbauth.Action_ACTION_UNSPECIFIED: + fallthrough + default: + errs = append(errs, field.Invalid(path.Child("action"), tp.Action, "action must be either allow or deny")) + } + + if tp.Destination == nil || (len(tp.Destination.IdentityName) == 0) { + errs = append(errs, field.Invalid(path.Child("destination"), tp.Destination, "cannot be empty")) + } + // 
Validate permissions + for i, permission := range tp.Permissions { + if err := validatePermission(permission, path.Child("permissions").Index(i)); err != nil { + errs = append(errs, err...) + } + } + if len(errs) > 0 { + return apierrors.NewInvalid( + schema.GroupKind{Group: AuthGroup, Kind: common.TrafficPermissions}, + in.KubernetesName(), errs) + } + return nil +} + +func validatePermission(p *pbauth.Permission, path *field.Path) field.ErrorList { + var errs field.ErrorList + + for s, src := range p.Sources { + if sourceHasIncompatibleTenancies(src) { + errs = append(errs, field.Invalid(path.Child("sources").Index(s), src, "permission sources may not specify partitions, peers, and sameness_groups together")) + } + + if src.Namespace == "" && src.IdentityName != "" { + errs = append(errs, field.Invalid(path.Child("sources").Index(s), src, "permission sources may not have wildcard namespaces and explicit names")) + } + + // Excludes are only valid for wildcard sources. + if src.IdentityName != "" && len(src.Exclude) > 0 { + errs = append(errs, field.Invalid(path.Child("sources").Index(s), src, "must be defined on wildcard sources")) + continue + } + + for e, d := range src.Exclude { + if sourceHasIncompatibleTenancies(d) { + errs = append(errs, field.Invalid(path.Child("sources").Index(s).Child("exclude").Index(e), d, "permissions sources may not specify partitions, peers, and sameness_groups together")) + } + + if d.Namespace == "" && d.IdentityName != "" { + errs = append(errs, field.Invalid(path.Child("sources").Index(s).Child("exclude").Index(e), d, "permission sources may not have wildcard namespaces and explicit names")) + } + } + } + for d, dest := range p.DestinationRules { + if (len(dest.PathExact) > 0 && len(dest.PathPrefix) > 0) || + (len(dest.PathRegex) > 0 && len(dest.PathExact) > 0) || + (len(dest.PathRegex) > 0 && len(dest.PathPrefix) > 0) { + errs = append(errs, field.Invalid(path.Child("destinationRules").Index(d), dest, "prefix values, regex 
values, and explicit names must not combined")) + } + if len(dest.Exclude) > 0 { + for e, excl := range dest.Exclude { + if (len(excl.PathExact) > 0 && len(excl.PathPrefix) > 0) || + (len(excl.PathRegex) > 0 && len(excl.PathExact) > 0) || + (len(excl.PathRegex) > 0 && len(excl.PathPrefix) > 0) { + errs = append(errs, field.Invalid(path.Child("destinationRules").Index(d).Child("exclude").Index(e), excl, "prefix values, regex values, and explicit names must not combined")) + } + } + } + } + + return errs +} + +func sourceHasIncompatibleTenancies(src pbauth.SourceToSpiffe) bool { + peerSet := src.GetPeer() != common.DefaultPeerName && src.GetPeer() != "" + apSet := src.GetPartition() != common.DefaultPartitionName && src.GetPartition() != "" + sgSet := src.GetSamenessGroup() != "" + + return (apSet && peerSet) || (apSet && sgSet) || (peerSet && sgSet) +} + +// DefaultNamespaceFields is required as part of the common.MeshConfig interface. +func (in *TrafficPermissions) DefaultNamespaceFields(tenancy common.ConsulTenancyConfig) {} diff --git a/control-plane/api/auth/v2beta1/traffic_permissions_types_test.go b/control-plane/api/auth/v2beta1/traffic_permissions_types_test.go new file mode 100644 index 0000000000..170f02fb20 --- /dev/null +++ b/control-plane/api/auth/v2beta1/traffic_permissions_types_test.go @@ -0,0 +1,1040 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/timestamppb" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +func TestTrafficPermissions_MatchesConsul(t *testing.T) { + cases := map[string]struct { + OurConsulNamespace string + OurConsulPartition string + OurData *TrafficPermissions + + TheirName string + TheirConsulNamespace string + TheirConsulPartition string + TheirData *pbauth.TrafficPermissions + ResourceOverride *pbresource.Resource // Used to test that an empty resource of another type will not match + + Matches bool + }{ + "empty fields matches": { + OurConsulNamespace: constants.DefaultConsulNS, + OurConsulPartition: constants.DefaultConsulPartition, + OurData: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + }, + Spec: pbauth.TrafficPermissions{}, + }, + TheirName: "name", + TheirConsulNamespace: constants.DefaultConsulNS, + TheirConsulPartition: constants.DefaultConsulPartition, + TheirData: &pbauth.TrafficPermissions{ + Destination: nil, + Action: pbauth.Action_ACTION_UNSPECIFIED, + Permissions: nil, + }, + Matches: true, + }, + "source namespaces and partitions are compared": { + OurConsulNamespace: "consul-ns", + OurConsulPartition: "consul-partition", + OurData: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", 
+ Namespace: "kube-ns", + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + IdentityName: "source-identity", + Namespace: "the space namespace space", + }, + }, + }, + }, + }, + }, + TheirName: "foo", + TheirConsulNamespace: "consul-ns", + TheirConsulPartition: "consul-partition", + TheirData: &pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + IdentityName: "source-identity", + Namespace: "not space namespace", + }, + }, + }, + }, + }, + Matches: false, + }, + "destination namespaces and partitions are compared": { + OurConsulNamespace: "not-consul-ns", + OurConsulPartition: "not-consul-partition", + OurData: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_DENY, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + IdentityName: "source-identity", + }, + }, + }, + }, + }, + }, + TheirName: "foo", + TheirConsulNamespace: "consul-ns", + TheirConsulPartition: "consul-partition", + TheirData: &pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + IdentityName: "source-identity", + }, + }, + }, + }, + }, + Matches: false, + }, + "all fields set matches": { + OurConsulNamespace: "consul-ns", + OurConsulPartition: "consul-partition", + OurData: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + 
Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + Namespace: "the space namespace space", + Partition: "space-partition", + Peer: "space-peer", + SamenessGroup: "space-group", + Exclude: []*pbauth.ExcludeSource{ + { + IdentityName: "not-source-identity", + Namespace: "the space namespace space", + Partition: "space-partition", + Peer: "space-peer", + SamenessGroup: "space-group", + }, + }, + }, + { + IdentityName: "source-identity", + }, + }, + DestinationRules: []*pbauth.DestinationRule{ + { + PathExact: "/hello", + PathPrefix: "/world", + PathRegex: "/.*/foo", + Headers: []*pbauth.DestinationRuleHeader{ + { + Name: "x-consul-test", + Present: true, + Exact: "true", + Prefix: "prefix", + Suffix: "suffix", + Regex: "reg.*ex", + Invert: true, + }, + }, + Methods: []string{"GET", "POST"}, + Exclude: []*pbauth.ExcludePermissionRule{ + { + PathExact: "/hello", + PathPrefix: "/world", + PathRegex: "/.*/foo", + Headers: []*pbauth.DestinationRuleHeader{ + { + Name: "x-consul-not-test", + Present: true, + Exact: "false", + Prefix: "~prefix", + Suffix: "~suffix", + Regex: "~reg.*ex", + Invert: true, + }, + }, + Methods: []string{"DELETE"}, + PortNames: []string{"log"}, + }, + }, + PortNames: []string{"web", "admin"}, + }, + }, + }, + }, + }, + }, + TheirName: "foo", + TheirConsulNamespace: "consul-ns", + TheirConsulPartition: "consul-partition", + TheirData: &pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + // These are intentionally in a different order to show that it doesn't matter + { + IdentityName: "source-identity", + }, + { + Namespace: "the space namespace space", + Partition: "space-partition", + Peer: "space-peer", + 
SamenessGroup: "space-group", + Exclude: []*pbauth.ExcludeSource{ + { + IdentityName: "not-source-identity", + Namespace: "the space namespace space", + Partition: "space-partition", + Peer: "space-peer", + SamenessGroup: "space-group", + }, + }, + }, + }, + DestinationRules: []*pbauth.DestinationRule{ + { + PathExact: "/hello", + PathPrefix: "/world", + PathRegex: "/.*/foo", + Headers: []*pbauth.DestinationRuleHeader{ + { + Name: "x-consul-test", + Present: true, + Exact: "true", + Prefix: "prefix", + Suffix: "suffix", + Regex: "reg.*ex", + Invert: true, + }, + }, + Methods: []string{"GET", "POST"}, + Exclude: []*pbauth.ExcludePermissionRule{ + { + PathExact: "/hello", + PathPrefix: "/world", + PathRegex: "/.*/foo", + Headers: []*pbauth.DestinationRuleHeader{ + { + Name: "x-consul-not-test", + Present: true, + Exact: "false", + Prefix: "~prefix", + Suffix: "~suffix", + Regex: "~reg.*ex", + Invert: true, + }, + }, + Methods: []string{"DELETE"}, + PortNames: []string{"log"}, + }, + }, + PortNames: []string{"web", "admin"}, + }, + }, + }, + }, + }, + Matches: true, + }, + "different types does not match": { + OurConsulNamespace: constants.DefaultConsulNS, + OurConsulPartition: constants.DefaultConsulPartition, + OurData: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + }, + Spec: pbauth.TrafficPermissions{}, + }, + ResourceOverride: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "name", + Type: pbmesh.ProxyConfigurationType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.DefaultConsulNS, + Namespace: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbmesh.ProxyConfiguration{}), + Metadata: meshConfigMeta(), + }, + Matches: false, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + consulResource := c.ResourceOverride + if c.TheirName != "" { + consulResource = constructTrafficPermissionResource(c.TheirData, c.TheirName, c.TheirConsulNamespace, c.TheirConsulPartition) + } + 
require.Equal(t, c.Matches, c.OurData.MatchesConsul(consulResource, c.OurConsulNamespace, c.OurConsulPartition)) + }) + } +} + +// TestTrafficPermissions_Resource also includes test to verify ResourceID(). +func TestTrafficPermissions_Resource(t *testing.T) { + cases := map[string]struct { + Ours *TrafficPermissions + ConsulNamespace string + ConsulPartition string + ExpectedName string + ExpectedData *pbauth.TrafficPermissions + }{ + "empty fields": { + Ours: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: pbauth.TrafficPermissions{}, + }, + ConsulNamespace: constants.DefaultConsulNS, + ConsulPartition: constants.DefaultConsulPartition, + ExpectedName: "foo", + ExpectedData: &pbauth.TrafficPermissions{}, + }, + "every field set": { + Ours: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + Namespace: "the space namespace space", + Partition: "space-partition", + Peer: "space-peer", + SamenessGroup: "space-group", + Exclude: []*pbauth.ExcludeSource{ + { + IdentityName: "not-source-identity", + Namespace: "the space namespace space", + Partition: "space-partition", + Peer: "space-peer", + SamenessGroup: "space-group", + }, + }, + }, + { + IdentityName: "source-identity", + }, + }, + DestinationRules: []*pbauth.DestinationRule{ + { + PathExact: "/hello", + PathPrefix: "/world", + PathRegex: "/.*/foo", + Headers: []*pbauth.DestinationRuleHeader{{ + Name: "x-consul-test", + Present: true, + Exact: "true", + Prefix: "prefix", + Suffix: "suffix", + Regex: "reg.*ex", + Invert: true, + }}, + Methods: []string{"GET", "POST"}, + Exclude: []*pbauth.ExcludePermissionRule{ + { + PathExact: "/hello", + PathPrefix: "/world", + PathRegex: "/.*/foo", + Headers: 
[]*pbauth.DestinationRuleHeader{{ + Name: "x-consul-not-test", + Present: true, + Exact: "false", + Prefix: "~prefix", + Suffix: "~suffix", + Regex: "~reg.*ex", + Invert: true, + }}, + Methods: []string{"DELETE"}, + PortNames: []string{"log"}, + }, + }, + PortNames: []string{"web", "admin"}, + }, + }, + }, + }, + }, + }, + ConsulNamespace: "not-default-namespace", + ConsulPartition: "not-default-partition", + ExpectedName: "foo", + ExpectedData: &pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + // These are intentionally in a different order to show that it doesn't matter + { + IdentityName: "source-identity", + }, + { + Namespace: "the space namespace space", + Partition: "space-partition", + Peer: "space-peer", + SamenessGroup: "space-group", + Exclude: []*pbauth.ExcludeSource{ + { + IdentityName: "not-source-identity", + Namespace: "the space namespace space", + Partition: "space-partition", + Peer: "space-peer", + SamenessGroup: "space-group", + }, + }, + }, + }, + DestinationRules: []*pbauth.DestinationRule{ + { + PathExact: "/hello", + PathPrefix: "/world", + PathRegex: "/.*/foo", + Headers: []*pbauth.DestinationRuleHeader{{ + Name: "x-consul-test", + Present: true, + Exact: "true", + Prefix: "prefix", + Suffix: "suffix", + Regex: "reg.*ex", + Invert: true, + }}, + Methods: []string{"GET", "POST"}, + Exclude: []*pbauth.ExcludePermissionRule{ + { + PathExact: "/hello", + PathPrefix: "/world", + PathRegex: "/.*/foo", + Headers: []*pbauth.DestinationRuleHeader{{ + Name: "x-consul-not-test", + Present: true, + Exact: "false", + Prefix: "~prefix", + Suffix: "~suffix", + Regex: "~reg.*ex", + Invert: true, + }}, + Methods: []string{"DELETE"}, + PortNames: []string{"log"}, + }, + }, + PortNames: []string{"web", "admin"}, + }, + }, + }, + }, + }, + }, + } + for name, c := range cases { + t.Run(name, 
func(t *testing.T) { + actual := c.Ours.Resource(c.ConsulNamespace, c.ConsulPartition) + expected := constructTrafficPermissionResource(c.ExpectedData, c.ExpectedName, c.ConsulNamespace, c.ConsulPartition) + + opts := append([]cmp.Option{ + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + }, test.CmpProtoIgnoreOrder()...) + diff := cmp.Diff(expected, actual, opts...) + require.Equal(t, "", diff, "TrafficPermissions do not match") + }) + } +} + +func TestTrafficPermissions_SetSyncedCondition(t *testing.T) { + trafficPermissions := &TrafficPermissions{} + trafficPermissions.SetSyncedCondition(corev1.ConditionTrue, "reason", "message") + + require.Equal(t, corev1.ConditionTrue, trafficPermissions.Status.Conditions[0].Status) + require.Equal(t, "reason", trafficPermissions.Status.Conditions[0].Reason) + require.Equal(t, "message", trafficPermissions.Status.Conditions[0].Message) + now := metav1.Now() + require.True(t, trafficPermissions.Status.Conditions[0].LastTransitionTime.Before(&now)) +} + +func TestTrafficPermissions_SetLastSyncedTime(t *testing.T) { + trafficPermissions := &TrafficPermissions{} + syncedTime := metav1.NewTime(time.Now()) + trafficPermissions.SetLastSyncedTime(&syncedTime) + + require.Equal(t, &syncedTime, trafficPermissions.Status.LastSyncedTime) +} + +func TestTrafficPermissions_GetSyncedConditionStatus(t *testing.T) { + cases := []corev1.ConditionStatus{ + corev1.ConditionUnknown, + corev1.ConditionFalse, + corev1.ConditionTrue, + } + for _, status := range cases { + t.Run(string(status), func(t *testing.T) { + trafficPermissions := &TrafficPermissions{ + Status: Status{ + Conditions: []Condition{{ + Type: ConditionSynced, + Status: status, + }}, + }, + } + + require.Equal(t, status, trafficPermissions.SyncedConditionStatus()) + }) + } +} + +func TestTrafficPermissions_GetConditionWhenStatusNil(t *testing.T) { + require.Nil(t, 
(&TrafficPermissions{}).GetCondition(ConditionSynced)) +} + +func TestTrafficPermissions_SyncedConditionStatusWhenStatusNil(t *testing.T) { + require.Equal(t, corev1.ConditionUnknown, (&TrafficPermissions{}).SyncedConditionStatus()) +} + +func TestTrafficPermissions_SyncedConditionWhenStatusNil(t *testing.T) { + status, reason, message := (&TrafficPermissions{}).SyncedCondition() + require.Equal(t, corev1.ConditionUnknown, status) + require.Equal(t, "", reason) + require.Equal(t, "", message) +} + +func TestTrafficPermissions_KubeKind(t *testing.T) { + require.Equal(t, "trafficpermissions", (&TrafficPermissions{}).KubeKind()) +} + +func TestTrafficPermissions_KubernetesName(t *testing.T) { + require.Equal(t, "test", (&TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "bar", + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "foo", + }, + }, + }).KubernetesName()) +} + +func TestTrafficPermissions_ObjectMeta(t *testing.T) { + meta := metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + } + trafficPermissions := &TrafficPermissions{ + ObjectMeta: meta, + } + require.Equal(t, &meta, trafficPermissions.GetObjectMeta()) +} + +// Test defaulting behavior when namespaces are enabled as well as disabled. 
+// TODO: add when implemented +// func TestTrafficPermissions_DefaultNamespaceFields(t *testing.T) + +func TestTrafficPermissions_Validate(t *testing.T) { + cases := []struct { + name string + input *TrafficPermissions + expectedErrMsgs []string + }{ + { + name: "kitchen sink OK", + input: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + Namespace: "the space namespace space", + Partition: "space-partition", + Exclude: []*pbauth.ExcludeSource{ + { + IdentityName: "not-source-identity", + Namespace: "the space namespace space", + SamenessGroup: "space-group", + }, + }, + }, + { + IdentityName: "source-identity", + Namespace: "another-namespace", + }, + }, + DestinationRules: []*pbauth.DestinationRule{ + { + PathExact: "/hello", + Headers: []*pbauth.DestinationRuleHeader{ + { + Name: "x-consul-test", + Present: true, + Exact: "true", + Prefix: "prefix", + Suffix: "suffix", + Regex: "reg.*ex", + Invert: true, + }, + }, + Methods: []string{"GET", "POST"}, + Exclude: []*pbauth.ExcludePermissionRule{ + { + PathPrefix: "/world", + Headers: []*pbauth.DestinationRuleHeader{ + { + Name: "x-consul-not-test", + Present: true, + Exact: "false", + Prefix: "~prefix", + Suffix: "~suffix", + Regex: "~reg.*ex", + Invert: true, + }, + }, + Methods: []string{"DELETE"}, + PortNames: []string{"log"}, + }, + }, + PortNames: []string{"web", "admin"}, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: nil, + }, + { + name: "must have an action", + input: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "does-not-matter", + Namespace: "not-default-ns", + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "dest-service", + }, + }, + }, + expectedErrMsgs: []string{ + 
`trafficpermissions.auth.consul.hashicorp.com "does-not-matter" is invalid: spec.action: Invalid value: ACTION_UNSPECIFIED: action must be either allow or deny`, + }, + }, + { + name: "destination is required", + input: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "does-not-matter", + Namespace: "not-default-ns", + }, + Spec: pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + }, + }, + expectedErrMsgs: []string{ + `trafficpermissions.auth.consul.hashicorp.com "does-not-matter" is invalid: spec.destination: Invalid value: "null": cannot be empty`, + }, + }, + { + name: "destination.identityName is required", + input: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "does-not-matter", + Namespace: "not-default-ns", + }, + Spec: pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Destination: &pbauth.Destination{}, + }, + }, + expectedErrMsgs: []string{ + `trafficpermissions.auth.consul.hashicorp.com "does-not-matter" is invalid: spec.destination: Invalid value: authv2beta1.Destination{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), IdentityName:""}: cannot be empty`, + }, + }, + { + name: "permission.sources: partitions, peers, and sameness_groups", + input: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "does-not-matter", + Namespace: "not-default-ns", + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + Namespace: "the space namespace space", + Partition: "space-partition", + Peer: "space-peer", + }, + { + Namespace: "the space namespace space", + Partition: "space-partition", + SamenessGroup: "space-sameness", + }, + { + Namespace: "the 
space namespace space", + Peer: "space-peer", + SamenessGroup: "space-sameness", + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.permissions[0].sources[0]: Invalid value: authv2beta1.Source{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), IdentityName:"", Namespace:"the space namespace space", Partition:"space-partition", Peer:"space-peer", SamenessGroup:"", Exclude:[]*authv2beta1.ExcludeSource(nil)}: permission sources may not specify partitions, peers, and sameness_groups together`, + `spec.permissions[0].sources[1]: Invalid value: authv2beta1.Source{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), IdentityName:"", Namespace:"the space namespace space", Partition:"space-partition", Peer:"", SamenessGroup:"space-sameness", Exclude:[]*authv2beta1.ExcludeSource(nil)}: permission sources may not specify partitions, peers, and sameness_groups together`, + `spec.permissions[0].sources[2]: Invalid value: authv2beta1.Source{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), IdentityName:"", Namespace:"the space namespace space", Partition:"", Peer:"space-peer", SamenessGroup:"space-sameness", Exclude:[]*authv2beta1.ExcludeSource(nil)}: permission sources may not specify partitions, peers, and sameness_groups together`, + }, + }, + { + name: "permission.sources: identity name without namespace", + input: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "does-not-matter", + Namespace: "not-default-ns", + }, + Spec: 
pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + IdentityName: "false-identity", + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.permissions[0].sources[0]: Invalid value: authv2beta1.Source{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), IdentityName:"false-identity", Namespace:"", Partition:"", Peer:"", SamenessGroup:"", Exclude:[]*authv2beta1.ExcludeSource(nil)}: permission sources may not have wildcard namespaces and explicit names`, + }, + }, + { + name: "permission.sources: identity name with excludes", + input: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "does-not-matter", + Namespace: "not-default-ns", + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + Namespace: "default-namespace", + IdentityName: "false-identity", + Exclude: []*pbauth.ExcludeSource{ + { + IdentityName: "not-source-identity", + }, + }, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `must be defined on wildcard sources`, + }, + }, + { + name: "permission.sources.exclude: incompatible tenancies", + input: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "does-not-matter", + Namespace: "not-default-ns", + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + Namespace: "default-namespace", + Exclude: []*pbauth.ExcludeSource{ + { + 
Namespace: "the space namespace space", + Partition: "space-partition", + Peer: "space-peer", + }, + { + Namespace: "the space namespace space", + Partition: "space-partition", + SamenessGroup: "space-sameness", + }, + { + Namespace: "the space namespace space", + Peer: "space-peer", + SamenessGroup: "space-sameness", + }, + }, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.permissions[0].sources[0].exclude[0]: Invalid value: authv2beta1.ExcludeSource{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), IdentityName:"", Namespace:"the space namespace space", Partition:"space-partition", Peer:"space-peer", SamenessGroup:""}: permissions sources may not specify partitions, peers, and sameness_groups together`, + `spec.permissions[0].sources[0].exclude[1]: Invalid value: authv2beta1.ExcludeSource{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), IdentityName:"", Namespace:"the space namespace space", Partition:"space-partition", Peer:"", SamenessGroup:"space-sameness"}: permissions sources may not specify partitions, peers, and sameness_groups together`, + `spec.permissions[0].sources[0].exclude[2]: Invalid value: authv2beta1.ExcludeSource{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), IdentityName:"", Namespace:"the space namespace space", Partition:"", Peer:"space-peer", SamenessGroup:"space-sameness"}: permissions sources may not specify partitions, peers, and sameness_groups together`, + }, + }, + { + name: "permission.sources.exclude: 
identity name without namespace", + input: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "does-not-matter", + Namespace: "not-default-ns", + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + Namespace: "default-namespace", + Exclude: []*pbauth.ExcludeSource{ + { + IdentityName: "false-identity", + }, + }, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.permissions[0].sources[0].exclude[0]: Invalid value: authv2beta1.ExcludeSource{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), IdentityName:"false-identity", Namespace:"", Partition:"", Peer:"", SamenessGroup:""}: permission sources may not have wildcard namespaces and explicit names`, + }, + }, + { + name: "permission.destinationRules: incompatible destination rules", + input: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "does-not-matter", + Namespace: "not-default-ns", + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + DestinationRules: []*pbauth.DestinationRule{ + { + PathExact: "/hello", + PathPrefix: "foobar", + }, + { + PathExact: "/hello", + PathRegex: "path-regex", + }, + { + PathPrefix: "foobar", + PathRegex: "path-regex", + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.permissions[0].destinationRules[0]: Invalid value: authv2beta1.DestinationRule{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, 
unknownFields:[]uint8(nil), PathExact:"/hello", PathPrefix:"foobar", PathRegex:"", Methods:[]string(nil), Headers:[]*authv2beta1.DestinationRuleHeader(nil), PortNames:[]string(nil), Exclude:[]*authv2beta1.ExcludePermissionRule(nil)}: prefix values, regex values, and explicit names must not combined`, + `spec.permissions[0].destinationRules[1]: Invalid value: authv2beta1.DestinationRule{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), PathExact:"/hello", PathPrefix:"", PathRegex:"path-regex", Methods:[]string(nil), Headers:[]*authv2beta1.DestinationRuleHeader(nil), PortNames:[]string(nil), Exclude:[]*authv2beta1.ExcludePermissionRule(nil)}: prefix values, regex values, and explicit names must not combined`, + `spec.permissions[0].destinationRules[2]: Invalid value: authv2beta1.DestinationRule{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), PathExact:"", PathPrefix:"foobar", PathRegex:"path-regex", Methods:[]string(nil), Headers:[]*authv2beta1.DestinationRuleHeader(nil), PortNames:[]string(nil), Exclude:[]*authv2beta1.ExcludePermissionRule(nil)}: prefix values, regex values, and explicit names must not combined`, + }, + }, + { + name: "permission.destinationRules.exclude: incompatible destination rules", + input: &TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "does-not-matter", + Namespace: "not-default-ns", + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + DestinationRules: []*pbauth.DestinationRule{ + { + Exclude: []*pbauth.ExcludePermissionRule{ + { + 
PathExact: "/hello", + PathPrefix: "foobar", + }, + { + PathExact: "/hello", + PathRegex: "path-regex", + }, + { + PathPrefix: "foobar", + PathRegex: "path-regex", + }, + }, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.permissions[0].destinationRules[0].exclude[0]: Invalid value: authv2beta1.ExcludePermissionRule{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), PathExact:"/hello", PathPrefix:"foobar", PathRegex:"", Methods:[]string(nil), Headers:[]*authv2beta1.DestinationRuleHeader(nil), PortNames:[]string(nil)}: prefix values, regex values, and explicit names must not combined`, + `spec.permissions[0].destinationRules[0].exclude[1]: Invalid value: authv2beta1.ExcludePermissionRule{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), PathExact:"/hello", PathPrefix:"", PathRegex:"path-regex", Methods:[]string(nil), Headers:[]*authv2beta1.DestinationRuleHeader(nil), PortNames:[]string(nil)}: prefix values, regex values, and explicit names must not combined`, + `spec.permissions[0].destinationRules[0].exclude[2]: Invalid value: authv2beta1.ExcludePermissionRule{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), PathExact:"", PathPrefix:"foobar", PathRegex:"path-regex", Methods:[]string(nil), Headers:[]*authv2beta1.DestinationRuleHeader(nil), PortNames:[]string(nil)}: prefix values, regex values, and explicit names must not combined`, + }, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := 
tc.input.Validate(common.ConsulTenancyConfig{}) + if len(tc.expectedErrMsgs) != 0 { + require.Error(t, err) + for _, s := range tc.expectedErrMsgs { + require.Contains(t, err.Error(), s) + } + } else { + require.NoError(t, err) + } + }) + } +} + +func constructTrafficPermissionResource(tp *pbauth.TrafficPermissions, name, namespace, partition string) *pbresource.Resource { + data := inject.ToProtoAny(tp) + + id := &pbresource.ID{ + Name: name, + Type: pbauth.TrafficPermissionsType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + Uid: "ABCD", // We add this to show it does not factor into the comparison + } + + return &pbresource.Resource{ + Id: id, + Data: data, + Metadata: meshConfigMeta(), + + // We add the fields below to prove that they are not used in the Match when comparing the CRD to Consul. + Version: "123456", + Generation: "01ARZ3NDEKTSV4RRFFQ69G5FAV", + Status: map[string]*pbresource.Status{ + "knock": { + ObservedGeneration: "01ARZ3NDEKTSV4RRFFQ69G5FAV", + Conditions: make([]*pbresource.Condition, 0), + UpdatedAt: timestamppb.Now(), + }, + }, + } +} diff --git a/control-plane/api/auth/v2beta1/trafficpermissions_webhook.go b/control-plane/api/auth/v2beta1/trafficpermissions_webhook.go new file mode 100644 index 0000000000..21a4cee9b5 --- /dev/null +++ b/control-plane/api/auth/v2beta1/trafficpermissions_webhook.go @@ -0,0 +1,66 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + "context" + "net/http" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" +) + +// +kubebuilder:object:generate=false + +type TrafficPermissionsWebhook struct { + Logger logr.Logger + + // ConsulTenancyConfig contains the injector's namespace and partition configuration. 
+ ConsulTenancyConfig common.ConsulTenancyConfig + + decoder *admission.Decoder + client.Client +} + +var _ common.ConsulResourceLister = &TrafficPermissionsWebhook{} + +// NOTE: The path value in the below line is the path to the webhook. +// If it is updated, run code-gen, update subcommand/inject-connect/command.go +// and the consul-helm value for the path to the webhook. +// +// NOTE: The below line cannot be combined with any other comment. If it is it will break the code generation. +// +// +kubebuilder:webhook:verbs=create;update,path=/mutate-v2beta1-trafficpermissions,mutating=true,failurePolicy=fail,groups=auth.consul.hashicorp.com,resources=trafficpermissions,versions=v2beta1,name=mutate-trafficpermissions.auth.consul.hashicorp.com,sideEffects=None,admissionReviewVersions=v1beta1;v1 + +func (v *TrafficPermissionsWebhook) Handle(ctx context.Context, req admission.Request) admission.Response { + var resource TrafficPermissions + err := v.decoder.Decode(req, &resource) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + return common.ValidateConsulResource(ctx, req, v.Logger, v, &resource, v.ConsulTenancyConfig) +} + +func (v *TrafficPermissionsWebhook) List(ctx context.Context) ([]common.ConsulResource, error) { + var resourceList TrafficPermissionsList + if err := v.Client.List(ctx, &resourceList); err != nil { + return nil, err + } + var entries []common.ConsulResource + for _, item := range resourceList.Items { + entries = append(entries, common.ConsulResource(item)) + } + return entries, nil +} + +func (v *TrafficPermissionsWebhook) SetupWithManager(mgr ctrl.Manager) { + v.decoder = admission.NewDecoder(mgr.GetScheme()) + mgr.GetWebhookServer().Register("/mutate-v2beta1-trafficpermissions", &admission.Webhook{Handler: v}) +} diff --git a/control-plane/api/auth/v2beta1/zz_generated.deepcopy.go b/control-plane/api/auth/v2beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..3aa46646cb --- /dev/null +++ 
b/control-plane/api/auth/v2beta1/zz_generated.deepcopy.go @@ -0,0 +1,136 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v2beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Conditions) DeepCopyInto(out *Conditions) { + { + in := &in + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions. +func (in Conditions) DeepCopy() Conditions { + if in == nil { + return nil + } + out := new(Conditions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Status) DeepCopyInto(out *Status) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LastSyncedTime != nil { + in, out := &in.LastSyncedTime, &out.LastSyncedTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. 
+func (in *Status) DeepCopy() *Status { + if in == nil { + return nil + } + out := new(Status) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficPermissions) DeepCopyInto(out *TrafficPermissions) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficPermissions. +func (in *TrafficPermissions) DeepCopy() *TrafficPermissions { + if in == nil { + return nil + } + out := new(TrafficPermissions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TrafficPermissions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficPermissionsList) DeepCopyInto(out *TrafficPermissionsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*TrafficPermissions, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(TrafficPermissions) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficPermissionsList. +func (in *TrafficPermissionsList) DeepCopy() *TrafficPermissionsList { + if in == nil { + return nil + } + out := new(TrafficPermissionsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TrafficPermissionsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/control-plane/api/common/common.go b/control-plane/api/common/common.go index d40755f5ba..53d4c42e96 100644 --- a/control-plane/api/common/common.go +++ b/control-plane/api/common/common.go @@ -28,7 +28,17 @@ const ( ControlPlaneRequestLimit string = "controlplanerequestlimit" RouteAuthFilter string = "routeauthfilter" GatewayPolicy string = "gatewaypolicy" - Registration string = "registration" + + // V2 config entries. + TrafficPermissions string = "trafficpermissions" + GRPCRoute string = "grpcroute" + HTTPRoute string = "httproute" + TCPRoute string = "tcproute" + ProxyConfiguration string = "proxyconfiguration" + MeshGateway string = "meshgateway" + GatewayClass string = "gatewayclass" + GatewayClassConfig string = "gatewayclassconfig" + MeshConfiguration string = "meshconfiguration" Global string = "global" Mesh string = "mesh" @@ -47,6 +57,28 @@ const ( DefaultPeerName = "local" ) +// ConsulTenancyConfig manages settings related to Consul namespaces and partitions. +type ConsulTenancyConfig struct { + // EnableConsulPartitions indicates that a user is running Consul Enterprise. + EnableConsulPartitions bool + // ConsulPartition is the Consul Partition to which this controller belongs. + ConsulPartition string + // EnableConsulNamespaces indicates that a user is running Consul Enterprise. + EnableConsulNamespaces bool + // ConsulDestinationNamespace is the name of the Consul namespace to create + // all resources in. If EnableNSMirroring is true this is ignored. + ConsulDestinationNamespace string + // EnableNSMirroring causes Consul namespaces to be created to match the + // k8s namespace of any config entry custom resource. Resources will + // be created in the matching Consul namespace. 
+ EnableNSMirroring bool + // NSMirroringPrefix is an optional prefix that can be added to the Consul + // namespaces created while mirroring. For example, if it is set to "k8s-", + // then the k8s `default` namespace will be mirrored in Consul's + // `k8s-default` namespace. + NSMirroringPrefix string +} + // K8sNamespaceConfig manages allow/deny Kubernetes namespaces. type K8sNamespaceConfig struct { // Only endpoints in the AllowK8sNamespacesSet are reconciled. diff --git a/control-plane/api/common/consul_resource.go b/control-plane/api/common/consul_resource.go new file mode 100644 index 0000000000..b957d0fb79 --- /dev/null +++ b/control-plane/api/common/consul_resource.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package common + +import ( + "github.com/hashicorp/consul/proto-public/pbresource" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type ConsulResource interface { + ResourceID(namespace, partition string) *pbresource.ID + Resource(namespace, partition string) *pbresource.Resource + + // GetObjectKind should be implemented by the generated code. + GetObjectKind() schema.ObjectKind + // DeepCopyObject should be implemented by the generated code. + DeepCopyObject() runtime.Object + + // AddFinalizer adds a finalizer to the list of finalizers. + AddFinalizer(name string) + // RemoveFinalizer removes this finalizer from the list. + RemoveFinalizer(name string) + // Finalizers returns the list of finalizers for this object. + Finalizers() []string + + // MatchesConsul returns true if the resource has the same fields as the Consul + // config entry. + MatchesConsul(candidate *pbresource.Resource, namespace, partition string) bool + + // KubeKind returns the Kube config entry kind, i.e. servicedefaults, not + // service-defaults. 
+ KubeKind() string + // KubernetesName returns the name of the Kubernetes resource. + KubernetesName() string + + // SetSyncedCondition updates the synced condition. + SetSyncedCondition(status corev1.ConditionStatus, reason, message string) + // SetLastSyncedTime updates the last synced time. + SetLastSyncedTime(time *metav1.Time) + // SyncedCondition gets the synced condition. + SyncedCondition() (status corev1.ConditionStatus, reason, message string) + // SyncedConditionStatus returns the status of the synced condition. + SyncedConditionStatus() corev1.ConditionStatus + + // Validate returns an error if the resource is invalid. + Validate(tenancy ConsulTenancyConfig) error + + // DefaultNamespaceFields sets Consul namespace fields on the resource + // spec to their default values if namespaces are enabled. + DefaultNamespaceFields(tenancy ConsulTenancyConfig) + + // Object is required so that MeshConfig implements metav1.Object, which is + // the interface supported by controller-runtime reconcile-able resources. + metav1.Object +} diff --git a/control-plane/api/common/consul_resource_webhook.go b/control-plane/api/common/consul_resource_webhook.go new file mode 100644 index 0000000000..afda672873 --- /dev/null +++ b/control-plane/api/common/consul_resource_webhook.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package common + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "github.com/go-logr/logr" + "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// ConsulResourceLister is implemented by CRD-specific webhooks. +type ConsulResourceLister interface { + // List returns all resources of this type across all namespaces in a + // Kubernetes cluster. + List(ctx context.Context) ([]ConsulResource, error) +} + +// ValidateConsulResource validates a Consul Resource. 
It is a generic method that +// can be used by all CRD-specific validators. +// Callers should pass themselves as validator and kind should be the custom +// resource name, e.g. "TrafficPermissions". +func ValidateConsulResource( + ctx context.Context, + req admission.Request, + logger logr.Logger, + resourceLister ConsulResourceLister, + resource ConsulResource, + tenancy ConsulTenancyConfig) admission.Response { + + defaultingPatches, err := ConsulResourceDefaultingPatches(resource, tenancy) + if err != nil { + return admission.Errored(http.StatusInternalServerError, err) + } + // On create we need to validate that there isn't already a resource with + // the same name in a different namespace if we're mapping all Kube + // resources to a single Consul namespace. The only case where we're not + // mapping all kube resources to a single Consul namespace is when we + // are running Consul enterprise with namespace mirroring. + singleConsulDestNS := !(tenancy.EnableConsulNamespaces && tenancy.EnableNSMirroring) + if req.Operation == admissionv1.Create && singleConsulDestNS { + logger.Info("validate create", "name", resource.KubernetesName()) + + list, err := resourceLister.List(ctx) + if err != nil { + return admission.Errored(http.StatusInternalServerError, err) + } + for _, item := range list { + if item.KubernetesName() == resource.KubernetesName() { + return admission.Errored(http.StatusBadRequest, + fmt.Errorf("%s resource with name %q is already defined – all %s resources must have unique names across namespaces", + resource.KubeKind(), + resource.KubernetesName(), + resource.KubeKind())) + } + } + } + if err := resource.Validate(tenancy); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + return admission.Patched(fmt.Sprintf("valid %s request", resource.KubeKind()), defaultingPatches...) +} + +// ConsulResourceDefaultingPatches returns the patches needed to set fields to their defaults. 
+func ConsulResourceDefaultingPatches(resource ConsulResource, tenancy ConsulTenancyConfig) ([]jsonpatch.Operation, error) { + beforeDefaulting, err := json.Marshal(resource) + if err != nil { + return nil, fmt.Errorf("marshalling input: %s", err) + } + resource.DefaultNamespaceFields(tenancy) + afterDefaulting, err := json.Marshal(resource) + if err != nil { + return nil, fmt.Errorf("marshalling after defaulting: %s", err) + } + + defaultingPatches, err := jsonpatch.CreatePatch(beforeDefaulting, afterDefaulting) + if err != nil { + return nil, fmt.Errorf("creating patches: %s", err) + } + return defaultingPatches, nil +} diff --git a/control-plane/api/common/consul_resource_webhook_test.go b/control-plane/api/common/consul_resource_webhook_test.go new file mode 100644 index 0000000000..63bbf9a6e0 --- /dev/null +++ b/control-plane/api/common/consul_resource_webhook_test.go @@ -0,0 +1,333 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package common + +import ( + "context" + "encoding/json" + "errors" + "testing" + + logrtest "github.com/go-logr/logr/testr" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/stretchr/testify/require" + "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +func TestValidateConsulResource(t *testing.T) { + otherNS := "other" + + cases := map[string]struct { + existingResources []ConsulResource + newResource ConsulResource + enableNamespaces bool + nsMirroring bool + consulDestinationNS string + nsMirroringPrefix string + expAllow bool + expErrMessage string + }{ + "no duplicates, valid": { + existingResources: nil, + newResource: &mockConsulResource{ + MockName: "foo", + MockNamespace: otherNS, + Valid: true, + }, + 
expAllow: true, + }, + "no duplicates, invalid": { + existingResources: nil, + newResource: &mockConsulResource{ + MockName: "foo", + MockNamespace: otherNS, + Valid: false, + }, + expAllow: false, + expErrMessage: "invalid", + }, + "duplicate name": { + existingResources: []ConsulResource{&mockConsulResource{ + MockName: "foo", + MockNamespace: "default", + }}, + newResource: &mockConsulResource{ + MockName: "foo", + MockNamespace: otherNS, + Valid: true, + }, + expAllow: false, + expErrMessage: "mockkind resource with name \"foo\" is already defined – all mockkind resources must have unique names across namespaces", + }, + "duplicate name, namespaces enabled": { + existingResources: []ConsulResource{&mockConsulResource{ + MockName: "foo", + MockNamespace: "default", + }}, + newResource: &mockConsulResource{ + MockName: "foo", + MockNamespace: otherNS, + Valid: true, + }, + enableNamespaces: true, + expAllow: false, + expErrMessage: "mockkind resource with name \"foo\" is already defined – all mockkind resources must have unique names across namespaces", + }, + "duplicate name, namespaces enabled, mirroring enabled": { + existingResources: []ConsulResource{&mockConsulResource{ + MockName: "foo", + MockNamespace: "default", + }}, + newResource: &mockConsulResource{ + MockName: "foo", + MockNamespace: otherNS, + Valid: true, + }, + enableNamespaces: true, + nsMirroring: true, + expAllow: true, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + ctx := context.Background() + marshalledRequestObject, err := json.Marshal(c.newResource) + require.NoError(t, err) + + lister := &mockConsulResourceLister{ + Resources: c.existingResources, + } + response := ValidateConsulResource(ctx, admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Name: c.newResource.KubernetesName(), + Namespace: otherNS, + Operation: admissionv1.Create, + Object: runtime.RawExtension{ + Raw: marshalledRequestObject, + }, + }, + }, + logrtest.New(t), + 
lister, + c.newResource, + ConsulTenancyConfig{ + EnableConsulNamespaces: c.enableNamespaces, + ConsulDestinationNamespace: c.consulDestinationNS, + EnableNSMirroring: c.nsMirroring, + NSMirroringPrefix: c.nsMirroringPrefix, + }) + require.Equal(t, c.expAllow, response.Allowed) + if c.expErrMessage != "" { + require.Equal(t, c.expErrMessage, response.AdmissionResponse.Result.Message) + } + }) + } +} + +func TestConsulResourceDefaultingPatches(t *testing.T) { + meshConfig := &mockConsulResource{ + MockName: "test", + Valid: true, + } + + // This test validates that DefaultingPatches invokes DefaultNamespaceFields on the Config Entry. + patches, err := ConsulResourceDefaultingPatches(meshConfig, ConsulTenancyConfig{}) + require.NoError(t, err) + + require.Equal(t, []jsonpatch.Operation{ + { + Operation: "replace", + Path: "/MockNamespace", + Value: "bar", + }, + }, patches) +} + +type mockConsulResourceLister struct { + Resources []ConsulResource +} + +var _ ConsulResourceLister = &mockConsulResourceLister{} + +func (in *mockConsulResourceLister) List(_ context.Context) ([]ConsulResource, error) { + return in.Resources, nil +} + +type mockConsulResource struct { + MockName string + MockNamespace string + Valid bool +} + +var _ ConsulResource = &mockConsulResource{} + +func (in *mockConsulResource) ResourceID(_, _ string) *pbresource.ID { + return nil +} + +func (in *mockConsulResource) Resource(_, _ string) *pbresource.Resource { + return nil +} + +func (in *mockConsulResource) GetNamespace() string { + return in.MockNamespace +} + +func (in *mockConsulResource) SetNamespace(namespace string) { + in.MockNamespace = namespace +} + +func (in *mockConsulResource) GetName() string { + return in.MockName +} + +func (in *mockConsulResource) SetName(name string) { + in.MockName = name +} + +func (in *mockConsulResource) GetGenerateName() string { + return "" +} + +func (in *mockConsulResource) SetGenerateName(_ string) {} + +func (in *mockConsulResource) GetUID() types.UID 
{ + return "" +} + +func (in *mockConsulResource) SetUID(_ types.UID) {} + +func (in *mockConsulResource) GetResourceVersion() string { + return "" +} + +func (in *mockConsulResource) SetResourceVersion(_ string) {} + +func (in *mockConsulResource) GetGeneration() int64 { + return 0 +} + +func (in *mockConsulResource) SetGeneration(_ int64) {} + +func (in *mockConsulResource) GetSelfLink() string { + return "" +} + +func (in *mockConsulResource) SetSelfLink(_ string) {} + +func (in *mockConsulResource) GetCreationTimestamp() metav1.Time { + return metav1.Time{} +} + +func (in *mockConsulResource) SetCreationTimestamp(_ metav1.Time) {} + +func (in *mockConsulResource) GetDeletionTimestamp() *metav1.Time { + return nil +} + +func (in *mockConsulResource) SetDeletionTimestamp(_ *metav1.Time) {} + +func (in *mockConsulResource) GetDeletionGracePeriodSeconds() *int64 { + return nil +} + +func (in *mockConsulResource) SetDeletionGracePeriodSeconds(_ *int64) {} + +func (in *mockConsulResource) GetLabels() map[string]string { + return nil +} + +func (in *mockConsulResource) SetLabels(_ map[string]string) {} + +func (in *mockConsulResource) GetAnnotations() map[string]string { + return nil +} + +func (in *mockConsulResource) SetAnnotations(_ map[string]string) {} + +func (in *mockConsulResource) GetFinalizers() []string { + return nil +} + +func (in *mockConsulResource) SetFinalizers(_ []string) {} + +func (in *mockConsulResource) GetOwnerReferences() []metav1.OwnerReference { + return nil +} + +func (in *mockConsulResource) SetOwnerReferences(_ []metav1.OwnerReference) {} + +func (in *mockConsulResource) GetClusterName() string { + return "" +} + +func (in *mockConsulResource) SetClusterName(_ string) {} + +func (in *mockConsulResource) GetManagedFields() []metav1.ManagedFieldsEntry { + return nil +} + +func (in *mockConsulResource) SetManagedFields(_ []metav1.ManagedFieldsEntry) {} + +func (in *mockConsulResource) KubernetesName() string { + return in.MockName +} + +func 
(in *mockConsulResource) GetObjectMeta() metav1.ObjectMeta { + return metav1.ObjectMeta{} +} + +func (in *mockConsulResource) GetObjectKind() schema.ObjectKind { + return schema.EmptyObjectKind +} + +func (in *mockConsulResource) DeepCopyObject() runtime.Object { + return in +} + +func (in *mockConsulResource) AddFinalizer(_ string) {} + +func (in *mockConsulResource) RemoveFinalizer(_ string) {} + +func (in *mockConsulResource) Finalizers() []string { + return nil +} + +func (in *mockConsulResource) KubeKind() string { + return "mockkind" +} + +func (in *mockConsulResource) SetSyncedCondition(_ corev1.ConditionStatus, _ string, _ string) {} + +func (in *mockConsulResource) SetLastSyncedTime(_ *metav1.Time) {} + +func (in *mockConsulResource) SyncedCondition() (status corev1.ConditionStatus, reason string, message string) { + return corev1.ConditionTrue, "", "" +} + +func (in *mockConsulResource) SyncedConditionStatus() corev1.ConditionStatus { + return corev1.ConditionTrue +} + +func (in *mockConsulResource) Validate(_ ConsulTenancyConfig) error { + if !in.Valid { + return errors.New("invalid") + } + return nil +} + +func (in *mockConsulResource) DefaultNamespaceFields(_ ConsulTenancyConfig) { + in.MockNamespace = "bar" +} + +func (in *mockConsulResource) MatchesConsul(_ *pbresource.Resource, _, _ string) bool { + return false +} diff --git a/control-plane/api/mesh/v2beta1/api_gateway_types.go b/control-plane/api/mesh/v2beta1/api_gateway_types.go new file mode 100644 index 0000000000..d9da1d1947 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/api_gateway_types.go @@ -0,0 +1,146 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package v2beta1 + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "google.golang.org/protobuf/testing/protocmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" +) + +const ( + apiGatewayKubeKind = "gateway" +) + +func init() { + MeshSchemeBuilder.Register(&APIGateway{}, &APIGatewayList{}) +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// APIGateway is the Schema for the API Gateway +// +kubebuilder:printcolumn:name="Synced",type="string",JSONPath=".status.conditions[?(@.type==\"Synced\")].status",description="The sync status of the resource with Consul" +// +kubebuilder:printcolumn:name="Last Synced",type="date",JSONPath=".status.lastSyncedTime",description="The last successful synced time of the resource with Consul" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the resource" +// +kubebuilder:resource:scope=Cluster +type APIGateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec pbmesh.APIGateway `json:"spec,omitempty"` + Status `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// APIGatewayList contains a list of APIGateway. 
+type APIGatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []*APIGateway `json:"items"` +} + +func (in *APIGateway) ResourceID(namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: in.Name, + Type: pbmesh.APIGatewayType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + } +} + +func (in *APIGateway) Resource(namespace, partition string) *pbresource.Resource { + return &pbresource.Resource{ + Id: in.ResourceID(namespace, partition), + Data: inject.ToProtoAny(&in.Spec), + Metadata: meshConfigMeta(), + } +} + +func (in *APIGateway) AddFinalizer(f string) { + in.ObjectMeta.Finalizers = append(in.Finalizers(), f) +} + +func (in *APIGateway) RemoveFinalizer(f string) { + var newFinalizers []string + for _, oldF := range in.Finalizers() { + if oldF != f { + newFinalizers = append(newFinalizers, oldF) + } + } + in.ObjectMeta.Finalizers = newFinalizers +} + +func (in *APIGateway) Finalizers() []string { + return in.ObjectMeta.Finalizers +} + +func (in *APIGateway) MatchesConsul(candidate *pbresource.Resource, namespace, partition string) bool { + return cmp.Equal( + in.Resource(namespace, partition), + candidate, + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + protocmp.Transform(), + cmpopts.SortSlices(func(a, b any) bool { return fmt.Sprintf("%v", a) < fmt.Sprintf("%v", b) }), + ) +} + +func (in *APIGateway) KubeKind() string { + return apiGatewayKubeKind +} + +func (in *APIGateway) KubernetesName() string { + return in.ObjectMeta.Name +} + +func (in *APIGateway) SetSyncedCondition(status corev1.ConditionStatus, reason, message string) { + in.Status.Conditions = Conditions{ + { + Type: ConditionSynced, + Status: status, + LastTransitionTime: metav1.Now(), + Reason: reason, + Message: message, + }, + } +} + +func (in *APIGateway) SetLastSyncedTime(time *metav1.Time) { 
+ in.Status.LastSyncedTime = time +} + +func (in *APIGateway) SyncedCondition() (status corev1.ConditionStatus, reason, message string) { + cond := in.Status.GetCondition(ConditionSynced) + if cond == nil { + return corev1.ConditionUnknown, "", "" + } + return cond.Status, cond.Reason, cond.Message +} + +func (in *APIGateway) SyncedConditionStatus() corev1.ConditionStatus { + condition := in.Status.GetCondition(ConditionSynced) + if condition == nil { + return corev1.ConditionUnknown + } + return condition.Status +} + +func (in *APIGateway) Validate(tenancy common.ConsulTenancyConfig) error { + return nil +} + +// DefaultNamespaceFields is required as part of the common.ConsulResource interface. +func (in *APIGateway) DefaultNamespaceFields(tenancy common.ConsulTenancyConfig) {} diff --git a/control-plane/api/mesh/v2beta1/gateway_class_config_types.go b/control-plane/api/mesh/v2beta1/gateway_class_config_types.go new file mode 100644 index 0000000000..1678a14b55 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/gateway_class_config_types.go @@ -0,0 +1,171 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const KindGatewayClassConfig = "GatewayClassConfig" + +func init() { + MeshSchemeBuilder.Register(&GatewayClassConfig{}, &GatewayClassConfigList{}) +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// GatewayClassConfig is the Schema for the Mesh Gateway API +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the resource" +// +kubebuilder:resource:scope=Cluster +type GatewayClassConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec GatewayClassConfigSpec `json:"spec,omitempty"` + Status `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen=true + +// GatewayClassConfigSpec specifies the desired state of the GatewayClassConfig CRD. +type GatewayClassConfigSpec struct { + GatewayClassAnnotationsAndLabels `json:",inline"` + + // Deployment contains config specific to the Deployment created from this GatewayClass + Deployment GatewayClassDeploymentConfig `json:"deployment,omitempty"` + // Role contains config specific to the Role created from this GatewayClass + Role GatewayClassRoleConfig `json:"role,omitempty"` + // RoleBinding contains config specific to the RoleBinding created from this GatewayClass + RoleBinding GatewayClassRoleBindingConfig `json:"roleBinding,omitempty"` + // Service contains config specific to the Service created from this GatewayClass + Service GatewayClassServiceConfig `json:"service,omitempty"` + // ServiceAccount contains config specific to the corev1.ServiceAccount created from this GatewayClass + ServiceAccount GatewayClassServiceAccountConfig `json:"serviceAccount,omitempty"` +} + +// GatewayClassDeploymentConfig specifies the desired state of the Deployment created from the GatewayClassConfig. 
+type GatewayClassDeploymentConfig struct { + GatewayClassAnnotationsAndLabels `json:",inline"` + + // Container contains config specific to the created Deployment's container. + Container *GatewayClassContainerConfig `json:"container,omitempty"` + // InitContainer contains config specific to the created Deployment's init container. + InitContainer *GatewayClassInitContainerConfig `json:"initContainer,omitempty"` + // NodeSelector is a feature that constrains the scheduling of a pod to nodes that + // match specified labels. + // By defining NodeSelector in a pod's configuration, you can ensure that the pod is + // only scheduled to nodes with the corresponding labels, providing a way to + // influence the placement of workloads based on node attributes. + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // PriorityClassName specifies the priority class name to use on the created Deployment. + PriorityClassName string `json:"priorityClassName,omitempty"` + // Replicas specifies the configuration to control the number of replicas for the created Deployment. + Replicas *GatewayClassReplicasConfig `json:"replicas,omitempty"` + // SecurityContext specifies the security context for the created Deployment's Pod. + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + // Tolerations specifies the tolerations to use on the created Deployment. + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + // HostNetwork specifies whether the gateway pods should run on the host network. + HostNetwork bool `json:"hostNetwork,omitempty"` + // TopologySpreadConstraints is a feature that controls how pods are spread across your topology. 
+ // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + // DNSPolicy specifies the dns policy to use. These are set on a per pod basis. + // +kubebuilder:validation:Enum=Default;ClusterFirst;ClusterFirstWithHostNet;None + DNSPolicy corev1.DNSPolicy `json:"dnsPolicy,omitempty"` + // Affinity specifies the affinity to use on the created Deployment. + Affinity *corev1.Affinity `json:"affinity,omitempty"` +} + +type GatewayClassReplicasConfig struct { + // Default is the number of replicas assigned to the Deployment when created + Default *int32 `json:"default,omitempty"` + // Min is the minimum number of replicas allowed for a gateway with this class. + // If the replica count drops below this value due to manual or automated scaling, + // the replica count will be restored to this value. + Min *int32 `json:"min,omitempty"` + // Max is the maximum number of replicas allowed for a gateway with this class. + // If the replica count exceeds this value due to manual or automated scaling, + // the replica count will be restored to this value. 
+ Max *int32 `json:"max,omitempty"` +} + +type GatewayClassInitContainerConfig struct { + // Consul specifies configuration for the consul-k8s-control-plane init container + Consul GatewayClassConsulConfig `json:"consul,omitempty"` + // Resources specifies the resource requirements for the created Deployment's init container + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` +} + +type GatewayClassContainerConfig struct { + // Consul specifies configuration for the consul-dataplane container + Consul GatewayClassConsulConfig `json:"consul,omitempty"` + // Resources specifies the resource requirements for the created Deployment's container + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + // PortModifier specifies the value to be added to every port value for listeners on this gateway. + // This is generally used to avoid binding to privileged ports in the container. + PortModifier int32 `json:"portModifier,omitempty"` + // HostPort specifies a port to be exposed to the external host network + HostPort int32 `json:"hostPort,omitempty"` +} + +type GatewayClassRoleConfig struct { + GatewayClassAnnotationsAndLabels `json:",inline"` +} + +type GatewayClassRoleBindingConfig struct { + GatewayClassAnnotationsAndLabels `json:",inline"` +} + +type GatewayClassServiceConfig struct { + GatewayClassAnnotationsAndLabels `json:",inline"` + + // Type specifies the type of Service to use (LoadBalancer, ClusterIP, etc.) 
+ // +kubebuilder:validation:Enum=ClusterIP;NodePort;LoadBalancer + Type *corev1.ServiceType `json:"type,omitempty"` +} + +type GatewayClassServiceAccountConfig struct { + GatewayClassAnnotationsAndLabels `json:",inline"` +} + +type GatewayClassConsulConfig struct { + // Logging specifies the logging configuration for Consul Dataplane + Logging GatewayClassConsulLoggingConfig `json:"logging,omitempty"` +} + +type GatewayClassConsulLoggingConfig struct { + // Level sets the logging level for Consul Dataplane (debug, info, etc.) + Level string `json:"level,omitempty"` +} + +// GatewayClassAnnotationsAndLabels exists to provide a commonly-embedded wrapper +// for Annotations and Labels on a given resource configuration. +type GatewayClassAnnotationsAndLabels struct { + // Annotations are applied to the created resource + Annotations GatewayClassAnnotationsLabelsConfig `json:"annotations,omitempty"` + // Labels are applied to the created resource + Labels GatewayClassAnnotationsLabelsConfig `json:"labels,omitempty"` +} + +type GatewayClassAnnotationsLabelsConfig struct { + // InheritFromGateway lists the names/keys of annotations or labels to copy from the Gateway resource. + // Any name/key included here will override those in Set if specified on the Gateway. + InheritFromGateway []string `json:"inheritFromGateway,omitempty"` + // Set lists the names/keys and values of annotations or labels to set on the resource. + // Any name/key included here will be overridden if present in InheritFromGateway and set on the Gateway. + Set map[string]string `json:"set,omitempty"` +} + +// +kubebuilder:object:root=true + +// GatewayClassConfigList contains a list of GatewayClassConfig. 
+type GatewayClassConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []*GatewayClassConfig `json:"items"` +} diff --git a/control-plane/api/mesh/v2beta1/gateway_class_types.go b/control-plane/api/mesh/v2beta1/gateway_class_types.go new file mode 100644 index 0000000000..4e82e0a6b1 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/gateway_class_types.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package v2beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const KindGatewayClass = "GatewayClass" + +func init() { + MeshSchemeBuilder.Register(&GatewayClass{}, &GatewayClassList{}) +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// GatewayClass is the Schema for the Gateway Class API +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the resource" +// +kubebuilder:resource:scope=Cluster +type GatewayClass struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec GatewayClassSpec `json:"spec,omitempty"` + Status `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GatewayClassList contains a list of GatewayClass. +type GatewayClassList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []*GatewayClass `json:"items"` +} + +type GatewayClassSpec struct { + // ControllerName is the name of the Kubernetes controller + // that manages Gateways of this class + ControllerName string `json:"controllerName"` + + // ParametersRef refers to a resource responsible for configuring + // the behavior of the GatewayClass. 
+ ParametersRef *ParametersReference `json:"parametersRef"` + + // Description of GatewayClass + Description string `json:"description,omitempty"` +} + +type ParametersReference struct { + // The Kubernetes Group that the referred object belongs to + Group string `json:"group,omitempty"` + + // The Kubernetes Kind that the referred object is + Kind string `json:"kind,omitempty"` + + // The Name of the referred object + Name string `json:"name"` + + // The kubernetes namespace that the referred object is in + Namespace *string `json:"namespace,omitempty"` +} diff --git a/control-plane/api/mesh/v2beta1/grpc_route_types.go b/control-plane/api/mesh/v2beta1/grpc_route_types.go new file mode 100644 index 0000000000..16c6725cf9 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/grpc_route_types.go @@ -0,0 +1,322 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package v2beta1 + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "google.golang.org/protobuf/testing/protocmp" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" +) + +const ( + grpcRouteKubeKind = "grpcroute" +) + +func init() { + MeshSchemeBuilder.Register(&GRPCRoute{}, &GRPCRouteList{}) +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// GRPCRoute is the Schema for the GRPC Route API +// +kubebuilder:printcolumn:name="Synced",type="string",JSONPath=".status.conditions[?(@.type==\"Synced\")].status",description="The sync status of the resource with Consul" +// 
+kubebuilder:printcolumn:name="Last Synced",type="date",JSONPath=".status.lastSyncedTime",description="The last successful synced time of the resource with Consul" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the resource" +// +kubebuilder:resource:shortName="grpc-route" +type GRPCRoute struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec pbmesh.GRPCRoute `json:"spec,omitempty"` + Status `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GRPCRouteList contains a list of GRPCRoute. +type GRPCRouteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []*GRPCRoute `json:"items"` +} + +func (in *GRPCRoute) ResourceID(namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: in.Name, + Type: pbmesh.GRPCRouteType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + } +} + +func (in *GRPCRoute) Resource(namespace, partition string) *pbresource.Resource { + return &pbresource.Resource{ + Id: in.ResourceID(namespace, partition), + Data: inject.ToProtoAny(&in.Spec), + Metadata: meshConfigMeta(), + } +} + +func (in *GRPCRoute) AddFinalizer(f string) { + in.ObjectMeta.Finalizers = append(in.Finalizers(), f) +} + +func (in *GRPCRoute) RemoveFinalizer(f string) { + var newFinalizers []string + for _, oldF := range in.Finalizers() { + if oldF != f { + newFinalizers = append(newFinalizers, oldF) + } + } + in.ObjectMeta.Finalizers = newFinalizers +} + +func (in *GRPCRoute) Finalizers() []string { + return in.ObjectMeta.Finalizers +} + +func (in *GRPCRoute) MatchesConsul(candidate *pbresource.Resource, namespace, partition string) bool { + return cmp.Equal( + in.Resource(namespace, partition), + candidate, + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + 
protocmp.Transform(), + cmpopts.SortSlices(func(a, b any) bool { return fmt.Sprintf("%v", a) < fmt.Sprintf("%v", b) }), + ) +} + +func (in *GRPCRoute) KubeKind() string { + return grpcRouteKubeKind +} + +func (in *GRPCRoute) KubernetesName() string { + return in.ObjectMeta.Name +} + +func (in *GRPCRoute) SetSyncedCondition(status corev1.ConditionStatus, reason, message string) { + in.Status.Conditions = Conditions{ + { + Type: ConditionSynced, + Status: status, + LastTransitionTime: metav1.Now(), + Reason: reason, + Message: message, + }, + } +} + +func (in *GRPCRoute) SetLastSyncedTime(time *metav1.Time) { + in.Status.LastSyncedTime = time +} + +func (in *GRPCRoute) SyncedCondition() (status corev1.ConditionStatus, reason, message string) { + cond := in.Status.GetCondition(ConditionSynced) + if cond == nil { + return corev1.ConditionUnknown, "", "" + } + return cond.Status, cond.Reason, cond.Message +} + +func (in *GRPCRoute) SyncedConditionStatus() corev1.ConditionStatus { + condition := in.Status.GetCondition(ConditionSynced) + if condition == nil { + return corev1.ConditionUnknown + } + return condition.Status +} + +func (in *GRPCRoute) Validate(tenancy common.ConsulTenancyConfig) error { + var errs field.ErrorList + var route pbmesh.GRPCRoute + path := field.NewPath("spec") + + res := in.Resource(tenancy.ConsulDestinationNamespace, tenancy.ConsulPartition) + + if err := res.Data.UnmarshalTo(&route); err != nil { + return fmt.Errorf("error parsing resource data as type %q: %s", &route, err) + } + + if len(route.ParentRefs) == 0 { + errs = append(errs, field.Required(path.Child("parentRefs"), "cannot be empty")) + } + + if len(route.Hostnames) > 0 { + errs = append(errs, field.Invalid(path.Child("hostnames"), route.Hostnames, "should not populate hostnames")) + } + + for i, rule := range route.Rules { + rulePath := path.Child("rules").Index(i) + for j, match := range rule.Matches { + ruleMatchPath := rulePath.Child("matches").Index(j) + if match.Method != nil { 
+ switch match.Method.Type { + case pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_UNSPECIFIED: + errs = append(errs, field.Invalid(ruleMatchPath.Child("method").Child("type"), match.Method.Type, "missing required field")) + case pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_EXACT: + case pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_REGEX: + default: + errs = append(errs, field.Invalid(ruleMatchPath.Child("method").Child("type"), match.Method.Type, fmt.Sprintf("not a supported enum value: %v", match.Method.Type))) + } + if match.Method.Service == "" && match.Method.Method == "" { + errs = append(errs, field.Invalid(ruleMatchPath.Child("method").Child("service"), match.Method.Service, "at least one of \"service\" or \"method\" must be set")) + } + } + + for k, header := range match.Headers { + ruleHeaderPath := ruleMatchPath.Child("headers").Index(k) + if err := validateHeaderMatchType(header.Type); err != nil { + errs = append(errs, field.Invalid(ruleHeaderPath.Child("type"), header.Type, err.Error())) + } + + if header.Name == "" { + errs = append(errs, field.Required(ruleHeaderPath.Child("name"), "missing required field")) + } + } + } + + for j, filter := range rule.Filters { + set := 0 + if filter.RequestHeaderModifier != nil { + set++ + } + if filter.ResponseHeaderModifier != nil { + set++ + } + if filter.UrlRewrite != nil { + set++ + if filter.UrlRewrite.PathPrefix == "" { + errs = append(errs, field.Required(rulePath.Child("filters").Index(j).Child("urlRewrite").Child("pathPrefix"), "field should not be empty if enclosing section is set")) + } + } + if set != 1 { + errs = append(errs, field.Invalid(rulePath.Child("filters").Index(j), filter, "exactly one of request_header_modifier, response_header_modifier, or url_rewrite is required")) + } + } + + if len(rule.BackendRefs) == 0 { + errs = append(errs, field.Required(rulePath.Child("backendRefs"), "missing required field")) + } + for j, hbref := range rule.BackendRefs { + ruleBackendRefsPath := 
rulePath.Child("backendRefs").Index(j) + if hbref.BackendRef == nil { + errs = append(errs, field.Required(ruleBackendRefsPath.Child("backendRef"), "missing required field")) + continue + } + + if hbref.BackendRef.Datacenter != "" { + errs = append(errs, field.Invalid(ruleBackendRefsPath.Child("backendRef").Child("datacenter"), hbref.BackendRef.Datacenter, "datacenter is not yet supported on backend refs")) + } + + if len(hbref.Filters) > 0 { + errs = append(errs, field.Invalid(ruleBackendRefsPath.Child("filters"), hbref.Filters, "filters are not supported at this level yet")) + } + } + + if rule.Timeouts != nil { + errs = append(errs, validateHTTPTimeouts(rule.Timeouts, rulePath.Child("timeouts"))...) + } + if rule.Retries != nil { + errs = append(errs, validateHTTPRetries(rule.Retries, rulePath.Child("retries"))...) + } + } + + if len(errs) > 0 { + return apierrors.NewInvalid( + schema.GroupKind{Group: MeshGroup, Kind: common.GRPCRoute}, + in.KubernetesName(), errs) + } + return nil +} + +func validateHeaderMatchType(typ pbmesh.HeaderMatchType) error { + switch typ { + case pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_UNSPECIFIED: + return fmt.Errorf("missing required field") + case pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT: + case pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_REGEX: + case pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PRESENT: + case pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PREFIX: + case pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_SUFFIX: + default: + return fmt.Errorf("not a supported enum value: %v", typ) + } + return nil +} + +func validateHTTPTimeouts(timeouts *pbmesh.HTTPRouteTimeouts, path *field.Path) field.ErrorList { + if timeouts == nil { + return nil + } + + var errs field.ErrorList + + if timeouts.Request != nil { + val := timeouts.Request.AsDuration() + if val < 0 { + errs = append(errs, field.Invalid(path.Child("request"), val, "timeout cannot be negative")) + } + } + if timeouts.Idle != nil { + val := timeouts.Idle.AsDuration() + if val < 0 
{ + errs = append(errs, field.Invalid(path.Child("idle"), val, "timeout cannot be negative")) + } + } + + return errs +} + +func validateHTTPRetries(retries *pbmesh.HTTPRouteRetries, path *field.Path) field.ErrorList { + if retries == nil { + return nil + } + + var errs field.ErrorList + + for i, condition := range retries.OnConditions { + if !isValidRetryCondition(condition) { + errs = append(errs, field.Invalid(path.Child("onConditions").Index(i), condition, "not a valid retry condition")) + } + } + + return errs +} + +func isValidRetryCondition(retryOn string) bool { + switch retryOn { + case "5xx", + "gateway-error", + "reset", + "connect-failure", + "envoy-ratelimited", + "retriable-4xx", + "refused-stream", + "cancelled", + "deadline-exceeded", + "internal", + "resource-exhausted", + "unavailable": + return true + default: + return false + } +} + +// DefaultNamespaceFields is required as part of the common.MeshConfig interface. +func (in *GRPCRoute) DefaultNamespaceFields(tenancy common.ConsulTenancyConfig) {} diff --git a/control-plane/api/mesh/v2beta1/grpc_route_types_test.go b/control-plane/api/mesh/v2beta1/grpc_route_types_test.go new file mode 100644 index 0000000000..07f010bd97 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/grpc_route_types_test.go @@ -0,0 +1,1193 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +func TestGRPCRoute_MatchesConsul(t *testing.T) { + cases := map[string]struct { + OurConsulNamespace string + OurConsulPartition string + OurData *GRPCRoute + + TheirName string + TheirConsulNamespace string + TheirConsulPartition string + TheirData *pbmesh.GRPCRoute + ResourceOverride *pbresource.Resource // Used to test that an empty resource of another type will not match + + Matches bool + }{ + "empty fields matches": { + OurConsulNamespace: constants.DefaultConsulNS, + OurConsulPartition: constants.DefaultConsulPartition, + OurData: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + }, + Spec: pbmesh.GRPCRoute{}, + }, + TheirName: "name", + TheirConsulNamespace: constants.DefaultConsulNS, + TheirConsulPartition: constants.DefaultConsulPartition, + TheirData: &pbmesh.GRPCRoute{}, + Matches: true, + }, + "hostnames are compared": { + OurConsulNamespace: "consul-ns", + OurConsulPartition: "consul-partition", + OurData: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.GRPCRoute{ + Hostnames: []string{ + "a-hostname", "another-hostname", + }, + }, + }, + 
TheirName: "foo", + TheirConsulNamespace: "consul-ns", + TheirConsulPartition: "consul-partition", + TheirData: &pbmesh.GRPCRoute{ + Hostnames: []string{ + "not-a-hostname", "another-hostname", + }, + }, + Matches: false, + }, + "all fields set matches": { + OurConsulNamespace: "consul-ns", + OurConsulPartition: "consul-partition", + OurData: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.GRPCRoute{ + Rules: []*pbmesh.GRPCRouteRule{ + { + Matches: []*pbmesh.GRPCRouteMatch{ + { + Method: &pbmesh.GRPCMethodMatch{ + Type: pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_EXACT, + Service: "test-service", + Method: "GET", + }, + Headers: []*pbmesh.GRPCHeaderMatch{ + { + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PREFIX, + Name: "test-header", + Value: "header-value", + }, + }, + }, + }, + Filters: []*pbmesh.GRPCRouteFilter{ + { + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "a-path-prefix", + }, + }, + }, + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: &durationpb.Duration{ + Seconds: 10, + Nanos: 5, + }, + Idle: &durationpb.Duration{ + Seconds: 5, + Nanos: 10, + }, + }, + Retries: &pbmesh.HTTPRouteRetries{ + Number: &wrapperspb.UInt32Value{ + Value: 1, + }, + OnConnectFailure: false, + OnConditions: []string{ + "condition-one", "condition-two", + }, + OnStatusCodes: []uint32{ + 200, 201, 202, + }, + }, + }, + }, + }, + }, 
+ TheirName: "foo", + TheirConsulNamespace: "consul-ns", + TheirConsulPartition: "consul-partition", + TheirData: &pbmesh.GRPCRoute{ + Rules: []*pbmesh.GRPCRouteRule{ + { + Matches: []*pbmesh.GRPCRouteMatch{ + { + Method: &pbmesh.GRPCMethodMatch{ + Type: pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_EXACT, + Service: "test-service", + Method: "GET", + }, + Headers: []*pbmesh.GRPCHeaderMatch{ + { + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PREFIX, + Name: "test-header", + Value: "header-value", + }, + }, + }, + }, + Filters: []*pbmesh.GRPCRouteFilter{ + { + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "a-path-prefix", + }, + }, + }, + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: &durationpb.Duration{ + Seconds: 10, + Nanos: 5, + }, + Idle: &durationpb.Duration{ + Seconds: 5, + Nanos: 10, + }, + }, + Retries: &pbmesh.HTTPRouteRetries{ + Number: &wrapperspb.UInt32Value{ + Value: 1, + }, + OnConnectFailure: false, + OnConditions: []string{ + "condition-one", "condition-two", + }, + OnStatusCodes: []uint32{ + 200, 201, 202, + }, + }, + }, + }, + }, + Matches: true, + }, + "different types does not match": { + OurConsulNamespace: constants.DefaultConsulNS, + OurConsulPartition: constants.DefaultConsulPartition, + OurData: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + }, + Spec: pbmesh.GRPCRoute{}, + }, + ResourceOverride: &pbresource.Resource{ + Id: 
&pbresource.ID{ + Name: "name", + Type: pbmesh.ProxyConfigurationType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.DefaultConsulNS, + Namespace: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbmesh.ProxyConfiguration{}), + Metadata: meshConfigMeta(), + }, + Matches: false, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + consulResource := c.ResourceOverride + if c.TheirName != "" { + consulResource = constructGRPCRouteResource(c.TheirData, c.TheirName, c.TheirConsulNamespace, c.TheirConsulPartition) + } + require.Equal(t, c.Matches, c.OurData.MatchesConsul(consulResource, c.OurConsulNamespace, c.OurConsulPartition)) + }) + } +} + +// TestGRPCRoute_Resource also includes test to verify ResourceID(). +func TestGRPCRoute_Resource(t *testing.T) { + cases := map[string]struct { + Ours *GRPCRoute + ConsulNamespace string + ConsulPartition string + ExpectedName string + ExpectedData *pbmesh.GRPCRoute + }{ + "empty fields": { + Ours: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: pbmesh.GRPCRoute{}, + }, + ConsulNamespace: constants.DefaultConsulNS, + ConsulPartition: constants.DefaultConsulPartition, + ExpectedName: "foo", + ExpectedData: &pbmesh.GRPCRoute{}, + }, + "every field set": { + Ours: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.GRPCRoute{ + Rules: []*pbmesh.GRPCRouteRule{ + { + Matches: []*pbmesh.GRPCRouteMatch{ + { + Method: &pbmesh.GRPCMethodMatch{ + Type: pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_EXACT, + Service: "test-service", + Method: "GET", + }, + Headers: []*pbmesh.GRPCHeaderMatch{ + { + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PREFIX, + Name: "test-header", + Value: "header-value", + }, + }, + }, + }, + Filters: []*pbmesh.GRPCRouteFilter{ + { + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: 
[]*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "a-path-prefix", + }, + }, + }, + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: &durationpb.Duration{ + Seconds: 10, + Nanos: 5, + }, + Idle: &durationpb.Duration{ + Seconds: 5, + Nanos: 10, + }, + }, + Retries: &pbmesh.HTTPRouteRetries{ + Number: &wrapperspb.UInt32Value{ + Value: 1, + }, + OnConnectFailure: false, + OnConditions: []string{ + "condition-one", "condition-two", + }, + OnStatusCodes: []uint32{ + 200, 201, 202, + }, + }, + }, + }, + }, + }, + ConsulNamespace: "not-default-namespace", + ConsulPartition: "not-default-partition", + ExpectedName: "foo", + ExpectedData: &pbmesh.GRPCRoute{ + Rules: []*pbmesh.GRPCRouteRule{ + { + Matches: []*pbmesh.GRPCRouteMatch{ + { + Method: &pbmesh.GRPCMethodMatch{ + Type: pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_EXACT, + Service: "test-service", + Method: "GET", + }, + Headers: []*pbmesh.GRPCHeaderMatch{ + { + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PREFIX, + Name: "test-header", + Value: "header-value", + }, + }, + }, + }, + Filters: []*pbmesh.GRPCRouteFilter{ + { + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: 
[]*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "a-path-prefix", + }, + }, + }, + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: &durationpb.Duration{ + Seconds: 10, + Nanos: 5, + }, + Idle: &durationpb.Duration{ + Seconds: 5, + Nanos: 10, + }, + }, + Retries: &pbmesh.HTTPRouteRetries{ + Number: &wrapperspb.UInt32Value{ + Value: 1, + }, + OnConnectFailure: false, + OnConditions: []string{ + "condition-one", "condition-two", + }, + OnStatusCodes: []uint32{ + 200, 201, 202, + }, + }, + }, + }, + }, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + actual := c.Ours.Resource(c.ConsulNamespace, c.ConsulPartition) + expected := constructGRPCRouteResource(c.ExpectedData, c.ExpectedName, c.ConsulNamespace, c.ConsulPartition) + + opts := append([]cmp.Option{ + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + }, test.CmpProtoIgnoreOrder()...) + diff := cmp.Diff(expected, actual, opts...) 
+ require.Equal(t, "", diff, "GRPCRoute do not match") + }) + } +} + +func TestGRPCRoute_SetSyncedCondition(t *testing.T) { + trafficPermissions := &GRPCRoute{} + trafficPermissions.SetSyncedCondition(corev1.ConditionTrue, "reason", "message") + + require.Equal(t, corev1.ConditionTrue, trafficPermissions.Status.Conditions[0].Status) + require.Equal(t, "reason", trafficPermissions.Status.Conditions[0].Reason) + require.Equal(t, "message", trafficPermissions.Status.Conditions[0].Message) + now := metav1.Now() + require.True(t, trafficPermissions.Status.Conditions[0].LastTransitionTime.Before(&now)) +} + +func TestGRPCRoute_SetLastSyncedTime(t *testing.T) { + trafficPermissions := &GRPCRoute{} + syncedTime := metav1.NewTime(time.Now()) + trafficPermissions.SetLastSyncedTime(&syncedTime) + + require.Equal(t, &syncedTime, trafficPermissions.Status.LastSyncedTime) +} + +func TestGRPCRoute_GetSyncedConditionStatus(t *testing.T) { + cases := []corev1.ConditionStatus{ + corev1.ConditionUnknown, + corev1.ConditionFalse, + corev1.ConditionTrue, + } + for _, status := range cases { + t.Run(string(status), func(t *testing.T) { + trafficPermissions := &GRPCRoute{ + Status: Status{ + Conditions: []Condition{{ + Type: ConditionSynced, + Status: status, + }}, + }, + } + + require.Equal(t, status, trafficPermissions.SyncedConditionStatus()) + }) + } +} + +func TestGRPCRoute_GetConditionWhenStatusNil(t *testing.T) { + require.Nil(t, (&GRPCRoute{}).GetCondition(ConditionSynced)) +} + +func TestGRPCRoute_SyncedConditionStatusWhenStatusNil(t *testing.T) { + require.Equal(t, corev1.ConditionUnknown, (&GRPCRoute{}).SyncedConditionStatus()) +} + +func TestGRPCRoute_SyncedConditionWhenStatusNil(t *testing.T) { + status, reason, message := (&GRPCRoute{}).SyncedCondition() + require.Equal(t, corev1.ConditionUnknown, status) + require.Equal(t, "", reason) + require.Equal(t, "", message) +} + +func TestGRPCRoute_KubeKind(t *testing.T) { + require.Equal(t, "grpcroute", (&GRPCRoute{}).KubeKind()) 
+} + +func TestGRPCRoute_KubernetesName(t *testing.T) { + require.Equal(t, "test", (&GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "bar", + }, + Spec: pbmesh.GRPCRoute{}, + }).KubernetesName()) +} + +func TestGRPCRoute_ObjectMeta(t *testing.T) { + meta := metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + } + trafficPermissions := &GRPCRoute{ + ObjectMeta: meta, + } + require.Equal(t, &meta, trafficPermissions.GetObjectMeta()) +} + +// Test defaulting behavior when namespaces are enabled as well as disabled. +// TODO: add when implemented +// func TestGRPCRoute_DefaultNamespaceFields(t *testing.T) + +func TestGRPCRoute_Validate(t *testing.T) { + cases := []struct { + name string + input *GRPCRoute + expectedErrMsgs []string + }{ + { + name: "kitchen sink OK", + input: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "20020", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.GRPCRouteRule{ + { + Matches: []*pbmesh.GRPCRouteMatch{ + { + Method: &pbmesh.GRPCMethodMatch{ + Type: pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_EXACT, + Service: "test-service", + Method: "GET", + }, + Headers: []*pbmesh.GRPCHeaderMatch{ + { + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PREFIX, + Name: "test-header", + Value: "header-value", + }, + }, + }, + }, + Filters: []*pbmesh.GRPCRouteFilter{ + { + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "a-path-prefix", + }, + }, + }, + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: &durationpb.Duration{ + Seconds: 10, + Nanos: 5, + }, + Idle: &durationpb.Duration{ + Seconds: 5, + Nanos: 10, + }, + }, + Retries: &pbmesh.HTTPRouteRetries{ + Number: 
&wrapperspb.UInt32Value{ + Value: 1, + }, + OnConnectFailure: false, + OnConditions: []string{ + "5xx", "resource-exhausted", + }, + OnStatusCodes: []uint32{ + 200, 201, 202, + }, + }, + BackendRefs: []*pbmesh.GRPCBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "21000", + }, + Weight: 50, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: nil, + }, + { + name: "empty parentRefs", + input: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{}, + }, + }, + expectedErrMsgs: []string{ + `spec.parentRefs: Required value: cannot be empty`, + }, + }, + { + name: "populated hostnames", + input: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "20020", + }, + }, + Hostnames: []string{"a-hostname"}, + }, + }, + expectedErrMsgs: []string{ + `spec.hostnames: Invalid value: []string{"a-hostname"}: should not populate hostnames`, + }, + }, + { + name: "rules.matches.method", + input: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "20020", + }, + }, + Hostnames: []string{}, + Rules: 
[]*pbmesh.GRPCRouteRule{ + { + Matches: []*pbmesh.GRPCRouteMatch{ + { + Method: &pbmesh.GRPCMethodMatch{ + Type: pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_UNSPECIFIED, + Service: "test-service", + Method: "GET", + }, + }, { + Method: &pbmesh.GRPCMethodMatch{ + Service: "test-service", + Method: "GET", + }, + }, { + Method: &pbmesh.GRPCMethodMatch{ + Type: pbmesh.GRPCMethodMatchType_GRPC_METHOD_MATCH_TYPE_EXACT, + }, + }, + }, + BackendRefs: []*pbmesh.GRPCBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "21000", + }, + Weight: 50, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].matches[0].method.type: Invalid value: GRPC_METHOD_MATCH_TYPE_UNSPECIFIED: missing required field`, + `spec.rules[0].matches[1].method.type: Invalid value: GRPC_METHOD_MATCH_TYPE_UNSPECIFIED: missing required field`, + `spec.rules[0].matches[2].method.service: Invalid value: "": at least one of "service" or "method" must be set`, + }, + }, + { + name: "rules.matches.headers", + input: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "20020", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.GRPCRouteRule{ + { + Matches: []*pbmesh.GRPCRouteMatch{ + { + Headers: []*pbmesh.GRPCHeaderMatch{ + { + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_UNSPECIFIED, + Name: "test-header", + Value: "header-value", + }, + { + Name: "test-header", + Value: "header-value", + }, + { + Type: 
pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PREFIX, + Value: "header-value", + }, + }, + }, + }, + BackendRefs: []*pbmesh.GRPCBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "21000", + }, + Weight: 50, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].matches[0].headers[0].type: Invalid value: HEADER_MATCH_TYPE_UNSPECIFIED: missing required field`, + `spec.rules[0].matches[0].headers[1].type: Invalid value: HEADER_MATCH_TYPE_UNSPECIFIED: missing required field`, + `spec.rules[0].matches[0].headers[2].name: Required value: missing required field`, + }, + }, + { + name: "rules.filters", + input: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "20020", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.GRPCRouteRule{ + { + Filters: []*pbmesh.GRPCRouteFilter{ + { + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "", + }, + }, + }, + BackendRefs: []*pbmesh.GRPCBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "21000", + }, + Weight: 50, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].filters[0].urlRewrite.pathPrefix: 
Required value: field should not be empty if enclosing section is set`, + `exactly one of request_header_modifier, response_header_modifier, or url_rewrite is required`, + }, + }, + { + name: "missing backendRefs", + input: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "20020", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.GRPCRouteRule{ + { + BackendRefs: []*pbmesh.GRPCBackendRef{}, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].backendRefs: Required value: missing required field`, + }, + }, + { + name: "rules.backendRefs", + input: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "20020", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.GRPCRouteRule{ + { + BackendRefs: []*pbmesh.GRPCBackendRef{ + { + Weight: 50, + }, + { + BackendRef: &pbmesh.BackendReference{ + Datacenter: "wrong-datacenter", + Port: "21000", + }, + Weight: 50, + }, + { + BackendRef: &pbmesh.BackendReference{ + Port: "21000", + }, + Filters: []*pbmesh.GRPCRouteFilter{{}}, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].backendRefs[0].backendRef: Required value: missing required field`, + `spec.rules[0].backendRefs[1].backendRef.datacenter: Invalid value: "wrong-datacenter": datacenter is not yet supported on backend refs`, + `filters are not supported at this level yet`, + 
}, + }, + { + name: "rules.timeout", + input: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "20020", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.GRPCRouteRule{ + { + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: &durationpb.Duration{ + Seconds: -9, + Nanos: -10, + }, + Idle: &durationpb.Duration{ + Seconds: -2, + Nanos: -3, + }, + }, + BackendRefs: []*pbmesh.GRPCBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "21000", + }, + Weight: 50, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].timeouts.request: Invalid value: -9.00000001s: timeout cannot be negative`, + `spec.rules[0].timeouts.idle: Invalid value: -2.000000003s: timeout cannot be negative`, + }, + }, + { + name: "rules.retries", + input: &GRPCRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "20020", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.GRPCRouteRule{ + { + Retries: &pbmesh.HTTPRouteRetries{ + OnConditions: []string{"invalid-condition", "another-invalid-condition", "internal"}, + }, + BackendRefs: []*pbmesh.GRPCBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + 
Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "reference", + Section: "some-section", + }, + Port: "21000", + }, + Weight: 50, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].retries.onConditions[0]: Invalid value: "invalid-condition": not a valid retry condition`, + `spec.rules[0].retries.onConditions[1]: Invalid value: "another-invalid-condition": not a valid retry condition`, + }, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := tc.input.Validate(common.ConsulTenancyConfig{}) + if len(tc.expectedErrMsgs) != 0 { + require.Error(t, err) + for _, s := range tc.expectedErrMsgs { + require.Contains(t, err.Error(), s) + } + } else { + require.NoError(t, err) + } + }) + } +} + +func constructGRPCRouteResource(tp *pbmesh.GRPCRoute, name, namespace, partition string) *pbresource.Resource { + data := inject.ToProtoAny(tp) + + id := &pbresource.ID{ + Name: name, + Type: pbmesh.GRPCRouteType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + Uid: "ABCD", // We add this to show it does not factor into the comparison + } + + return &pbresource.Resource{ + Id: id, + Data: data, + Metadata: meshConfigMeta(), + + // We add the fields below to prove that they are not used in the Match when comparing the CRD to Consul. 
+ Version: "123456", + Generation: "01ARZ3NDEKTSV4RRFFQ69G5FAV", + Status: map[string]*pbresource.Status{ + "knock": { + ObservedGeneration: "01ARZ3NDEKTSV4RRFFQ69G5FAV", + Conditions: make([]*pbresource.Condition, 0), + UpdatedAt: timestamppb.Now(), + }, + }, + } +} diff --git a/control-plane/api/mesh/v2beta1/grpc_route_webhook.go b/control-plane/api/mesh/v2beta1/grpc_route_webhook.go new file mode 100644 index 0000000000..2eab837467 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/grpc_route_webhook.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + "context" + "net/http" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" +) + +// +kubebuilder:object:generate=false + +type GRPCRouteWebhook struct { + Logger logr.Logger + + // ConsulTenancyConfig contains the injector's namespace and partition configuration. + ConsulTenancyConfig common.ConsulTenancyConfig + + decoder *admission.Decoder + client.Client +} + +var _ common.ConsulResourceLister = &GRPCRouteWebhook{} + +// NOTE: The path value in the below line is the path to the webhook. +// If it is updated, run code-gen, update subcommand/inject-connect/command.go +// and the consul-helm value for the path to the webhook. +// +// NOTE: The below line cannot be combined with any other comment. If it is it will break the code generation. 
+//
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-v2beta1-grpcroute,mutating=true,failurePolicy=fail,groups=auth.consul.hashicorp.com,resources=grpcroute,versions=v2beta1,name=mutate-grpcroute.auth.consul.hashicorp.com,sideEffects=None,admissionReviewVersions=v1beta1;v1
+
+// NOTE(review): the marker above registers this webhook under
+// groups=auth.consul.hashicorp.com and names it
+// mutate-grpcroute.auth.consul.hashicorp.com, yet GRPCRoute is a pbmesh
+// (mesh-group) type — this looks copy-pasted from the auth webhooks; confirm
+// whether it should be mesh.consul.hashicorp.com. Also confirm whether
+// resources= should be the plural form ("grpcroutes"). This note is kept in a
+// separate comment block because the marker line must not be combined with any
+// other comment (see the warning above it).
+
+// Handle decodes the incoming admission request into a GRPCRoute and delegates
+// validation to common.ValidateConsulResource with the injector's tenancy
+// configuration. A request that fails to decode is rejected with HTTP 400.
+func (v *GRPCRouteWebhook) Handle(ctx context.Context, req admission.Request) admission.Response {
+	var resource GRPCRoute
+	err := v.decoder.Decode(req, &resource)
+	if err != nil {
+		return admission.Errored(http.StatusBadRequest, err)
+	}
+
+	return common.ValidateConsulResource(ctx, req, v.Logger, v, &resource, v.ConsulTenancyConfig)
+}
+
+// List returns every GRPCRoute in the cluster as common.ConsulResource values.
+// It implements common.ConsulResourceLister (asserted on the type above).
+func (v *GRPCRouteWebhook) List(ctx context.Context) ([]common.ConsulResource, error) {
+	var resourceList GRPCRouteList
+	if err := v.Client.List(ctx, &resourceList); err != nil {
+		return nil, err
+	}
+	var entries []common.ConsulResource
+	for _, item := range resourceList.Items {
+		entries = append(entries, common.ConsulResource(item))
+	}
+	return entries, nil
+}
+
+// SetupWithManager initializes the admission decoder from the manager's scheme
+// and registers this webhook at /mutate-v2beta1-grpcroute (must stay in sync
+// with the kubebuilder marker path above).
+func (v *GRPCRouteWebhook) SetupWithManager(mgr ctrl.Manager) {
+	v.decoder = admission.NewDecoder(mgr.GetScheme())
+	mgr.GetWebhookServer().Register("/mutate-v2beta1-grpcroute", &admission.Webhook{Handler: v})
+}
diff --git a/control-plane/api/mesh/v2beta1/http_route_types.go b/control-plane/api/mesh/v2beta1/http_route_types.go
new file mode 100644
index 0000000000..dd8e0848f4
--- /dev/null
+++ b/control-plane/api/mesh/v2beta1/http_route_types.go
@@ -0,0 +1,304 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 +package v2beta1 + +import ( + "fmt" + "net/http" + "strings" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "google.golang.org/protobuf/testing/protocmp" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" +) + +const ( + httpRouteKubeKind = "httproute" +) + +func init() { + MeshSchemeBuilder.Register(&HTTPRoute{}, &HTTPRouteList{}) +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// HTTPRoute is the Schema for the HTTP Route API +// +kubebuilder:printcolumn:name="Synced",type="string",JSONPath=".status.conditions[?(@.type==\"Synced\")].status",description="The sync status of the resource with Consul" +// +kubebuilder:printcolumn:name="Last Synced",type="date",JSONPath=".status.lastSyncedTime",description="The last successful synced time of the resource with Consul" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the resource" +// +kubebuilder:resource:shortName="http-route" +type HTTPRoute struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec pbmesh.HTTPRoute `json:"spec,omitempty"` + Status `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HTTPRouteList contains a list of HTTPRoute. 
+type HTTPRouteList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []*HTTPRoute `json:"items"`
+}
+
+// ResourceID returns the Consul resource ID for this HTTPRoute in the given
+// Consul namespace and partition. The name is taken from the Kubernetes
+// object's name.
+func (in *HTTPRoute) ResourceID(namespace, partition string) *pbresource.ID {
+	return &pbresource.ID{
+		Name: in.Name,
+		Type: pbmesh.HTTPRouteType,
+		Tenancy: &pbresource.Tenancy{
+			Partition: partition,
+			Namespace: namespace,
+		},
+	}
+}
+
+// Resource builds the pbresource.Resource form of this HTTPRoute: the ID from
+// ResourceID, the Spec proto-encoded into an Any, and the standard mesh
+// config metadata.
+func (in *HTTPRoute) Resource(namespace, partition string) *pbresource.Resource {
+	return &pbresource.Resource{
+		Id:       in.ResourceID(namespace, partition),
+		Data:     inject.ToProtoAny(&in.Spec),
+		Metadata: meshConfigMeta(),
+	}
+}
+
+// AddFinalizer appends f to the object's finalizer list.
+func (in *HTTPRoute) AddFinalizer(f string) {
+	in.ObjectMeta.Finalizers = append(in.Finalizers(), f)
+}
+
+// RemoveFinalizer removes every occurrence of f from the finalizer list,
+// preserving the order of the remaining entries.
+func (in *HTTPRoute) RemoveFinalizer(f string) {
+	var newFinalizers []string
+	for _, oldF := range in.Finalizers() {
+		if oldF != f {
+			newFinalizers = append(newFinalizers, oldF)
+		}
+	}
+	in.ObjectMeta.Finalizers = newFinalizers
+}
+
+// Finalizers returns the object's current finalizer list.
+func (in *HTTPRoute) Finalizers() []string {
+	return in.ObjectMeta.Finalizers
+}
+
+// MatchesConsul reports whether candidate is semantically equal to this
+// route's Resource form, ignoring server-managed fields (status, generation,
+// version, and ID uid) and treating slices as unordered.
+func (in *HTTPRoute) MatchesConsul(candidate *pbresource.Resource, namespace, partition string) bool {
+	return cmp.Equal(
+		in.Resource(namespace, partition),
+		candidate,
+		protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"),
+		protocmp.IgnoreFields(&pbresource.ID{}, "uid"),
+		protocmp.Transform(),
+		cmpopts.SortSlices(func(a, b any) bool { return fmt.Sprintf("%v", a) < fmt.Sprintf("%v", b) }),
+	)
+}
+
+// KubeKind returns the lowercase Kubernetes kind string for this type.
+func (in *HTTPRoute) KubeKind() string {
+	return httpRouteKubeKind
+}
+
+// KubernetesName returns the Kubernetes object name.
+func (in *HTTPRoute) KubernetesName() string {
+	return in.ObjectMeta.Name
+}
+
+// SetSyncedCondition replaces the status conditions with a single Synced
+// condition stamped with the current time.
+func (in *HTTPRoute) SetSyncedCondition(status corev1.ConditionStatus, reason, message string) {
+	in.Status.Conditions = Conditions{
+		{
+			Type:               ConditionSynced,
+			Status:             status,
+			LastTransitionTime: metav1.Now(),
+			Reason:             reason,
+			Message:            message,
+		},
+	}
+}
+
+// SetLastSyncedTime records the last successful sync time in the status.
+func (in *HTTPRoute) SetLastSyncedTime(time *metav1.Time) {
+	
in.Status.LastSyncedTime = time +} + +func (in *HTTPRoute) SyncedCondition() (status corev1.ConditionStatus, reason, message string) { + cond := in.Status.GetCondition(ConditionSynced) + if cond == nil { + return corev1.ConditionUnknown, "", "" + } + return cond.Status, cond.Reason, cond.Message +} + +func (in *HTTPRoute) SyncedConditionStatus() corev1.ConditionStatus { + condition := in.Status.GetCondition(ConditionSynced) + if condition == nil { + return corev1.ConditionUnknown + } + return condition.Status +} + +func (in *HTTPRoute) Validate(tenancy common.ConsulTenancyConfig) error { + var errs field.ErrorList + var route pbmesh.HTTPRoute + path := field.NewPath("spec") + + res := in.Resource(tenancy.ConsulDestinationNamespace, tenancy.ConsulPartition) + + if err := res.Data.UnmarshalTo(&route); err != nil { + return fmt.Errorf("error parsing resource data as type %q: %s", &route, err) + } + + if len(route.ParentRefs) == 0 { + errs = append(errs, field.Required(path.Child("parentRefs"), "cannot be empty")) + } + + if len(route.Hostnames) > 0 { + errs = append(errs, field.Invalid(path.Child("hostnames"), route.Hostnames, "should not populate hostnames")) + } + + for i, rule := range route.Rules { + rulePath := path.Child("rules").Index(i) + for j, match := range rule.Matches { + ruleMatchPath := rulePath.Child("matches").Index(j) + if match.Path != nil { + switch match.Path.Type { + case pbmesh.PathMatchType_PATH_MATCH_TYPE_UNSPECIFIED: + errs = append(errs, field.Invalid(ruleMatchPath.Child("path").Child("type"), pbmesh.PathMatchType_PATH_MATCH_TYPE_UNSPECIFIED, "missing required field")) + case pbmesh.PathMatchType_PATH_MATCH_TYPE_EXACT: + if !strings.HasPrefix(match.Path.Value, "/") { + errs = append(errs, field.Invalid(ruleMatchPath.Child("path").Child("value"), match.Path.Value, "exact patch value does not start with '/'")) + } + case pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX: + if !strings.HasPrefix(match.Path.Value, "/") { + errs = append(errs, 
field.Invalid(ruleMatchPath.Child("path").Child("value"), match.Path.Value, "prefix patch value does not start with '/'")) + } + case pbmesh.PathMatchType_PATH_MATCH_TYPE_REGEX: + if match.Path.Value == "" { + errs = append(errs, field.Required(ruleMatchPath.Child("path").Child("value"), "missing required field")) + } + default: + errs = append(errs, field.Invalid(ruleMatchPath.Child("path").Child("type"), match.Path, "not a supported enum value")) + } + } + + for k, hdr := range match.Headers { + if err := validateHeaderMatchType(hdr.Type); err != nil { + errs = append(errs, field.Invalid(ruleMatchPath.Child("headers").Index(k).Child("type"), hdr.Type, err.Error())) + } + + if hdr.Name == "" { + errs = append(errs, field.Required(ruleMatchPath.Child("headers").Index(k).Child("name"), "missing required field")) + } + } + + for k, qm := range match.QueryParams { + switch qm.Type { + case pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_UNSPECIFIED: + errs = append(errs, field.Invalid(ruleMatchPath.Child("queryParams").Index(k).Child("type"), pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_UNSPECIFIED, "missing required field")) + case pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_EXACT: + case pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_REGEX: + case pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_PRESENT: + default: + errs = append(errs, field.Invalid(ruleMatchPath.Child("queryParams").Index(k).Child("type"), qm.Type, "not a supported enum value")) + } + + if qm.Name == "" { + errs = append(errs, field.Required(ruleMatchPath.Child("queryParams").Index(k).Child("name"), "missing required field")) + } + } + + if match.Method != "" && !isValidHTTPMethod(match.Method) { + errs = append(errs, field.Invalid(ruleMatchPath.Child("method"), match.Method, "not a valid http method")) + } + } + + var ( + hasReqMod bool + hasUrlRewrite bool + ) + for j, filter := range rule.Filters { + ruleFilterPath := path.Child("filters").Index(j) + set := 0 + if 
filter.RequestHeaderModifier != nil { + set++ + hasReqMod = true + } + if filter.ResponseHeaderModifier != nil { + set++ + } + if filter.UrlRewrite != nil { + set++ + hasUrlRewrite = true + if filter.UrlRewrite.PathPrefix == "" { + errs = append(errs, field.Invalid(ruleFilterPath.Child("urlRewrite").Child("pathPrefix"), filter.UrlRewrite.PathPrefix, "field should not be empty if enclosing section is set")) + } + } + if set != 1 { + errs = append(errs, field.Invalid(ruleFilterPath, filter, "exactly one of request_header_modifier, response_header_modifier, or url_rewrite is required")) + } + } + + if hasReqMod && hasUrlRewrite { + errs = append(errs, field.Invalid(rulePath.Child("filters"), rule.Filters, "exactly one of request_header_modifier or url_rewrite can be set at a time")) + } + + if len(rule.BackendRefs) == 0 { + errs = append(errs, field.Required(rulePath.Child("backendRefs"), "missing required field")) + } + for j, hbref := range rule.BackendRefs { + ruleBackendRefsPath := rulePath.Child("backendRefs").Index(j) + if hbref.BackendRef == nil { + errs = append(errs, field.Required(ruleBackendRefsPath.Child("backendRef"), "missing required field")) + continue + } + + if hbref.BackendRef.Datacenter != "" { + errs = append(errs, field.Invalid(ruleBackendRefsPath.Child("backendRef").Child("datacenter"), hbref.BackendRef.Datacenter, "datacenter is not yet supported on backend refs")) + } + + if len(hbref.Filters) > 0 { + errs = append(errs, field.Invalid(ruleBackendRefsPath.Child("filters"), hbref.Filters, "filters are not supported at this level yet")) + } + } + + if rule.Timeouts != nil { + errs = append(errs, validateHTTPTimeouts(rule.Timeouts, rulePath.Child("timeouts"))...) + } + if rule.Retries != nil { + errs = append(errs, validateHTTPRetries(rule.Retries, rulePath.Child("retries"))...) 
+ } + } + + if len(errs) > 0 { + return apierrors.NewInvalid( + schema.GroupKind{Group: MeshGroup, Kind: common.HTTPRoute}, + in.KubernetesName(), errs) + } + return nil +} + +func isValidHTTPMethod(method string) bool { + switch method { + case http.MethodGet, + http.MethodHead, + http.MethodPost, + http.MethodPut, + http.MethodPatch, + http.MethodDelete, + http.MethodConnect, + http.MethodOptions, + http.MethodTrace: + return true + default: + return false + } +} + +// DefaultNamespaceFields is required as part of the common.MeshConfig interface. +func (in *HTTPRoute) DefaultNamespaceFields(tenancy common.ConsulTenancyConfig) {} diff --git a/control-plane/api/mesh/v2beta1/http_route_types_test.go b/control-plane/api/mesh/v2beta1/http_route_types_test.go new file mode 100644 index 0000000000..ecebfb7600 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/http_route_types_test.go @@ -0,0 +1,1330 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +func TestHTTPRoute_MatchesConsul(t *testing.T) { + cases := map[string]struct { + OurConsulNamespace string + OurConsulPartition string + OurData *HTTPRoute + + TheirName string + 
TheirConsulNamespace string + TheirConsulPartition string + TheirData *pbmesh.HTTPRoute + ResourceOverride *pbresource.Resource // Used to test that an empty resource of another type will not match + + Matches bool + }{ + "empty fields matches": { + OurConsulNamespace: constants.DefaultConsulNS, + OurConsulPartition: constants.DefaultConsulPartition, + OurData: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + }, + Spec: pbmesh.HTTPRoute{}, + }, + TheirName: "name", + TheirConsulNamespace: constants.DefaultConsulNS, + TheirConsulPartition: constants.DefaultConsulPartition, + TheirData: &pbmesh.HTTPRoute{}, + Matches: true, + }, + "hostnames are compared": { + OurConsulNamespace: "consul-ns", + OurConsulPartition: "consul-partition", + OurData: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.HTTPRoute{ + Hostnames: []string{ + "a-hostname", "another-hostname", + }, + }, + }, + TheirName: "foo", + TheirConsulNamespace: "consul-ns", + TheirConsulPartition: "consul-partition", + TheirData: &pbmesh.HTTPRoute{ + Hostnames: []string{ + "not-a-hostname", "another-hostname", + }, + }, + Matches: false, + }, + "all fields set matches": { + OurConsulNamespace: "consul-ns", + OurConsulPartition: "consul-partition", + OurData: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "a-partition", + Namespace: "a-namespace", + }, + Name: "reference-name", + Section: "section-name", + }, + Port: "20201", + }, + }, + Hostnames: []string{ + "a-hostname", "another-hostname", + }, + Rules: []*pbmesh.HTTPRouteRule{ + { + Matches: []*pbmesh.HTTPRouteMatch{ + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_EXACT, + Value: "exact-value", + }, + Headers: []*pbmesh.HTTPHeaderMatch{ 
+ { + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PREFIX, + Name: "test-header", + Value: "header-value", + }, + }, + QueryParams: []*pbmesh.HTTPQueryParamMatch{ + { + Type: pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_PRESENT, + Name: "query-param-name", + Value: "query-value", + }, + }, + Method: "GET", + }, + }, + Filters: []*pbmesh.HTTPRouteFilter{ + { + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "a-path-prefix", + }, + }, + }, + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: &durationpb.Duration{ + Seconds: 10, + Nanos: 5, + }, + Idle: &durationpb.Duration{ + Seconds: 5, + Nanos: 10, + }, + }, + Retries: &pbmesh.HTTPRouteRetries{ + Number: &wrapperspb.UInt32Value{ + Value: 1, + }, + OnConnectFailure: false, + OnConditions: []string{ + "condition-one", "condition-two", + }, + OnStatusCodes: []uint32{ + 200, 201, 202, + }, + }, + BackendRefs: []*pbmesh.HTTPBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "backend-name", + Section: "backend-section", + }, + Port: "20211", + Datacenter: "another-datacenter", + }, + Weight: 12, + Filters: []*pbmesh.HTTPRouteFilter{ + { + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "setting", + 
}, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "adding", + }, + }, + Remove: []string{"removing"}, + }, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "another-set-header", + Value: "setting", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "another-added-header", + Value: "adding", + }, + }, + Remove: []string{"also-removing"}, + }, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "/prefixing-it", + }, + }, + }, + }, + }, + }, + }, + }, + }, + TheirName: "foo", + TheirConsulNamespace: "consul-ns", + TheirConsulPartition: "consul-partition", + TheirData: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "a-partition", + Namespace: "a-namespace", + }, + Name: "reference-name", + Section: "section-name", + }, + Port: "20201", + }, + }, + Hostnames: []string{ + "a-hostname", "another-hostname", + }, + Rules: []*pbmesh.HTTPRouteRule{ + { + Matches: []*pbmesh.HTTPRouteMatch{ + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_EXACT, + Value: "exact-value", + }, + Headers: []*pbmesh.HTTPHeaderMatch{ + { + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PREFIX, + Name: "test-header", + Value: "header-value", + }, + }, + QueryParams: []*pbmesh.HTTPQueryParamMatch{ + { + Type: pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_PRESENT, + Name: "query-param-name", + Value: "query-value", + }, + }, + Method: "GET", + }, + }, + Filters: []*pbmesh.HTTPRouteFilter{ + { + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: 
"set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "a-path-prefix", + }, + }, + }, + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: &durationpb.Duration{ + Seconds: 10, + Nanos: 5, + }, + Idle: &durationpb.Duration{ + Seconds: 5, + Nanos: 10, + }, + }, + Retries: &pbmesh.HTTPRouteRetries{ + Number: &wrapperspb.UInt32Value{ + Value: 1, + }, + OnConnectFailure: false, + OnConditions: []string{ + "condition-one", "condition-two", + }, + OnStatusCodes: []uint32{ + 200, 201, 202, + }, + }, + BackendRefs: []*pbmesh.HTTPBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "backend-name", + Section: "backend-section", + }, + Port: "20211", + Datacenter: "another-datacenter", + }, + Weight: 12, + Filters: []*pbmesh.HTTPRouteFilter{ + { + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "setting", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "adding", + }, + }, + Remove: []string{"removing"}, + }, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "another-set-header", + Value: "setting", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "another-added-header", + Value: "adding", + }, + }, + Remove: []string{"also-removing"}, + }, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "/prefixing-it", + }, + }, + }, + }, + }, + }, + }, + }, + Matches: true, + }, + "different types does not match": { + OurConsulNamespace: constants.DefaultConsulNS, + OurConsulPartition: constants.DefaultConsulPartition, + OurData: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: "name", + }, + Spec: pbmesh.HTTPRoute{}, + }, + ResourceOverride: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "name", + Type: pbmesh.ProxyConfigurationType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.DefaultConsulNS, + Namespace: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbmesh.ProxyConfiguration{}), + Metadata: meshConfigMeta(), + }, + Matches: false, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + consulResource := c.ResourceOverride + if c.TheirName != "" { + consulResource = constructHTTPRouteResource(c.TheirData, c.TheirName, c.TheirConsulNamespace, c.TheirConsulPartition) + } + require.Equal(t, c.Matches, c.OurData.MatchesConsul(consulResource, c.OurConsulNamespace, c.OurConsulPartition)) + }) + } +} + +// TestHTTPRoute_Resource also includes test to verify ResourceID(). +func TestHTTPRoute_Resource(t *testing.T) { + cases := map[string]struct { + Ours *HTTPRoute + ConsulNamespace string + ConsulPartition string + ExpectedName string + ExpectedData *pbmesh.HTTPRoute + }{ + "empty fields": { + Ours: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: pbmesh.HTTPRoute{}, + }, + ConsulNamespace: constants.DefaultConsulNS, + ConsulPartition: constants.DefaultConsulPartition, + ExpectedName: "foo", + ExpectedData: &pbmesh.HTTPRoute{}, + }, + "every field set": { + Ours: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "a-partition", + Namespace: "a-namespace", + }, + Name: "reference-name", + Section: "section-name", + }, + Port: "20201", + }, + }, + Hostnames: []string{ + "a-hostname", "another-hostname", + }, + Rules: []*pbmesh.HTTPRouteRule{ + { + Matches: []*pbmesh.HTTPRouteMatch{ + { + Path: &pbmesh.HTTPPathMatch{ + Type: 
pbmesh.PathMatchType_PATH_MATCH_TYPE_EXACT, + Value: "exact-value", + }, + Headers: []*pbmesh.HTTPHeaderMatch{ + { + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PREFIX, + Name: "test-header", + Value: "header-value", + }, + }, + QueryParams: []*pbmesh.HTTPQueryParamMatch{ + { + Type: pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_PRESENT, + Name: "query-param-name", + Value: "query-value", + }, + }, + Method: "GET", + }, + }, + Filters: []*pbmesh.HTTPRouteFilter{ + { + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "a-path-prefix", + }, + }, + }, + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: &durationpb.Duration{ + Seconds: 10, + Nanos: 5, + }, + Idle: &durationpb.Duration{ + Seconds: 5, + Nanos: 10, + }, + }, + Retries: &pbmesh.HTTPRouteRetries{ + Number: &wrapperspb.UInt32Value{ + Value: 1, + }, + OnConnectFailure: false, + OnConditions: []string{ + "condition-one", "condition-two", + }, + OnStatusCodes: []uint32{ + 200, 201, 202, + }, + }, + }, + }, + }, + }, + ConsulNamespace: "not-default-namespace", + ConsulPartition: "not-default-partition", + ExpectedName: "foo", + ExpectedData: &pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "a-partition", + Namespace: "a-namespace", + }, + Name: "reference-name", + Section: "section-name", + }, + Port: 
"20201", + }, + }, + Hostnames: []string{ + "a-hostname", "another-hostname", + }, + Rules: []*pbmesh.HTTPRouteRule{ + { + Matches: []*pbmesh.HTTPRouteMatch{ + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_EXACT, + Value: "exact-value", + }, + Headers: []*pbmesh.HTTPHeaderMatch{ + { + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PREFIX, + Name: "test-header", + Value: "header-value", + }, + }, + QueryParams: []*pbmesh.HTTPQueryParamMatch{ + { + Type: pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_PRESENT, + Name: "query-param-name", + Value: "query-value", + }, + }, + Method: "GET", + }, + }, + Filters: []*pbmesh.HTTPRouteFilter{ + { + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{ + Set: []*pbmesh.HTTPHeader{ + { + Name: "set-header", + Value: "a-header-value", + }, + }, + Add: []*pbmesh.HTTPHeader{ + { + Name: "added-header", + Value: "another-header-value", + }, + }, + Remove: []string{ + "remove-header", + }, + }, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "a-path-prefix", + }, + }, + }, + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: &durationpb.Duration{ + Seconds: 10, + Nanos: 5, + }, + Idle: &durationpb.Duration{ + Seconds: 5, + Nanos: 10, + }, + }, + Retries: &pbmesh.HTTPRouteRetries{ + Number: &wrapperspb.UInt32Value{ + Value: 1, + }, + OnConnectFailure: false, + OnConditions: []string{ + "condition-one", "condition-two", + }, + OnStatusCodes: []uint32{ + 200, 201, 202, + }, + }, + }, + }, + }, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + actual := c.Ours.Resource(c.ConsulNamespace, c.ConsulPartition) + expected := constructHTTPRouteResource(c.ExpectedData, c.ExpectedName, 
c.ConsulNamespace, c.ConsulPartition) + + opts := append([]cmp.Option{ + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + }, test.CmpProtoIgnoreOrder()...) + diff := cmp.Diff(expected, actual, opts...) + require.Equal(t, "", diff, "HTTPRoute do not match") + }) + } +} + +func TestHTTPRoute_SetSyncedCondition(t *testing.T) { + trafficPermissions := &HTTPRoute{} + trafficPermissions.SetSyncedCondition(corev1.ConditionTrue, "reason", "message") + + require.Equal(t, corev1.ConditionTrue, trafficPermissions.Status.Conditions[0].Status) + require.Equal(t, "reason", trafficPermissions.Status.Conditions[0].Reason) + require.Equal(t, "message", trafficPermissions.Status.Conditions[0].Message) + now := metav1.Now() + require.True(t, trafficPermissions.Status.Conditions[0].LastTransitionTime.Before(&now)) +} + +func TestHTTPRoute_SetLastSyncedTime(t *testing.T) { + trafficPermissions := &HTTPRoute{} + syncedTime := metav1.NewTime(time.Now()) + trafficPermissions.SetLastSyncedTime(&syncedTime) + + require.Equal(t, &syncedTime, trafficPermissions.Status.LastSyncedTime) +} + +func TestHTTPRoute_GetSyncedConditionStatus(t *testing.T) { + cases := []corev1.ConditionStatus{ + corev1.ConditionUnknown, + corev1.ConditionFalse, + corev1.ConditionTrue, + } + for _, status := range cases { + t.Run(string(status), func(t *testing.T) { + trafficPermissions := &HTTPRoute{ + Status: Status{ + Conditions: []Condition{{ + Type: ConditionSynced, + Status: status, + }}, + }, + } + + require.Equal(t, status, trafficPermissions.SyncedConditionStatus()) + }) + } +} + +func TestHTTPRoute_GetConditionWhenStatusNil(t *testing.T) { + require.Nil(t, (&HTTPRoute{}).GetCondition(ConditionSynced)) +} + +func TestHTTPRoute_SyncedConditionStatusWhenStatusNil(t *testing.T) { + require.Equal(t, corev1.ConditionUnknown, (&HTTPRoute{}).SyncedConditionStatus()) +} + +func TestHTTPRoute_SyncedConditionWhenStatusNil(t *testing.T) 
{ + status, reason, message := (&HTTPRoute{}).SyncedCondition() + require.Equal(t, corev1.ConditionUnknown, status) + require.Equal(t, "", reason) + require.Equal(t, "", message) +} + +func TestHTTPRoute_KubeKind(t *testing.T) { + require.Equal(t, "httproute", (&HTTPRoute{}).KubeKind()) +} + +func TestHTTPRoute_KubernetesName(t *testing.T) { + require.Equal(t, "test", (&HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "bar", + }, + Spec: pbmesh.HTTPRoute{}, + }).KubernetesName()) +} + +func TestHTTPRoute_ObjectMeta(t *testing.T) { + meta := metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + } + trafficPermissions := &HTTPRoute{ + ObjectMeta: meta, + } + require.Equal(t, &meta, trafficPermissions.GetObjectMeta()) +} + +// Test defaulting behavior when namespaces are enabled as well as disabled. +// TODO: add when implemented +// func TestHTTPRoute_DefaultNamespaceFields(t *testing.T) + +func TestHTTPRoute_Validate(t *testing.T) { + cases := []struct { + name string + input *HTTPRoute + expectedErrMsgs []string + }{ + { + name: "kitchen sink OK", + input: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "a-partition", + Namespace: "a-namespace", + }, + Name: "reference-name", + Section: "section-name", + }, + Port: "20201", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.HTTPRouteRule{ + { + Matches: []*pbmesh.HTTPRouteMatch{ + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_EXACT, + Value: "/exactValue", + }, + Headers: []*pbmesh.HTTPHeaderMatch{ + { + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_PREFIX, + Name: "test-header", + Value: "header-value", + }, + }, + QueryParams: []*pbmesh.HTTPQueryParamMatch{ + { + Type: pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_PRESENT, 
+ Name: "query-param-name", + Value: "query-value", + }, + }, + Method: "GET", + }, + }, + Filters: []*pbmesh.HTTPRouteFilter{ + { + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "a-path-prefix", + }, + }, + }, + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: &durationpb.Duration{ + Seconds: 10, + Nanos: 5, + }, + Idle: &durationpb.Duration{ + Seconds: 5, + Nanos: 10, + }, + }, + Retries: &pbmesh.HTTPRouteRetries{ + Number: &wrapperspb.UInt32Value{ + Value: 1, + }, + OnConnectFailure: false, + OnConditions: []string{ + "reset", "cancelled", + }, + OnStatusCodes: []uint32{ + 200, 201, 202, + }, + }, + BackendRefs: []*pbmesh.HTTPBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "backend", + Section: "backend-section", + }, + Port: "20101", + }, + Weight: 15, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: nil, + }, + { + name: "missing parentRefs", + input: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{}, + }, + }, + expectedErrMsgs: []string{ + `spec.parentRefs: Required value: cannot be empty`, + }, + }, + { + name: "hostnames created", + input: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "a-partition", + Namespace: "a-namespace", + }, + Name: "reference-name", + Section: "section-name", + }, + Port: "20201", + }, + }, + Hostnames: []string{"a-hostname", "another-hostname"}, + }, + }, + expectedErrMsgs: []string{ + `spec.hostnames: Invalid value: []string{"a-hostname", "another-hostname"}: should not populate hostnames`, + }, + }, + { + name: 
"rules.matches.path", + input: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "a-partition", + Namespace: "a-namespace", + }, + Name: "reference-name", + Section: "section-name", + }, + Port: "20201", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.HTTPRouteRule{ + { + Matches: []*pbmesh.HTTPRouteMatch{ + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_UNSPECIFIED, + }, + }, + { + Path: &pbmesh.HTTPPathMatch{}, + }, + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_EXACT, + Value: "does-not-have-/-prefix", + }, + }, + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX, + Value: "does-not-have-/-prefix-either", + }, + }, + { + Path: &pbmesh.HTTPPathMatch{ + Type: pbmesh.PathMatchType_PATH_MATCH_TYPE_REGEX, + Value: "", + }, + }, + }, + BackendRefs: []*pbmesh.HTTPBackendRef{{BackendRef: &pbmesh.BackendReference{}}}, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].matches[0].path.type: Invalid value: PATH_MATCH_TYPE_UNSPECIFIED: missing required field`, + `spec.rules[0].matches[1].path.type: Invalid value: PATH_MATCH_TYPE_UNSPECIFIED: missing required field`, + `spec.rules[0].matches[2].path.value: Invalid value: "does-not-have-/-prefix": exact patch value does not start with '/'`, + `spec.rules[0].matches[3].path.value: Invalid value: "does-not-have-/-prefix-either": prefix patch value does not start with '/'`, + `spec.rules[0].matches[4].path.value: Required value: missing required field`, + }, + }, + { + name: "rules.matches.headers", + input: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ 
+ Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "a-partition", + Namespace: "a-namespace", + }, + Name: "reference-name", + Section: "section-name", + }, + Port: "20201", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.HTTPRouteRule{ + { + Matches: []*pbmesh.HTTPRouteMatch{ + { + Headers: []*pbmesh.HTTPHeaderMatch{ + { + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_UNSPECIFIED, + Name: "test-header", + Value: "header-value", + }, + { + // Type: "", + Name: "test-header", + Value: "header-value", + }, + { + Type: pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT, + Name: "", + }, + }, + Method: "GET", + }, + }, + BackendRefs: []*pbmesh.HTTPBackendRef{{BackendRef: &pbmesh.BackendReference{}}}, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].matches[0].headers[0].type: Invalid value: HEADER_MATCH_TYPE_UNSPECIFIED: missing required field`, + `spec.rules[0].matches[0].headers[1].type: Invalid value: HEADER_MATCH_TYPE_UNSPECIFIED: missing required field`, + `spec.rules[0].matches[0].headers[2].name: Required value: missing required field`, + }, + }, + { + name: "rules.filters", + input: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "a-partition", + Namespace: "a-namespace", + }, + Name: "reference-name", + Section: "section-name", + }, + Port: "20201", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.HTTPRouteRule{ + { + Filters: []*pbmesh.HTTPRouteFilter{ + { + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + }, + { + RequestHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "prefix-1", + }, + }, + { + ResponseHeaderModifier: &pbmesh.HTTPHeaderFilter{}, + UrlRewrite: 
&pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "prefix-2", + }, + }, + { + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "", + }, + }, + }, + BackendRefs: []*pbmesh.HTTPBackendRef{{BackendRef: &pbmesh.BackendReference{}}}, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.filters[0]: Invalid value`, + `spec.filters[1]: Invalid value`, + `spec.filters[2]: Invalid value`, + `spec.filters[3].urlRewrite.pathPrefix: Invalid value: "": field should not be empty if enclosing section is set`, + `exactly one of request_header_modifier, response_header_modifier, or url_rewrite is required`, + }, + }, + { + name: "rule.backendRefs", + input: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "a-partition", + Namespace: "a-namespace", + }, + Name: "reference-name", + Section: "section-name", + }, + Port: "20201", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.HTTPRouteRule{ + { + BackendRefs: []*pbmesh.HTTPBackendRef{}, + }, + { + BackendRefs: []*pbmesh.HTTPBackendRef{ + {}, + { + BackendRef: &pbmesh.BackendReference{ + Datacenter: "some-datacenter", + }, + }, + { + BackendRef: &pbmesh.BackendReference{}, + Filters: []*pbmesh.HTTPRouteFilter{ + { + UrlRewrite: &pbmesh.HTTPURLRewriteFilter{ + PathPrefix: "/prefixed", + }, + }, + }, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].backendRefs: Required value: missing required field`, + `spec.rules[1].backendRefs[0].backendRef: Required value: missing required field`, + `spec.rules[1].backendRefs[1].backendRef.datacenter: Invalid value: "some-datacenter": datacenter is not yet supported on backend refs`, + `spec.rules[1].backendRefs[2].filters: Invalid value`, + `filters are not supported at this level yet`, + }, + }, + { + name: "rules.timeouts", + input: &HTTPRoute{ 
+ ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "a-partition", + Namespace: "a-namespace", + }, + Name: "reference-name", + Section: "section-name", + }, + Port: "20201", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.HTTPRouteRule{ + { + Timeouts: &pbmesh.HTTPRouteTimeouts{ + Request: &durationpb.Duration{ + Seconds: -10, + Nanos: -5, + }, + Idle: &durationpb.Duration{ + Seconds: -5, + Nanos: -10, + }, + }, + BackendRefs: []*pbmesh.HTTPBackendRef{{BackendRef: &pbmesh.BackendReference{}}}, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].timeouts.request: Invalid value: -10.000000005s: timeout cannot be negative`, + `spec.rules[0].timeouts.idle: Invalid value: -5.00000001s: timeout cannot be negative`, + }, + }, + { + name: "rules.timeouts", + input: &HTTPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.HTTPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "a-partition", + Namespace: "a-namespace", + }, + Name: "reference-name", + Section: "section-name", + }, + Port: "20201", + }, + }, + Hostnames: []string{}, + Rules: []*pbmesh.HTTPRouteRule{ + { + Retries: &pbmesh.HTTPRouteRetries{ + OnConditions: []string{ + "invalid-condition", "another-invalid-condition", + }, + }, + BackendRefs: []*pbmesh.HTTPBackendRef{{BackendRef: &pbmesh.BackendReference{}}}, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].retries.onConditions[0]: Invalid value: "invalid-condition": not a valid retry condition`, + `spec.rules[0].retries.onConditions[1]: Invalid value: "another-invalid-condition": not a valid retry condition`, + }, + }, + } + for _, tc := range cases { + 
t.Run(tc.name, func(t *testing.T) { + err := tc.input.Validate(common.ConsulTenancyConfig{}) + if len(tc.expectedErrMsgs) != 0 { + require.Error(t, err) + for _, s := range tc.expectedErrMsgs { + require.Contains(t, err.Error(), s) + } + } else { + require.NoError(t, err) + } + }) + } +} + +func constructHTTPRouteResource(tp *pbmesh.HTTPRoute, name, namespace, partition string) *pbresource.Resource { + data := inject.ToProtoAny(tp) + + id := &pbresource.ID{ + Name: name, + Type: pbmesh.HTTPRouteType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + Uid: "ABCD", // We add this to show it does not factor into the comparison + } + + return &pbresource.Resource{ + Id: id, + Data: data, + Metadata: meshConfigMeta(), + + // We add the fields below to prove that they are not used in the Match when comparing the CRD to Consul. + Version: "123456", + Generation: "01ARZ3NDEKTSV4RRFFQ69G5FAV", + Status: map[string]*pbresource.Status{ + "knock": { + ObservedGeneration: "01ARZ3NDEKTSV4RRFFQ69G5FAV", + Conditions: make([]*pbresource.Condition, 0), + UpdatedAt: timestamppb.Now(), + }, + }, + } +} diff --git a/control-plane/api/mesh/v2beta1/http_route_webhook.go b/control-plane/api/mesh/v2beta1/http_route_webhook.go new file mode 100644 index 0000000000..22d4e45dac --- /dev/null +++ b/control-plane/api/mesh/v2beta1/http_route_webhook.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + "context" + "net/http" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" +) + +// +kubebuilder:object:generate=false + +type HTTPRouteWebhook struct { + Logger logr.Logger + + // ConsulTenancyConfig contains the injector's namespace and partition configuration. 
+ ConsulTenancyConfig common.ConsulTenancyConfig + + decoder *admission.Decoder + client.Client +} + +var _ common.ConsulResourceLister = &HTTPRouteWebhook{} + +// NOTE: The path value in the below line is the path to the webhook. +// If it is updated, run code-gen, update subcommand/inject-connect/command.go +// and the consul-helm value for the path to the webhook. +// +// NOTE: The below line cannot be combined with any other comment. If it is it will break the code generation. +// +// +kubebuilder:webhook:verbs=create;update,path=/mutate-v2beta1-httproute,mutating=true,failurePolicy=fail,groups=auth.consul.hashicorp.com,resources=httproute,versions=v2beta1,name=mutate-httproute.auth.consul.hashicorp.com,sideEffects=None,admissionReviewVersions=v1beta1;v1 + +func (v *HTTPRouteWebhook) Handle(ctx context.Context, req admission.Request) admission.Response { + var resource HTTPRoute + err := v.decoder.Decode(req, &resource) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + return common.ValidateConsulResource(ctx, req, v.Logger, v, &resource, v.ConsulTenancyConfig) +} + +func (v *HTTPRouteWebhook) List(ctx context.Context) ([]common.ConsulResource, error) { + var resourceList HTTPRouteList + if err := v.Client.List(ctx, &resourceList); err != nil { + return nil, err + } + var entries []common.ConsulResource + for _, item := range resourceList.Items { + entries = append(entries, common.ConsulResource(item)) + } + return entries, nil +} + +func (v *HTTPRouteWebhook) SetupWithManager(mgr ctrl.Manager) { + v.decoder = admission.NewDecoder(mgr.GetScheme()) + mgr.GetWebhookServer().Register("/mutate-v2beta1-httproute", &admission.Webhook{Handler: v}) +} diff --git a/control-plane/api/mesh/v2beta1/mesh_configuration_types.go b/control-plane/api/mesh/v2beta1/mesh_configuration_types.go new file mode 100644 index 0000000000..5468168380 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/mesh_configuration_types.go @@ -0,0 +1,146 @@ +// 
Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package v2beta1 + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "google.golang.org/protobuf/testing/protocmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" +) + +const ( + meshConfigurationKind = "meshconfiguration" +) + +func init() { + MeshSchemeBuilder.Register(&MeshConfiguration{}, &MeshConfigurationList{}) +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MeshConfiguration is the Schema for the Mesh Configuration +// +kubebuilder:printcolumn:name="Synced",type="string",JSONPath=".status.conditions[?(@.type==\"Synced\")].status",description="The sync status of the resource with Consul" +// +kubebuilder:printcolumn:name="Last Synced",type="date",JSONPath=".status.lastSyncedTime",description="The last successful synced time of the resource with Consul" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the resource" +// +kubebuilder:resource:scope=Cluster +type MeshConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec pbmesh.MeshConfiguration `json:"spec,omitempty"` + Status `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MeshConfigurationList contains a list of MeshConfiguration. 
+type MeshConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []*MeshConfiguration `json:"items"` +} + +func (in *MeshConfiguration) ResourceID(_, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: in.Name, + Type: pbmesh.MeshConfigurationType, + Tenancy: &pbresource.Tenancy{ + // we don't pass a namespace here because MeshConfiguration is partition-scoped + Partition: partition, + }, + } +} + +func (in *MeshConfiguration) Resource(_, partition string) *pbresource.Resource { + return &pbresource.Resource{ + Id: in.ResourceID("", partition), + Data: inject.ToProtoAny(&in.Spec), + Metadata: meshConfigMeta(), + } +} + +func (in *MeshConfiguration) AddFinalizer(f string) { + in.ObjectMeta.Finalizers = append(in.Finalizers(), f) +} + +func (in *MeshConfiguration) RemoveFinalizer(f string) { + var newFinalizers []string + for _, oldF := range in.Finalizers() { + if oldF != f { + newFinalizers = append(newFinalizers, oldF) + } + } + in.ObjectMeta.Finalizers = newFinalizers +} + +func (in *MeshConfiguration) Finalizers() []string { + return in.ObjectMeta.Finalizers +} + +func (in *MeshConfiguration) MatchesConsul(candidate *pbresource.Resource, _, partition string) bool { + return cmp.Equal( + in.Resource("", partition), + candidate, + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + protocmp.Transform(), + cmpopts.SortSlices(func(a, b any) bool { return fmt.Sprintf("%v", a) < fmt.Sprintf("%v", b) }), + ) +} + +func (in *MeshConfiguration) KubeKind() string { + return meshConfigurationKind +} + +func (in *MeshConfiguration) KubernetesName() string { + return in.ObjectMeta.Name +} + +func (in *MeshConfiguration) SetSyncedCondition(status corev1.ConditionStatus, reason, message string) { + in.Status.Conditions = Conditions{ + { + Type: ConditionSynced, + Status: status, + LastTransitionTime: metav1.Now(), + 
Reason: reason, + Message: message, + }, + } +} + +func (in *MeshConfiguration) SetLastSyncedTime(time *metav1.Time) { + in.Status.LastSyncedTime = time +} + +func (in *MeshConfiguration) SyncedCondition() (status corev1.ConditionStatus, reason, message string) { + cond := in.Status.GetCondition(ConditionSynced) + if cond == nil { + return corev1.ConditionUnknown, "", "" + } + return cond.Status, cond.Reason, cond.Message +} + +func (in *MeshConfiguration) SyncedConditionStatus() corev1.ConditionStatus { + condition := in.Status.GetCondition(ConditionSynced) + if condition == nil { + return corev1.ConditionUnknown + } + return condition.Status +} + +func (in *MeshConfiguration) Validate(tenancy common.ConsulTenancyConfig) error { + return nil +} + +// DefaultNamespaceFields is required as part of the common.MeshConfig interface. +func (in *MeshConfiguration) DefaultNamespaceFields(tenancy common.ConsulTenancyConfig) {} diff --git a/control-plane/api/mesh/v2beta1/mesh_gateway_types.go b/control-plane/api/mesh/v2beta1/mesh_gateway_types.go new file mode 100644 index 0000000000..922fd272c2 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/mesh_gateway_types.go @@ -0,0 +1,148 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package v2beta1 + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "google.golang.org/protobuf/testing/protocmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" +) + +const ( + meshGatewayKubeKind = "meshgateway" +) + +func init() { + MeshSchemeBuilder.Register(&MeshGateway{}, &MeshGatewayList{}) +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MeshGateway is the Schema for the Mesh Gateway API +// +kubebuilder:printcolumn:name="Synced",type="string",JSONPath=".status.conditions[?(@.type==\"Synced\")].status",description="The sync status of the resource with Consul" +// +kubebuilder:printcolumn:name="Last Synced",type="date",JSONPath=".status.lastSyncedTime",description="The last successful synced time of the resource with Consul" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the resource" +// +kubebuilder:resource:scope="Namespaced" +type MeshGateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec pbmesh.MeshGateway `json:"spec,omitempty"` + Status `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MeshGatewayList contains a list of MeshGateway. 
+type MeshGatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []*MeshGateway `json:"items"` +} + +func (in *MeshGateway) ResourceID(_, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: in.Name, + Type: pbmesh.MeshGatewayType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: "", // Namespace is always unset because MeshGateway is partition-scoped + + }, + } +} + +func (in *MeshGateway) Resource(namespace, partition string) *pbresource.Resource { + return &pbresource.Resource{ + Id: in.ResourceID(namespace, partition), + Data: inject.ToProtoAny(&in.Spec), + Metadata: meshConfigMeta(), + } +} + +func (in *MeshGateway) AddFinalizer(f string) { + in.ObjectMeta.Finalizers = append(in.Finalizers(), f) +} + +func (in *MeshGateway) RemoveFinalizer(f string) { + var newFinalizers []string + for _, oldF := range in.Finalizers() { + if oldF != f { + newFinalizers = append(newFinalizers, oldF) + } + } + in.ObjectMeta.Finalizers = newFinalizers +} + +func (in *MeshGateway) Finalizers() []string { + return in.ObjectMeta.Finalizers +} + +func (in *MeshGateway) MatchesConsul(candidate *pbresource.Resource, namespace, partition string) bool { + return cmp.Equal( + in.Resource(namespace, partition), + candidate, + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + protocmp.Transform(), + cmpopts.SortSlices(func(a, b any) bool { return fmt.Sprintf("%v", a) < fmt.Sprintf("%v", b) }), + ) +} + +func (in *MeshGateway) KubeKind() string { + return meshGatewayKubeKind +} + +func (in *MeshGateway) KubernetesName() string { + return in.ObjectMeta.Name +} + +func (in *MeshGateway) SetSyncedCondition(status corev1.ConditionStatus, reason, message string) { + in.Status.Conditions = Conditions{ + { + Type: ConditionSynced, + Status: status, + LastTransitionTime: metav1.Now(), + Reason: reason, + Message: message, + }, + 
} +} + +func (in *MeshGateway) SetLastSyncedTime(time *metav1.Time) { + in.Status.LastSyncedTime = time +} + +func (in *MeshGateway) SyncedCondition() (status corev1.ConditionStatus, reason, message string) { + cond := in.Status.GetCondition(ConditionSynced) + if cond == nil { + return corev1.ConditionUnknown, "", "" + } + return cond.Status, cond.Reason, cond.Message +} + +func (in *MeshGateway) SyncedConditionStatus() corev1.ConditionStatus { + condition := in.Status.GetCondition(ConditionSynced) + if condition == nil { + return corev1.ConditionUnknown + } + return condition.Status +} + +func (in *MeshGateway) Validate(tenancy common.ConsulTenancyConfig) error { + // TODO add validation logic that ensures we only ever write this to the default namespace. + return nil +} + +// DefaultNamespaceFields is required as part of the common.MeshConfig interface. +func (in *MeshGateway) DefaultNamespaceFields(tenancy common.ConsulTenancyConfig) {} diff --git a/control-plane/api/mesh/v2beta1/mesh_groupversion_info.go b/control-plane/api/mesh/v2beta1/mesh_groupversion_info.go new file mode 100644 index 0000000000..a9fe6a6a83 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/mesh_groupversion_info.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package v2beta1 contains API Schema definitions for the consul.hashicorp.com v2beta1 API group +// +kubebuilder:object:generate=true +// +groupName=mesh.consul.hashicorp.com +package v2beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + + // MeshGroup is a collection of mesh resources. + MeshGroup = "mesh.consul.hashicorp.com" + + // MeshGroupVersion is group version used to register these objects. + MeshGroupVersion = schema.GroupVersion{Group: MeshGroup, Version: "v2beta1"} + + // MeshSchemeBuilder is used to add go types to the GroupVersionKind scheme. 
+ MeshSchemeBuilder = &scheme.Builder{GroupVersion: MeshGroupVersion} + + // AddMeshToScheme adds the types in this group-version to the given scheme. + AddMeshToScheme = MeshSchemeBuilder.AddToScheme +) diff --git a/control-plane/api/mesh/v2beta1/proxy_configuration_route_webhook.go b/control-plane/api/mesh/v2beta1/proxy_configuration_route_webhook.go new file mode 100644 index 0000000000..2b990b6979 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/proxy_configuration_route_webhook.go @@ -0,0 +1,66 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + "context" + "net/http" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" +) + +// +kubebuilder:object:generate=false + +type ProxyConfigurationWebhook struct { + Logger logr.Logger + + // ConsulTenancyConfig contains the injector's namespace and partition configuration. + ConsulTenancyConfig common.ConsulTenancyConfig + + decoder *admission.Decoder + client.Client +} + +var _ common.ConsulResourceLister = &ProxyConfigurationWebhook{} + +// NOTE: The path value in the below line is the path to the webhook. +// If it is updated, run code-gen, update subcommand/inject-connect/command.go +// and the consul-helm value for the path to the webhook. +// +// NOTE: The below line cannot be combined with any other comment. If it is it will break the code generation. 
+// +// +kubebuilder:webhook:verbs=create;update,path=/mutate-v2beta1-proxyconfiguration,mutating=true,failurePolicy=fail,groups=auth.consul.hashicorp.com,resources=proxyconfiguration,versions=v2beta1,name=mutate-proxyconfiguration.auth.consul.hashicorp.com,sideEffects=None,admissionReviewVersions=v1beta1;v1 + +func (v *ProxyConfigurationWebhook) Handle(ctx context.Context, req admission.Request) admission.Response { + var resource ProxyConfiguration + err := v.decoder.Decode(req, &resource) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + return common.ValidateConsulResource(ctx, req, v.Logger, v, &resource, v.ConsulTenancyConfig) +} + +func (v *ProxyConfigurationWebhook) List(ctx context.Context) ([]common.ConsulResource, error) { + var resourceList ProxyConfigurationList + if err := v.Client.List(ctx, &resourceList); err != nil { + return nil, err + } + var entries []common.ConsulResource + for _, item := range resourceList.Items { + entries = append(entries, common.ConsulResource(item)) + } + return entries, nil +} + +func (v *ProxyConfigurationWebhook) SetupWithManager(mgr ctrl.Manager) { + v.decoder = admission.NewDecoder(mgr.GetScheme()) + mgr.GetWebhookServer().Register("/mutate-v2beta1-proxyconfigurations", &admission.Webhook{Handler: v}) +} diff --git a/control-plane/api/mesh/v2beta1/proxy_configuration_types.go b/control-plane/api/mesh/v2beta1/proxy_configuration_types.go new file mode 100644 index 0000000000..cc1b5db9bd --- /dev/null +++ b/control-plane/api/mesh/v2beta1/proxy_configuration_types.go @@ -0,0 +1,155 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package v2beta1 + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "google.golang.org/protobuf/testing/protocmp" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" +) + +const ( + proxyConfigurationKubeKind = "proxyconfiguration" +) + +func init() { + MeshSchemeBuilder.Register(&ProxyConfiguration{}, &ProxyConfigurationList{}) +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ProxyConfiguration is the Schema for the TCP Routes API +// +kubebuilder:printcolumn:name="Synced",type="string",JSONPath=".status.conditions[?(@.type==\"Synced\")].status",description="The sync status of the resource with Consul" +// +kubebuilder:printcolumn:name="Last Synced",type="date",JSONPath=".status.lastSyncedTime",description="The last successful synced time of the resource with Consul" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the resource" +// +kubebuilder:resource:shortName="proxy-configuration" +type ProxyConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec pbmesh.ProxyConfiguration `json:"spec,omitempty"` + Status `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProxyConfigurationList contains a list of ProxyConfiguration. 
+type ProxyConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []*ProxyConfiguration `json:"items"` +} + +func (in *ProxyConfiguration) ResourceID(namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: in.Name, + Type: pbmesh.ProxyConfigurationType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + } +} + +func (in *ProxyConfiguration) Resource(namespace, partition string) *pbresource.Resource { + return &pbresource.Resource{ + Id: in.ResourceID(namespace, partition), + Data: inject.ToProtoAny(&in.Spec), + Metadata: meshConfigMeta(), + } +} + +func (in *ProxyConfiguration) AddFinalizer(f string) { + in.ObjectMeta.Finalizers = append(in.Finalizers(), f) +} + +func (in *ProxyConfiguration) RemoveFinalizer(f string) { + var newFinalizers []string + for _, oldF := range in.Finalizers() { + if oldF != f { + newFinalizers = append(newFinalizers, oldF) + } + } + in.ObjectMeta.Finalizers = newFinalizers +} + +func (in *ProxyConfiguration) Finalizers() []string { + return in.ObjectMeta.Finalizers +} + +func (in *ProxyConfiguration) MatchesConsul(candidate *pbresource.Resource, namespace, partition string) bool { + return cmp.Equal( + in.Resource(namespace, partition), + candidate, + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + protocmp.Transform(), + cmpopts.SortSlices(func(a, b any) bool { return fmt.Sprintf("%v", a) < fmt.Sprintf("%v", b) }), + ) +} + +func (in *ProxyConfiguration) KubeKind() string { + return proxyConfigurationKubeKind +} + +func (in *ProxyConfiguration) KubernetesName() string { + return in.ObjectMeta.Name +} + +func (in *ProxyConfiguration) SetSyncedCondition(status corev1.ConditionStatus, reason, message string) { + in.Status.Conditions = Conditions{ + { + Type: ConditionSynced, + Status: status, + LastTransitionTime: metav1.Now(), + Reason: 
reason, + Message: message, + }, + } +} + +func (in *ProxyConfiguration) SetLastSyncedTime(time *metav1.Time) { + in.Status.LastSyncedTime = time +} + +func (in *ProxyConfiguration) SyncedCondition() (status corev1.ConditionStatus, reason, message string) { + cond := in.Status.GetCondition(ConditionSynced) + if cond == nil { + return corev1.ConditionUnknown, "", "" + } + return cond.Status, cond.Reason, cond.Message +} + +func (in *ProxyConfiguration) SyncedConditionStatus() corev1.ConditionStatus { + condition := in.Status.GetCondition(ConditionSynced) + if condition == nil { + return corev1.ConditionUnknown + } + return condition.Status +} + +func (in *ProxyConfiguration) Validate(_ common.ConsulTenancyConfig) error { + var errs field.ErrorList + if len(errs) > 0 { + return apierrors.NewInvalid( + schema.GroupKind{Group: MeshGroup, Kind: common.ProxyConfiguration}, + in.KubernetesName(), errs) + } + return nil +} + +// DefaultNamespaceFields is required as part of the common.MeshConfig interface. +func (in *ProxyConfiguration) DefaultNamespaceFields(tenancy common.ConsulTenancyConfig) {} diff --git a/control-plane/api/mesh/v2beta1/proxy_configuration_types_test.go b/control-plane/api/mesh/v2beta1/proxy_configuration_types_test.go new file mode 100644 index 0000000000..936f3dd9c2 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/proxy_configuration_types_test.go @@ -0,0 +1,543 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +func TestProxyConfiguration_MatchesConsul(t *testing.T) { + cases := map[string]struct { + OurConsulNamespace string + OurConsulPartition string + OurData *ProxyConfiguration + + TheirName string + TheirConsulNamespace string + TheirConsulPartition string + TheirData *pbmesh.ProxyConfiguration + ResourceOverride *pbresource.Resource // Used to test that an empty resource of another type will not match + + Matches bool + }{ + "empty fields matches": { + OurConsulNamespace: constants.DefaultConsulNS, + OurConsulPartition: constants.DefaultConsulPartition, + OurData: &ProxyConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + }, + Spec: pbmesh.ProxyConfiguration{}, + }, + TheirName: "name", + TheirConsulNamespace: constants.DefaultConsulNS, + TheirConsulPartition: constants.DefaultConsulPartition, + TheirData: &pbmesh.ProxyConfiguration{}, + Matches: true, + }, + "all fields set matches": { + OurConsulNamespace: "consul-ns", + OurConsulPartition: "consul-partition", + OurData: &ProxyConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + 
Prefixes: []string{"prefix-1", "prefix-2"}, + Names: []string{"workload-name"}, + Filter: "first-filter", + }, + DynamicConfig: &pbmesh.DynamicConfig{ + Mode: 2, + TransparentProxy: &pbmesh.TransparentProxy{ + OutboundListenerPort: 1234, + DialedDirectly: true, + }, + MutualTlsMode: 1, + LocalConnection: map[string]*pbmesh.ConnectionConfig{ + "local": { + ConnectTimeout: &durationpb.Duration{ + Seconds: 5, + Nanos: 10, + }, + RequestTimeout: &durationpb.Duration{ + Seconds: 2, + Nanos: 10, + }, + }, + }, + InboundConnections: &pbmesh.InboundConnectionsConfig{ + MaxInboundConnections: 5, + BalanceInboundConnections: 10, + }, + MeshGatewayMode: pbmesh.MeshGatewayMode_MESH_GATEWAY_MODE_LOCAL, + ExposeConfig: &pbmesh.ExposeConfig{ + ExposePaths: []*pbmesh.ExposePath{ + { + ListenerPort: 19000, + Path: "/expose-path", + LocalPathPort: 1901, + Protocol: 2, + }, + }, + }, + AccessLogs: &pbmesh.AccessLogsConfig{ + Enabled: true, + DisableListenerLogs: true, + Type: 3, + Path: "/path", + JsonFormat: "jsonFormat", + TextFormat: "text format.", + }, + PublicListenerJson: "publicListenerJson{}", + ListenerTracingJson: "listenerTracingJson{}", + LocalClusterJson: "localClusterJson{}", + }, + BootstrapConfig: &pbmesh.BootstrapConfig{ + StatsdUrl: "statsdURL", + DogstatsdUrl: "dogstatsdURL", + StatsTags: []string{"statsTags"}, + PrometheusBindAddr: "firstBindAddr", + StatsBindAddr: "secondBindAddr", + ReadyBindAddr: "thirdBindAddr", + OverrideJsonTpl: "overrideJSON", + StaticClustersJson: "staticClusterJSON", + StaticListenersJson: "staticListenersJSON", + StatsSinksJson: "statsSinksJSON", + StatsConfigJson: "statsConfigJSON", + StatsFlushInterval: "45s", + TracingConfigJson: "tracingConfigJSON", + TelemetryCollectorBindSocketDir: "/bindSocketDir", + }, + }, + }, + TheirName: "foo", + TheirConsulNamespace: "consul-ns", + TheirConsulPartition: "consul-partition", + TheirData: &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"prefix-1", 
"prefix-2"}, + Names: []string{"workload-name"}, + Filter: "first-filter", + }, + DynamicConfig: &pbmesh.DynamicConfig{ + Mode: 2, + TransparentProxy: &pbmesh.TransparentProxy{ + OutboundListenerPort: 1234, + DialedDirectly: true, + }, + MutualTlsMode: 1, + LocalConnection: map[string]*pbmesh.ConnectionConfig{ + "local": { + ConnectTimeout: &durationpb.Duration{ + Seconds: 5, + Nanos: 10, + }, + RequestTimeout: &durationpb.Duration{ + Seconds: 2, + Nanos: 10, + }, + }, + }, + InboundConnections: &pbmesh.InboundConnectionsConfig{ + MaxInboundConnections: 5, + BalanceInboundConnections: 10, + }, + MeshGatewayMode: pbmesh.MeshGatewayMode_MESH_GATEWAY_MODE_LOCAL, + ExposeConfig: &pbmesh.ExposeConfig{ + ExposePaths: []*pbmesh.ExposePath{ + { + ListenerPort: 19000, + Path: "/expose-path", + LocalPathPort: 1901, + Protocol: 2, + }, + }, + }, + AccessLogs: &pbmesh.AccessLogsConfig{ + Enabled: true, + DisableListenerLogs: true, + Type: 3, + Path: "/path", + JsonFormat: "jsonFormat", + TextFormat: "text format.", + }, + PublicListenerJson: "publicListenerJson{}", + ListenerTracingJson: "listenerTracingJson{}", + LocalClusterJson: "localClusterJson{}", + }, + BootstrapConfig: &pbmesh.BootstrapConfig{ + StatsdUrl: "statsdURL", + DogstatsdUrl: "dogstatsdURL", + StatsTags: []string{"statsTags"}, + PrometheusBindAddr: "firstBindAddr", + StatsBindAddr: "secondBindAddr", + ReadyBindAddr: "thirdBindAddr", + OverrideJsonTpl: "overrideJSON", + StaticClustersJson: "staticClusterJSON", + StaticListenersJson: "staticListenersJSON", + StatsSinksJson: "statsSinksJSON", + StatsConfigJson: "statsConfigJSON", + StatsFlushInterval: "45s", + TracingConfigJson: "tracingConfigJSON", + TelemetryCollectorBindSocketDir: "/bindSocketDir", + }, + }, + Matches: true, + }, + "different types does not match": { + OurConsulNamespace: constants.DefaultConsulNS, + OurConsulPartition: constants.DefaultConsulPartition, + OurData: &ProxyConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + }, + 
Spec: pbmesh.ProxyConfiguration{}, + }, + ResourceOverride: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "name", + Type: pbmesh.TCPRouteType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.DefaultConsulNS, + Namespace: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbmesh.ProxyConfiguration{}), + Metadata: meshConfigMeta(), + }, + Matches: false, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + consulResource := c.ResourceOverride + if c.TheirName != "" { + consulResource = constructProxyConfigurationResource(c.TheirData, c.TheirName, c.TheirConsulNamespace, c.TheirConsulPartition) + } + require.Equal(t, c.Matches, c.OurData.MatchesConsul(consulResource, c.OurConsulNamespace, c.OurConsulPartition)) + }) + } +} + +// TestProxyConfiguration_Resource also includes test to verify ResourceID(). +func TestProxyConfiguration_Resource(t *testing.T) { + cases := map[string]struct { + Ours *ProxyConfiguration + ConsulNamespace string + ConsulPartition string + ExpectedName string + ExpectedData *pbmesh.ProxyConfiguration + }{ + "empty fields": { + Ours: &ProxyConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: pbmesh.ProxyConfiguration{}, + }, + ConsulNamespace: constants.DefaultConsulNS, + ConsulPartition: constants.DefaultConsulPartition, + ExpectedName: "foo", + ExpectedData: &pbmesh.ProxyConfiguration{}, + }, + "every field set": { + Ours: &ProxyConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"prefix-1", "prefix-2"}, + Names: []string{"workload-name"}, + Filter: "first-filter", + }, + DynamicConfig: &pbmesh.DynamicConfig{ + Mode: 2, + TransparentProxy: &pbmesh.TransparentProxy{ + OutboundListenerPort: 1234, + DialedDirectly: true, + }, + MutualTlsMode: 1, + LocalConnection: map[string]*pbmesh.ConnectionConfig{ + "local": { + ConnectTimeout: 
&durationpb.Duration{ + Seconds: 5, + Nanos: 10, + }, + RequestTimeout: &durationpb.Duration{ + Seconds: 2, + Nanos: 10, + }, + }, + }, + InboundConnections: &pbmesh.InboundConnectionsConfig{ + MaxInboundConnections: 5, + BalanceInboundConnections: 10, + }, + MeshGatewayMode: pbmesh.MeshGatewayMode_MESH_GATEWAY_MODE_LOCAL, + ExposeConfig: &pbmesh.ExposeConfig{ + ExposePaths: []*pbmesh.ExposePath{ + { + ListenerPort: 19000, + Path: "/expose-path", + LocalPathPort: 1901, + Protocol: 2, + }, + }, + }, + AccessLogs: &pbmesh.AccessLogsConfig{ + Enabled: true, + DisableListenerLogs: true, + Type: 3, + Path: "/path", + JsonFormat: "jsonFormat", + TextFormat: "text format.", + }, + PublicListenerJson: "publicListenerJson{}", + ListenerTracingJson: "listenerTracingJson{}", + LocalClusterJson: "localClusterJson{}", + }, + BootstrapConfig: &pbmesh.BootstrapConfig{ + StatsdUrl: "statsdURL", + DogstatsdUrl: "dogstatsdURL", + StatsTags: []string{"statsTags"}, + PrometheusBindAddr: "firstBindAddr", + StatsBindAddr: "secondBindAddr", + ReadyBindAddr: "thirdBindAddr", + OverrideJsonTpl: "overrideJSON", + StaticClustersJson: "staticClusterJSON", + StaticListenersJson: "staticListenersJSON", + StatsSinksJson: "statsSinksJSON", + StatsConfigJson: "statsConfigJSON", + StatsFlushInterval: "45s", + TracingConfigJson: "tracingConfigJSON", + TelemetryCollectorBindSocketDir: "/bindSocketDir", + }, + }, + }, + ConsulNamespace: "not-default-namespace", + ConsulPartition: "not-default-partition", + ExpectedName: "foo", + ExpectedData: &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"prefix-1", "prefix-2"}, + Names: []string{"workload-name"}, + Filter: "first-filter", + }, + DynamicConfig: &pbmesh.DynamicConfig{ + Mode: 2, + TransparentProxy: &pbmesh.TransparentProxy{ + OutboundListenerPort: 1234, + DialedDirectly: true, + }, + MutualTlsMode: 1, + LocalConnection: map[string]*pbmesh.ConnectionConfig{ + "local": { + ConnectTimeout: &durationpb.Duration{ 
+ Seconds: 5, + Nanos: 10, + }, + RequestTimeout: &durationpb.Duration{ + Seconds: 2, + Nanos: 10, + }, + }, + }, + InboundConnections: &pbmesh.InboundConnectionsConfig{ + MaxInboundConnections: 5, + BalanceInboundConnections: 10, + }, + MeshGatewayMode: pbmesh.MeshGatewayMode_MESH_GATEWAY_MODE_LOCAL, + ExposeConfig: &pbmesh.ExposeConfig{ + ExposePaths: []*pbmesh.ExposePath{ + { + ListenerPort: 19000, + Path: "/expose-path", + LocalPathPort: 1901, + Protocol: 2, + }, + }, + }, + AccessLogs: &pbmesh.AccessLogsConfig{ + Enabled: true, + DisableListenerLogs: true, + Type: 3, + Path: "/path", + JsonFormat: "jsonFormat", + TextFormat: "text format.", + }, + PublicListenerJson: "publicListenerJson{}", + ListenerTracingJson: "listenerTracingJson{}", + LocalClusterJson: "localClusterJson{}", + }, + BootstrapConfig: &pbmesh.BootstrapConfig{ + StatsdUrl: "statsdURL", + DogstatsdUrl: "dogstatsdURL", + StatsTags: []string{"statsTags"}, + PrometheusBindAddr: "firstBindAddr", + StatsBindAddr: "secondBindAddr", + ReadyBindAddr: "thirdBindAddr", + OverrideJsonTpl: "overrideJSON", + StaticClustersJson: "staticClusterJSON", + StaticListenersJson: "staticListenersJSON", + StatsSinksJson: "statsSinksJSON", + StatsConfigJson: "statsConfigJSON", + StatsFlushInterval: "45s", + TracingConfigJson: "tracingConfigJSON", + TelemetryCollectorBindSocketDir: "/bindSocketDir", + }, + }, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + actual := c.Ours.Resource(c.ConsulNamespace, c.ConsulPartition) + expected := constructProxyConfigurationResource(c.ExpectedData, c.ExpectedName, c.ConsulNamespace, c.ConsulPartition) + + opts := append([]cmp.Option{ + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + }, test.CmpProtoIgnoreOrder()...) + diff := cmp.Diff(expected, actual, opts...) 
+ require.Equal(t, "", diff, "ProxyConfiguration do not match") + }) + } +} + +func TestProxyConfiguration_SetSyncedCondition(t *testing.T) { + trafficPermissions := &ProxyConfiguration{} + trafficPermissions.SetSyncedCondition(corev1.ConditionTrue, "reason", "message") + + require.Equal(t, corev1.ConditionTrue, trafficPermissions.Status.Conditions[0].Status) + require.Equal(t, "reason", trafficPermissions.Status.Conditions[0].Reason) + require.Equal(t, "message", trafficPermissions.Status.Conditions[0].Message) + now := metav1.Now() + require.True(t, trafficPermissions.Status.Conditions[0].LastTransitionTime.Before(&now)) + require.True(t, trafficPermissions.Status.Conditions[0].LastTransitionTime.Before(&now)) +} + +func TestProxyConfiguration_SetLastSyncedTime(t *testing.T) { + trafficPermissions := &ProxyConfiguration{} + syncedTime := metav1.NewTime(time.Now()) + trafficPermissions.SetLastSyncedTime(&syncedTime) + + require.Equal(t, &syncedTime, trafficPermissions.Status.LastSyncedTime) +} + +func TestProxyConfiguration_GetSyncedConditionStatus(t *testing.T) { + cases := []corev1.ConditionStatus{ + corev1.ConditionUnknown, + corev1.ConditionFalse, + corev1.ConditionTrue, + } + for _, status := range cases { + t.Run(string(status), func(t *testing.T) { + trafficPermissions := &ProxyConfiguration{ + Status: Status{ + Conditions: []Condition{{ + Type: ConditionSynced, + Status: status, + }}, + }, + } + + require.Equal(t, status, trafficPermissions.SyncedConditionStatus()) + }) + } +} + +func TestProxyConfiguration_GetConditionWhenStatusNil(t *testing.T) { + require.Nil(t, (&ProxyConfiguration{}).GetCondition(ConditionSynced)) +} + +func TestProxyConfiguration_SyncedConditionStatusWhenStatusNil(t *testing.T) { + require.Equal(t, corev1.ConditionUnknown, (&ProxyConfiguration{}).SyncedConditionStatus()) +} + +func TestProxyConfiguration_SyncedConditionWhenStatusNil(t *testing.T) { + status, reason, message := (&ProxyConfiguration{}).SyncedCondition() + 
require.Equal(t, corev1.ConditionUnknown, status) + require.Equal(t, "", reason) + require.Equal(t, "", message) +} + +func TestProxyConfiguration_KubeKind(t *testing.T) { + require.Equal(t, "proxyconfiguration", (&ProxyConfiguration{}).KubeKind()) +} + +func TestProxyConfiguration_KubernetesName(t *testing.T) { + require.Equal(t, "test", (&ProxyConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "bar", + }, + Spec: pbmesh.ProxyConfiguration{}, + }).KubernetesName()) +} + +func TestProxyConfiguration_ObjectMeta(t *testing.T) { + meta := metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + } + trafficPermissions := &ProxyConfiguration{ + ObjectMeta: meta, + } + require.Equal(t, &meta, trafficPermissions.GetObjectMeta()) +} + +// Test defaulting behavior when namespaces are enabled as well as disabled. +// TODO: add when implemented +// func TestProxyConfiguration_DefaultNamespaceFields(t *testing.T) + +func constructProxyConfigurationResource(tp *pbmesh.ProxyConfiguration, name, namespace, partition string) *pbresource.Resource { + data := inject.ToProtoAny(tp) + + id := &pbresource.ID{ + Name: name, + Type: pbmesh.ProxyConfigurationType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + Uid: "ABCD", // We add this to show it does not factor into the comparison + } + + return &pbresource.Resource{ + Id: id, + Data: data, + Metadata: meshConfigMeta(), + + // We add the fields below to prove that they are not used in the Match when comparing the CRD to Consul. 
+ Version: "123456", + Generation: "01ARZ3NDEKTSV4RRFFQ69G5FAV", + Status: map[string]*pbresource.Status{ + "knock": { + ObservedGeneration: "01ARZ3NDEKTSV4RRFFQ69G5FAV", + Conditions: make([]*pbresource.Condition, 0), + UpdatedAt: timestamppb.Now(), + }, + }, + } +} diff --git a/control-plane/api/mesh/v2beta1/shared_types.go b/control-plane/api/mesh/v2beta1/shared_types.go new file mode 100644 index 0000000000..a5225afb71 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/shared_types.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + "github.com/hashicorp/consul-k8s/control-plane/api/common" +) + +func meshConfigMeta() map[string]string { + return map[string]string{ + common.SourceKey: common.SourceValue, + } +} diff --git a/control-plane/api/mesh/v2beta1/status.go b/control-plane/api/mesh/v2beta1/status.go new file mode 100644 index 0000000000..cc75a1cd82 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/status.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Conditions is the schema for the conditions portion of the payload. +type Conditions []Condition + +// ConditionType is a camel-cased condition type. +type ConditionType string + +const ( + // ConditionSynced specifies that the resource has been synced with Consul. + ConditionSynced ConditionType = "Synced" +) + +// Conditions define a readiness condition for a Consul resource. +// See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +// +k8s:deepcopy-gen=true +// +k8s:openapi-gen=true +type Condition struct { + // Type of condition. + // +required + Type ConditionType `json:"type" description:"type of status condition"` + + // Status of the condition, one of True, False, Unknown. 
+ // +required + Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" description:"last time the condition transitioned from one status to another"` + + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` +} + +// IsTrue is true if the condition is True. +func (c *Condition) IsTrue() bool { + if c == nil { + return false + } + return c.Status == corev1.ConditionTrue +} + +// IsFalse is true if the condition is False. +func (c *Condition) IsFalse() bool { + if c == nil { + return false + } + return c.Status == corev1.ConditionFalse +} + +// IsUnknown is true if the condition is Unknown. +func (c *Condition) IsUnknown() bool { + if c == nil { + return true + } + return c.Status == corev1.ConditionUnknown +} + +// +k8s:deepcopy-gen=true +// +k8s:openapi-gen=true +type Status struct { + // Conditions indicate the latest available observations of a resource's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions Conditions `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // LastSyncedTime is the last time the resource successfully synced with Consul. 
+ // +optional + LastSyncedTime *metav1.Time `json:"lastSyncedTime,omitempty" description:"last time the condition transitioned from one status to another"` +} + +func (s *Status) GetCondition(t ConditionType) *Condition { + for _, cond := range s.Conditions { + if cond.Type == t { + return &cond + } + } + return nil +} diff --git a/control-plane/api/mesh/v2beta1/tcp_route_types.go b/control-plane/api/mesh/v2beta1/tcp_route_types.go new file mode 100644 index 0000000000..f5fa2888d6 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/tcp_route_types.go @@ -0,0 +1,190 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package v2beta1 + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "google.golang.org/protobuf/testing/protocmp" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" +) + +const ( + tcpRouteKubeKind = "tcproute" +) + +func init() { + MeshSchemeBuilder.Register(&TCPRoute{}, &TCPRouteList{}) +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// TCPRoute is the Schema for the TCP Route API +// +kubebuilder:printcolumn:name="Synced",type="string",JSONPath=".status.conditions[?(@.type==\"Synced\")].status",description="The sync status of the resource with Consul" +// +kubebuilder:printcolumn:name="Last Synced",type="date",JSONPath=".status.lastSyncedTime",description="The last successful synced time of the resource with Consul" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age 
of the resource" +// +kubebuilder:resource:shortName="tcp-route" +type TCPRoute struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec pbmesh.TCPRoute `json:"spec,omitempty"` + Status `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TCPRouteList contains a list of TCPRoute. +type TCPRouteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []*TCPRoute `json:"items"` +} + +func (in *TCPRoute) ResourceID(namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: in.Name, + Type: pbmesh.TCPRouteType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + } +} + +func (in *TCPRoute) Resource(namespace, partition string) *pbresource.Resource { + return &pbresource.Resource{ + Id: in.ResourceID(namespace, partition), + Data: inject.ToProtoAny(&in.Spec), + Metadata: meshConfigMeta(), + } +} + +func (in *TCPRoute) AddFinalizer(f string) { + in.ObjectMeta.Finalizers = append(in.Finalizers(), f) +} + +func (in *TCPRoute) RemoveFinalizer(f string) { + var newFinalizers []string + for _, oldF := range in.Finalizers() { + if oldF != f { + newFinalizers = append(newFinalizers, oldF) + } + } + in.ObjectMeta.Finalizers = newFinalizers +} + +func (in *TCPRoute) Finalizers() []string { + return in.ObjectMeta.Finalizers +} + +func (in *TCPRoute) MatchesConsul(candidate *pbresource.Resource, namespace, partition string) bool { + return cmp.Equal( + in.Resource(namespace, partition), + candidate, + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + protocmp.Transform(), + cmpopts.SortSlices(func(a, b any) bool { return fmt.Sprintf("%v", a) < fmt.Sprintf("%v", b) }), + ) +} + +func (in *TCPRoute) KubeKind() string { + return tcpRouteKubeKind +} + +func (in *TCPRoute) KubernetesName() string { + return in.ObjectMeta.Name +} + +func (in 
*TCPRoute) SetSyncedCondition(status corev1.ConditionStatus, reason, message string) { + in.Status.Conditions = Conditions{ + { + Type: ConditionSynced, + Status: status, + LastTransitionTime: metav1.Now(), + Reason: reason, + Message: message, + }, + } +} + +func (in *TCPRoute) SetLastSyncedTime(time *metav1.Time) { + in.Status.LastSyncedTime = time +} + +func (in *TCPRoute) SyncedCondition() (status corev1.ConditionStatus, reason, message string) { + cond := in.Status.GetCondition(ConditionSynced) + if cond == nil { + return corev1.ConditionUnknown, "", "" + } + return cond.Status, cond.Reason, cond.Message +} + +func (in *TCPRoute) SyncedConditionStatus() corev1.ConditionStatus { + condition := in.Status.GetCondition(ConditionSynced) + if condition == nil { + return corev1.ConditionUnknown + } + return condition.Status +} + +func (in *TCPRoute) Validate(tenancy common.ConsulTenancyConfig) error { + var errs field.ErrorList + var route pbmesh.TCPRoute + path := field.NewPath("spec") + res := in.Resource(tenancy.ConsulDestinationNamespace, tenancy.ConsulPartition) + + if err := res.Data.UnmarshalTo(&route); err != nil { + return fmt.Errorf("error parsing resource data as type %q: %s", &route, err) + } + + if len(route.ParentRefs) == 0 { + errs = append(errs, field.Required(path.Child("parentRefs"), "cannot be empty")) + } + + if len(route.Rules) > 1 { + errs = append(errs, field.Invalid(path.Child("rules"), route.Rules, "must only specify a single rule for now")) + } + + for i, rule := range route.Rules { + rulePath := path.Child("rules").Index(i) + + if len(rule.BackendRefs) == 0 { + errs = append(errs, field.Required(rulePath.Child("backendRefs"), "cannot be empty")) + } + for j, hbref := range rule.BackendRefs { + ruleBackendRefsPath := rulePath.Child("backendRefs").Index(j) + if hbref.BackendRef == nil { + errs = append(errs, field.Required(ruleBackendRefsPath.Child("backendRef"), "missing required field")) + continue + } + + if hbref.BackendRef.Datacenter != 
"" { + errs = append(errs, field.Invalid(ruleBackendRefsPath.Child("backendRef").Child("datacenter"), hbref.BackendRef.Datacenter, "datacenter is not yet supported on backend refs")) + } + } + } + + if len(errs) > 0 { + return apierrors.NewInvalid( + schema.GroupKind{Group: MeshGroup, Kind: common.TCPRoute}, + in.KubernetesName(), errs) + } + return nil +} + +// DefaultNamespaceFields is required as part of the common.MeshConfig interface. +func (in *TCPRoute) DefaultNamespaceFields(tenancy common.ConsulTenancyConfig) {} diff --git a/control-plane/api/mesh/v2beta1/tcp_route_types_test.go b/control-plane/api/mesh/v2beta1/tcp_route_types_test.go new file mode 100644 index 0000000000..fd139058d7 --- /dev/null +++ b/control-plane/api/mesh/v2beta1/tcp_route_types_test.go @@ -0,0 +1,564 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/timestamppb" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +func TestTCPRoute_MatchesConsul(t *testing.T) { + cases := map[string]struct { + OurConsulNamespace string + OurConsulPartition string + OurData *TCPRoute + + TheirName string + TheirConsulNamespace string + TheirConsulPartition string + TheirData *pbmesh.TCPRoute + ResourceOverride *pbresource.Resource // Used to test that an empty resource of another type will not match + + Matches bool + }{ + "empty fields 
matches": { + OurConsulNamespace: constants.DefaultConsulNS, + OurConsulPartition: constants.DefaultConsulPartition, + OurData: &TCPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + }, + Spec: pbmesh.TCPRoute{}, + }, + TheirName: "name", + TheirConsulNamespace: constants.DefaultConsulNS, + TheirConsulPartition: constants.DefaultConsulPartition, + TheirData: &pbmesh.TCPRoute{}, + Matches: true, + }, + "all fields set matches": { + OurConsulNamespace: "consul-ns", + OurConsulPartition: "consul-partition", + OurData: &TCPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.TCPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "parent-name", + Section: "parent-section", + }, + Port: "20122", + }, + }, + Rules: []*pbmesh.TCPRouteRule{ + { + BackendRefs: []*pbmesh.TCPBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Namespace: "another-namespace", + }, + Name: "backend-name", + Section: "backend-section", + }, + Port: "20111", + Datacenter: "different-datacenter", + }, + Weight: 50, + }, + }, + }, + }, + }, + }, + TheirName: "foo", + TheirConsulNamespace: "consul-ns", + TheirConsulPartition: "consul-partition", + TheirData: &pbmesh.TCPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "parent-name", + Section: "parent-section", + }, + Port: "20122", + }, + }, + Rules: []*pbmesh.TCPRouteRule{ + { + BackendRefs: []*pbmesh.TCPBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + 
Namespace: "another-namespace", + }, + Name: "backend-name", + Section: "backend-section", + }, + Port: "20111", + Datacenter: "different-datacenter", + }, + Weight: 50, + }, + }, + }, + }, + }, + Matches: true, + }, + "different types does not match": { + OurConsulNamespace: constants.DefaultConsulNS, + OurConsulPartition: constants.DefaultConsulPartition, + OurData: &TCPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + }, + Spec: pbmesh.TCPRoute{}, + }, + ResourceOverride: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "name", + Type: pbmesh.ProxyConfigurationType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.DefaultConsulNS, + Namespace: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbmesh.ProxyConfiguration{}), + Metadata: meshConfigMeta(), + }, + Matches: false, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + consulResource := c.ResourceOverride + if c.TheirName != "" { + consulResource = constructTCPRouteResource(c.TheirData, c.TheirName, c.TheirConsulNamespace, c.TheirConsulPartition) + } + require.Equal(t, c.Matches, c.OurData.MatchesConsul(consulResource, c.OurConsulNamespace, c.OurConsulPartition)) + }) + } +} + +// TestTCPRoute_Resource also includes test to verify ResourceID(). 
+func TestTCPRoute_Resource(t *testing.T) { + cases := map[string]struct { + Ours *TCPRoute + ConsulNamespace string + ConsulPartition string + ExpectedName string + ExpectedData *pbmesh.TCPRoute + }{ + "empty fields": { + Ours: &TCPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: pbmesh.TCPRoute{}, + }, + ConsulNamespace: constants.DefaultConsulNS, + ConsulPartition: constants.DefaultConsulPartition, + ExpectedName: "foo", + ExpectedData: &pbmesh.TCPRoute{}, + }, + "every field set": { + Ours: &TCPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.TCPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "parent-name", + Section: "parent-section", + }, + Port: "20122", + }, + }, + Rules: []*pbmesh.TCPRouteRule{ + { + BackendRefs: []*pbmesh.TCPBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Namespace: "another-namespace", + }, + Name: "backend-name", + Section: "backend-section", + }, + Port: "20111", + Datacenter: "different-datacenter", + }, + Weight: 50, + }, + }, + }, + }, + }, + }, + ConsulNamespace: "not-default-namespace", + ConsulPartition: "not-default-partition", + ExpectedName: "foo", + ExpectedData: &pbmesh.TCPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "parent-name", + Section: "parent-section", + }, + Port: "20122", + }, + }, + Rules: []*pbmesh.TCPRouteRule{ + { + BackendRefs: []*pbmesh.TCPBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: 
&pbresource.Tenancy{ + Namespace: "another-namespace", + }, + Name: "backend-name", + Section: "backend-section", + }, + Port: "20111", + Datacenter: "different-datacenter", + }, + Weight: 50, + }, + }, + }, + }, + }, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + actual := c.Ours.Resource(c.ConsulNamespace, c.ConsulPartition) + expected := constructTCPRouteResource(c.ExpectedData, c.ExpectedName, c.ConsulNamespace, c.ConsulPartition) + + opts := append([]cmp.Option{ + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + }, test.CmpProtoIgnoreOrder()...) + diff := cmp.Diff(expected, actual, opts...) + require.Equal(t, "", diff, "TCPRoute do not match") + }) + } +} + +func TestTCPRoute_SetSyncedCondition(t *testing.T) { + trafficPermissions := &TCPRoute{} + trafficPermissions.SetSyncedCondition(corev1.ConditionTrue, "reason", "message") + + require.Equal(t, corev1.ConditionTrue, trafficPermissions.Status.Conditions[0].Status) + require.Equal(t, "reason", trafficPermissions.Status.Conditions[0].Reason) + require.Equal(t, "message", trafficPermissions.Status.Conditions[0].Message) + now := metav1.Now() + require.True(t, trafficPermissions.Status.Conditions[0].LastTransitionTime.Before(&now)) +} + +func TestTCPRoute_SetLastSyncedTime(t *testing.T) { + trafficPermissions := &TCPRoute{} + syncedTime := metav1.NewTime(time.Now()) + trafficPermissions.SetLastSyncedTime(&syncedTime) + + require.Equal(t, &syncedTime, trafficPermissions.Status.LastSyncedTime) +} + +func TestTCPRoute_GetSyncedConditionStatus(t *testing.T) { + cases := []corev1.ConditionStatus{ + corev1.ConditionUnknown, + corev1.ConditionFalse, + corev1.ConditionTrue, + } + for _, status := range cases { + t.Run(string(status), func(t *testing.T) { + trafficPermissions := &TCPRoute{ + Status: Status{ + Conditions: []Condition{{ + Type: ConditionSynced, + Status: status, + }}, + }, + } + + 
require.Equal(t, status, trafficPermissions.SyncedConditionStatus()) + }) + } +} + +func TestTCPRoute_GetConditionWhenStatusNil(t *testing.T) { + require.Nil(t, (&TCPRoute{}).GetCondition(ConditionSynced)) +} + +func TestTCPRoute_SyncedConditionStatusWhenStatusNil(t *testing.T) { + require.Equal(t, corev1.ConditionUnknown, (&TCPRoute{}).SyncedConditionStatus()) +} + +func TestTCPRoute_SyncedConditionWhenStatusNil(t *testing.T) { + status, reason, message := (&TCPRoute{}).SyncedCondition() + require.Equal(t, corev1.ConditionUnknown, status) + require.Equal(t, "", reason) + require.Equal(t, "", message) +} + +func TestTCPRoute_KubeKind(t *testing.T) { + require.Equal(t, "tcproute", (&TCPRoute{}).KubeKind()) +} + +func TestTCPRoute_KubernetesName(t *testing.T) { + require.Equal(t, "test", (&TCPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "bar", + }, + Spec: pbmesh.TCPRoute{}, + }).KubernetesName()) +} + +func TestTCPRoute_ObjectMeta(t *testing.T) { + meta := metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + } + trafficPermissions := &TCPRoute{ + ObjectMeta: meta, + } + require.Equal(t, &meta, trafficPermissions.GetObjectMeta()) +} + +// Test defaulting behavior when namespaces are enabled as well as disabled. 
+// TODO: add when implemented +// func TestTCPRoute_DefaultNamespaceFields(t *testing.T) + +func TestTCPRoute_Validate(t *testing.T) { + cases := []struct { + name string + input *TCPRoute + expectedErrMsgs []string + }{ + { + name: "kitchen sink OK", + input: &TCPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.TCPRoute{ + ParentRefs: []*pbmesh.ParentReference{ + { + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Partition: "some-partition", + Namespace: "some-namespace", + }, + Name: "parent-name", + Section: "parent-section", + }, + Port: "20122", + }, + }, + Rules: []*pbmesh.TCPRouteRule{ + { + BackendRefs: []*pbmesh.TCPBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + Tenancy: &pbresource.Tenancy{ + Namespace: "another-namespace", + }, + Name: "backend-name", + Section: "backend-section", + }, + Port: "20111", + }, + Weight: 50, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: nil, + }, + { + name: "no parentRefs", + input: &TCPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.TCPRoute{ + ParentRefs: []*pbmesh.ParentReference{}, + }, + }, + expectedErrMsgs: []string{ + `spec.parentRefs: Required value: cannot be empty`, + }, + }, + { + name: "multiple rules", + input: &TCPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.TCPRoute{ + ParentRefs: []*pbmesh.ParentReference{{}}, + Rules: []*pbmesh.TCPRouteRule{ + {BackendRefs: []*pbmesh.TCPBackendRef{{BackendRef: &pbmesh.BackendReference{}}}}, + {BackendRefs: []*pbmesh.TCPBackendRef{{BackendRef: &pbmesh.BackendReference{}}}}, + }, + }, + }, + expectedErrMsgs: []string{ + `must only specify a single rule for now`, + }, + }, + { + name: "rules.backendRefs", + input: &TCPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + 
}, + Spec: pbmesh.TCPRoute{ + ParentRefs: []*pbmesh.ParentReference{{}}, + Rules: []*pbmesh.TCPRouteRule{ + {BackendRefs: []*pbmesh.TCPBackendRef{}}, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].backendRefs: Required value: cannot be empty`, + }, + }, + { + name: "rules.backendRefs.backendRef", + input: &TCPRoute{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "kube-ns", + }, + Spec: pbmesh.TCPRoute{ + ParentRefs: []*pbmesh.ParentReference{{}}, + Rules: []*pbmesh.TCPRouteRule{ + { + BackendRefs: []*pbmesh.TCPBackendRef{ + {}, + { + BackendRef: &pbmesh.BackendReference{ + Ref: &pbresource.Reference{ + Type: pbmesh.ComputedRoutesType, + }, + Datacenter: "backend-datacenter", + }, + }, + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.rules[0].backendRefs[0].backendRef: Required value: missing required field`, + `spec.rules[0].backendRefs[1].backendRef.datacenter: Invalid value: "backend-datacenter": datacenter is not yet supported on backend refs`, + }, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := tc.input.Validate(common.ConsulTenancyConfig{}) + if len(tc.expectedErrMsgs) != 0 { + require.Error(t, err) + for _, s := range tc.expectedErrMsgs { + require.Contains(t, err.Error(), s) + } + } else { + require.NoError(t, err) + } + }) + } +} + +func constructTCPRouteResource(tp *pbmesh.TCPRoute, name, namespace, partition string) *pbresource.Resource { + data := inject.ToProtoAny(tp) + + id := &pbresource.ID{ + Name: name, + Type: pbmesh.TCPRouteType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + Uid: "ABCD", // We add this to show it does not factor into the comparison + } + + return &pbresource.Resource{ + Id: id, + Data: data, + Metadata: meshConfigMeta(), + + // We add the fields below to prove that they are not used in the Match when comparing the CRD to Consul. 
+ Version: "123456", + Generation: "01ARZ3NDEKTSV4RRFFQ69G5FAV", + Status: map[string]*pbresource.Status{ + "knock": { + ObservedGeneration: "01ARZ3NDEKTSV4RRFFQ69G5FAV", + Conditions: make([]*pbresource.Condition, 0), + UpdatedAt: timestamppb.Now(), + }, + }, + } +} diff --git a/control-plane/api/mesh/v2beta1/tcp_route_webhook.go b/control-plane/api/mesh/v2beta1/tcp_route_webhook.go new file mode 100644 index 0000000000..731e4e5b2d --- /dev/null +++ b/control-plane/api/mesh/v2beta1/tcp_route_webhook.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2beta1 + +import ( + "context" + "net/http" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" +) + +// +kubebuilder:object:generate=false + +type TCPRouteWebhook struct { + Logger logr.Logger + + // ConsulTenancyConfig contains the injector's namespace and partition configuration. + ConsulTenancyConfig common.ConsulTenancyConfig + + decoder *admission.Decoder + client.Client +} + +var _ common.ConsulResourceLister = &TCPRouteWebhook{} + +// NOTE: The path value in the below line is the path to the webhook. +// If it is updated, run code-gen, update subcommand/inject-connect/command.go +// and the consul-helm value for the path to the webhook. +// +// NOTE: The below line cannot be combined with any other comment. If it is it will break the code generation. 
+// +// +kubebuilder:webhook:verbs=create;update,path=/mutate-v2beta1-tcproute,mutating=true,failurePolicy=fail,groups=auth.consul.hashicorp.com,resources=tcproute,versions=v2beta1,name=mutate-tcproute.auth.consul.hashicorp.com,sideEffects=None,admissionReviewVersions=v1beta1;v1 + +func (v *TCPRouteWebhook) Handle(ctx context.Context, req admission.Request) admission.Response { + var resource TCPRoute + err := v.decoder.Decode(req, &resource) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + return common.ValidateConsulResource(ctx, req, v.Logger, v, &resource, v.ConsulTenancyConfig) +} + +func (v *TCPRouteWebhook) List(ctx context.Context) ([]common.ConsulResource, error) { + var resourceList TCPRouteList + if err := v.Client.List(ctx, &resourceList); err != nil { + return nil, err + } + var entries []common.ConsulResource + for _, item := range resourceList.Items { + entries = append(entries, common.ConsulResource(item)) + } + return entries, nil +} + +func (v *TCPRouteWebhook) SetupWithManager(mgr ctrl.Manager) { + v.decoder = admission.NewDecoder(mgr.GetScheme()) + mgr.GetWebhookServer().Register("/mutate-v2beta1-tcproute", &admission.Webhook{Handler: v}) +} diff --git a/control-plane/api/mesh/v2beta1/zz_generated.deepcopy.go b/control-plane/api/mesh/v2beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..9cc59efb3e --- /dev/null +++ b/control-plane/api/mesh/v2beta1/zz_generated.deepcopy.go @@ -0,0 +1,980 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v2beta1 + +import ( + "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *APIGateway) DeepCopyInto(out *APIGateway) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGateway.
func (in *APIGateway) DeepCopy() *APIGateway {
	if in == nil {
		return nil
	}
	out := new(APIGateway)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *APIGateway) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIGatewayList) DeepCopyInto(out *APIGatewayList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]*APIGateway, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(APIGateway)
				(*in).DeepCopyInto(*out)
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGatewayList.
func (in *APIGatewayList) DeepCopy() *APIGatewayList {
	if in == nil {
		return nil
	}
	out := new(APIGatewayList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *APIGatewayList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
	*out = *in
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
func (in *Condition) DeepCopy() *Condition {
	if in == nil {
		return nil
	}
	out := new(Condition)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Conditions) DeepCopyInto(out *Conditions) {
	{
		in := &in
		*out = make(Conditions, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions.
func (in Conditions) DeepCopy() Conditions {
	if in == nil {
		return nil
	}
	out := new(Conditions)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GRPCRoute) DeepCopyInto(out *GRPCRoute) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRoute.
func (in *GRPCRoute) DeepCopy() *GRPCRoute {
	if in == nil {
		return nil
	}
	out := new(GRPCRoute)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GRPCRoute) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GRPCRouteList) DeepCopyInto(out *GRPCRouteList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]*GRPCRoute, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(GRPCRoute)
				(*in).DeepCopyInto(*out)
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteList.
func (in *GRPCRouteList) DeepCopy() *GRPCRouteList {
	if in == nil {
		return nil
	}
	out := new(GRPCRouteList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GRPCRouteList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClass) DeepCopyInto(out *GatewayClass) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClass.
func (in *GatewayClass) DeepCopy() *GatewayClass {
	if in == nil {
		return nil
	}
	out := new(GatewayClass)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GatewayClass) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassAnnotationsAndLabels) DeepCopyInto(out *GatewayClassAnnotationsAndLabels) {
	*out = *in
	in.Annotations.DeepCopyInto(&out.Annotations)
	in.Labels.DeepCopyInto(&out.Labels)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassAnnotationsAndLabels.
func (in *GatewayClassAnnotationsAndLabels) DeepCopy() *GatewayClassAnnotationsAndLabels {
	if in == nil {
		return nil
	}
	out := new(GatewayClassAnnotationsAndLabels)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassAnnotationsLabelsConfig) DeepCopyInto(out *GatewayClassAnnotationsLabelsConfig) {
	*out = *in
	if in.InheritFromGateway != nil {
		in, out := &in.InheritFromGateway, &out.InheritFromGateway
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Set != nil {
		in, out := &in.Set, &out.Set
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassAnnotationsLabelsConfig.
func (in *GatewayClassAnnotationsLabelsConfig) DeepCopy() *GatewayClassAnnotationsLabelsConfig {
	if in == nil {
		return nil
	}
	out := new(GatewayClassAnnotationsLabelsConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassConfig) DeepCopyInto(out *GatewayClassConfig) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassConfig.
func (in *GatewayClassConfig) DeepCopy() *GatewayClassConfig {
	if in == nil {
		return nil
	}
	out := new(GatewayClassConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GatewayClassConfig) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassConfigList) DeepCopyInto(out *GatewayClassConfigList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]*GatewayClassConfig, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(GatewayClassConfig)
				(*in).DeepCopyInto(*out)
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassConfigList.
func (in *GatewayClassConfigList) DeepCopy() *GatewayClassConfigList {
	if in == nil {
		return nil
	}
	out := new(GatewayClassConfigList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GatewayClassConfigList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassConfigSpec) DeepCopyInto(out *GatewayClassConfigSpec) {
	*out = *in
	in.GatewayClassAnnotationsAndLabels.DeepCopyInto(&out.GatewayClassAnnotationsAndLabels)
	in.Deployment.DeepCopyInto(&out.Deployment)
	in.Role.DeepCopyInto(&out.Role)
	in.RoleBinding.DeepCopyInto(&out.RoleBinding)
	in.Service.DeepCopyInto(&out.Service)
	in.ServiceAccount.DeepCopyInto(&out.ServiceAccount)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassConfigSpec.
func (in *GatewayClassConfigSpec) DeepCopy() *GatewayClassConfigSpec {
	if in == nil {
		return nil
	}
	out := new(GatewayClassConfigSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassConsulConfig) DeepCopyInto(out *GatewayClassConsulConfig) {
	*out = *in
	out.Logging = in.Logging
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassConsulConfig.
func (in *GatewayClassConsulConfig) DeepCopy() *GatewayClassConsulConfig {
	if in == nil {
		return nil
	}
	out := new(GatewayClassConsulConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassConsulLoggingConfig) DeepCopyInto(out *GatewayClassConsulLoggingConfig) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassConsulLoggingConfig.
func (in *GatewayClassConsulLoggingConfig) DeepCopy() *GatewayClassConsulLoggingConfig {
	if in == nil {
		return nil
	}
	out := new(GatewayClassConsulLoggingConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassContainerConfig) DeepCopyInto(out *GatewayClassContainerConfig) {
	*out = *in
	out.Consul = in.Consul
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = new(v1.ResourceRequirements)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassContainerConfig.
func (in *GatewayClassContainerConfig) DeepCopy() *GatewayClassContainerConfig {
	if in == nil {
		return nil
	}
	out := new(GatewayClassContainerConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassDeploymentConfig) DeepCopyInto(out *GatewayClassDeploymentConfig) {
	*out = *in
	in.GatewayClassAnnotationsAndLabels.DeepCopyInto(&out.GatewayClassAnnotationsAndLabels)
	if in.Container != nil {
		in, out := &in.Container, &out.Container
		*out = new(GatewayClassContainerConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.InitContainer != nil {
		in, out := &in.InitContainer, &out.InitContainer
		*out = new(GatewayClassInitContainerConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Replicas != nil {
		in, out := &in.Replicas, &out.Replicas
		*out = new(GatewayClassReplicasConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.SecurityContext != nil {
		in, out := &in.SecurityContext, &out.SecurityContext
		*out = new(v1.PodSecurityContext)
		(*in).DeepCopyInto(*out)
	}
	if in.Tolerations != nil {
		in, out := &in.Tolerations, &out.Tolerations
		*out = make([]v1.Toleration, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.TopologySpreadConstraints != nil {
		in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
		*out = make([]v1.TopologySpreadConstraint, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Affinity != nil {
		in, out := &in.Affinity, &out.Affinity
		*out = new(v1.Affinity)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassDeploymentConfig.
func (in *GatewayClassDeploymentConfig) DeepCopy() *GatewayClassDeploymentConfig {
	if in == nil {
		return nil
	}
	out := new(GatewayClassDeploymentConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassInitContainerConfig) DeepCopyInto(out *GatewayClassInitContainerConfig) {
	*out = *in
	out.Consul = in.Consul
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = new(v1.ResourceRequirements)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassInitContainerConfig.
func (in *GatewayClassInitContainerConfig) DeepCopy() *GatewayClassInitContainerConfig {
	if in == nil {
		return nil
	}
	out := new(GatewayClassInitContainerConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassList) DeepCopyInto(out *GatewayClassList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]*GatewayClass, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(GatewayClass)
				(*in).DeepCopyInto(*out)
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassList.
func (in *GatewayClassList) DeepCopy() *GatewayClassList {
	if in == nil {
		return nil
	}
	out := new(GatewayClassList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GatewayClassList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassReplicasConfig) DeepCopyInto(out *GatewayClassReplicasConfig) {
	*out = *in
	if in.Default != nil {
		in, out := &in.Default, &out.Default
		*out = new(int32)
		**out = **in
	}
	if in.Min != nil {
		in, out := &in.Min, &out.Min
		*out = new(int32)
		**out = **in
	}
	if in.Max != nil {
		in, out := &in.Max, &out.Max
		*out = new(int32)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassReplicasConfig.
func (in *GatewayClassReplicasConfig) DeepCopy() *GatewayClassReplicasConfig {
	if in == nil {
		return nil
	}
	out := new(GatewayClassReplicasConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassRoleBindingConfig) DeepCopyInto(out *GatewayClassRoleBindingConfig) {
	*out = *in
	in.GatewayClassAnnotationsAndLabels.DeepCopyInto(&out.GatewayClassAnnotationsAndLabels)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassRoleBindingConfig.
func (in *GatewayClassRoleBindingConfig) DeepCopy() *GatewayClassRoleBindingConfig {
	if in == nil {
		return nil
	}
	out := new(GatewayClassRoleBindingConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassRoleConfig) DeepCopyInto(out *GatewayClassRoleConfig) {
	*out = *in
	in.GatewayClassAnnotationsAndLabels.DeepCopyInto(&out.GatewayClassAnnotationsAndLabels)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassRoleConfig.
func (in *GatewayClassRoleConfig) DeepCopy() *GatewayClassRoleConfig {
	if in == nil {
		return nil
	}
	out := new(GatewayClassRoleConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassServiceAccountConfig) DeepCopyInto(out *GatewayClassServiceAccountConfig) {
	*out = *in
	in.GatewayClassAnnotationsAndLabels.DeepCopyInto(&out.GatewayClassAnnotationsAndLabels)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassServiceAccountConfig.
func (in *GatewayClassServiceAccountConfig) DeepCopy() *GatewayClassServiceAccountConfig {
	if in == nil {
		return nil
	}
	out := new(GatewayClassServiceAccountConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassServiceConfig) DeepCopyInto(out *GatewayClassServiceConfig) {
	*out = *in
	in.GatewayClassAnnotationsAndLabels.DeepCopyInto(&out.GatewayClassAnnotationsAndLabels)
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(v1.ServiceType)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassServiceConfig.
func (in *GatewayClassServiceConfig) DeepCopy() *GatewayClassServiceConfig {
	if in == nil {
		return nil
	}
	out := new(GatewayClassServiceConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayClassSpec) DeepCopyInto(out *GatewayClassSpec) {
	*out = *in
	if in.ParametersRef != nil {
		in, out := &in.ParametersRef, &out.ParametersRef
		*out = new(ParametersReference)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassSpec.
func (in *GatewayClassSpec) DeepCopy() *GatewayClassSpec {
	if in == nil {
		return nil
	}
	out := new(GatewayClassSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRoute) DeepCopyInto(out *HTTPRoute) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRoute.
func (in *HTTPRoute) DeepCopy() *HTTPRoute {
	if in == nil {
		return nil
	}
	out := new(HTTPRoute)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HTTPRoute) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteList) DeepCopyInto(out *HTTPRouteList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]*HTTPRoute, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(HTTPRoute)
				(*in).DeepCopyInto(*out)
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteList.
func (in *HTTPRouteList) DeepCopy() *HTTPRouteList {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HTTPRouteList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MeshConfiguration) DeepCopyInto(out *MeshConfiguration) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfiguration.
func (in *MeshConfiguration) DeepCopy() *MeshConfiguration {
	if in == nil {
		return nil
	}
	out := new(MeshConfiguration)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MeshConfiguration) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MeshConfigurationList) DeepCopyInto(out *MeshConfigurationList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]*MeshConfiguration, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(MeshConfiguration)
				(*in).DeepCopyInto(*out)
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfigurationList.
func (in *MeshConfigurationList) DeepCopy() *MeshConfigurationList {
	if in == nil {
		return nil
	}
	out := new(MeshConfigurationList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MeshConfigurationList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MeshGateway) DeepCopyInto(out *MeshGateway) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshGateway.
func (in *MeshGateway) DeepCopy() *MeshGateway {
	if in == nil {
		return nil
	}
	out := new(MeshGateway)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MeshGateway) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MeshGatewayList) DeepCopyInto(out *MeshGatewayList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]*MeshGateway, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(MeshGateway)
				(*in).DeepCopyInto(*out)
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshGatewayList.
func (in *MeshGatewayList) DeepCopy() *MeshGatewayList {
	if in == nil {
		return nil
	}
	out := new(MeshGatewayList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MeshGatewayList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParametersReference) DeepCopyInto(out *ParametersReference) {
	*out = *in
	if in.Namespace != nil {
		in, out := &in.Namespace, &out.Namespace
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersReference.
func (in *ParametersReference) DeepCopy() *ParametersReference {
	if in == nil {
		return nil
	}
	out := new(ParametersReference)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProxyConfiguration) DeepCopyInto(out *ProxyConfiguration) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfiguration.
func (in *ProxyConfiguration) DeepCopy() *ProxyConfiguration {
	if in == nil {
		return nil
	}
	out := new(ProxyConfiguration)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProxyConfiguration) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProxyConfigurationList) DeepCopyInto(out *ProxyConfigurationList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]*ProxyConfiguration, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(ProxyConfiguration)
				(*in).DeepCopyInto(*out)
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigurationList.
func (in *ProxyConfigurationList) DeepCopy() *ProxyConfigurationList {
	if in == nil {
		return nil
	}
	out := new(ProxyConfigurationList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProxyConfigurationList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Status) DeepCopyInto(out *Status) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make(Conditions, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.LastSyncedTime != nil {
		in, out := &in.LastSyncedTime, &out.LastSyncedTime
		*out = (*in).DeepCopy()
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
func (in *Status) DeepCopy() *Status {
	if in == nil {
		return nil
	}
	out := new(Status)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TCPRoute) DeepCopyInto(out *TCPRoute) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRoute. +func (in *TCPRoute) DeepCopy() *TCPRoute { + if in == nil { + return nil + } + out := new(TCPRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TCPRoute) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteList) DeepCopyInto(out *TCPRouteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*TCPRoute, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(TCPRoute) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteList. +func (in *TCPRouteList) DeepCopy() *TCPRouteList { + if in == nil { + return nil + } + out := new(TCPRouteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TCPRouteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/control-plane/api/multicluster/v2/exported_services_types.go b/control-plane/api/multicluster/v2/exported_services_types.go new file mode 100644 index 0000000000..2e746efbee --- /dev/null +++ b/control-plane/api/multicluster/v2/exported_services_types.go @@ -0,0 +1,148 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package v2 + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + pbmulticluster "github.com/hashicorp/consul/proto-public/pbmulticluster/v2" + "github.com/hashicorp/consul/proto-public/pbresource" + "google.golang.org/protobuf/testing/protocmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" +) + +const ( + exportedServicesKubeKind = "exportedservices" +) + +func init() { + MultiClusterSchemeBuilder.Register(&ExportedServices{}, &ExportedServicesList{}) +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ExportedServices is the Schema for the Exported Services API +// +kubebuilder:printcolumn:name="Synced",type="string",JSONPath=".status.conditions[?(@.type==\"Synced\")].status",description="The sync status of the resource with Consul" +// +kubebuilder:printcolumn:name="Last Synced",type="date",JSONPath=".status.lastSyncedTime",description="The last successful synced time of the resource with Consul" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the resource" +// +kubebuilder:resource:scope="Namespaced" +type ExportedServices struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec pbmulticluster.ExportedServices `json:"spec,omitempty"` + 
Status `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExportedServicesList contains a list of ExportedServices. +type ExportedServicesList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []*ExportedServices `json:"items"` +} + +func (in *ExportedServices) ResourceID(_, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: in.Name, + Type: pbmulticluster.ExportedServicesType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: "", // Namespace is always unset because ExportedServices is partition-scoped + + }, + } +} + +func (in *ExportedServices) Resource(namespace, partition string) *pbresource.Resource { + return &pbresource.Resource{ + Id: in.ResourceID(namespace, partition), + Data: inject.ToProtoAny(&in.Spec), + Metadata: multiClusterConfigMeta(), + } +} + +func (in *ExportedServices) AddFinalizer(f string) { + in.ObjectMeta.Finalizers = append(in.Finalizers(), f) +} + +func (in *ExportedServices) RemoveFinalizer(f string) { + var newFinalizers []string + for _, oldF := range in.Finalizers() { + if oldF != f { + newFinalizers = append(newFinalizers, oldF) + } + } + in.ObjectMeta.Finalizers = newFinalizers +} + +func (in *ExportedServices) Finalizers() []string { + return in.ObjectMeta.Finalizers +} + +func (in *ExportedServices) MatchesConsul(candidate *pbresource.Resource, namespace, partition string) bool { + return cmp.Equal( + in.Resource(namespace, partition), + candidate, + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + protocmp.Transform(), + cmpopts.SortSlices(func(a, b any) bool { return fmt.Sprintf("%v", a) < fmt.Sprintf("%v", b) }), + ) +} + +func (in *ExportedServices) KubeKind() string { + return exportedServicesKubeKind +} + +func (in *ExportedServices) KubernetesName() string { + return in.ObjectMeta.Name +} + +func (in *ExportedServices) 
SetSyncedCondition(status corev1.ConditionStatus, reason, message string) { + in.Status.Conditions = Conditions{ + { + Type: ConditionSynced, + Status: status, + LastTransitionTime: metav1.Now(), + Reason: reason, + Message: message, + }, + } +} + +func (in *ExportedServices) SetLastSyncedTime(time *metav1.Time) { + in.Status.LastSyncedTime = time +} + +func (in *ExportedServices) SyncedCondition() (status corev1.ConditionStatus, reason, message string) { + cond := in.Status.GetCondition(ConditionSynced) + if cond == nil { + return corev1.ConditionUnknown, "", "" + } + return cond.Status, cond.Reason, cond.Message +} + +func (in *ExportedServices) SyncedConditionStatus() corev1.ConditionStatus { + condition := in.Status.GetCondition(ConditionSynced) + if condition == nil { + return corev1.ConditionUnknown + } + return condition.Status +} + +func (in *ExportedServices) Validate(tenancy common.ConsulTenancyConfig) error { + // TODO add validation logic that ensures we only ever write this to the default namespace. + return nil +} + +// DefaultNamespaceFields is required as part of the common.MeshConfig interface. +func (in *ExportedServices) DefaultNamespaceFields(tenancy common.ConsulTenancyConfig) {} diff --git a/control-plane/api/multicluster/v2/multicluster_groupversion_info.go b/control-plane/api/multicluster/v2/multicluster_groupversion_info.go new file mode 100644 index 0000000000..7b8f92f4ac --- /dev/null +++ b/control-plane/api/multicluster/v2/multicluster_groupversion_info.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package v2 contains API Schema definitions for the consul.hashicorp.com v2 API group +// +kubebuilder:object:generate=true +// +groupName=multicluster.consul.hashicorp.com +package v2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + + // MultiClusterGroup is a collection of multi-cluster resources. 
+ MultiClusterGroup = "multicluster.consul.hashicorp.com" + + // MultiClusterGroupVersion is group version used to register these objects. + MultiClusterGroupVersion = schema.GroupVersion{Group: MultiClusterGroup, Version: "v2"} + + // MultiClusterSchemeBuilder is used to add go types to the GroupVersionKind scheme. + MultiClusterSchemeBuilder = &scheme.Builder{GroupVersion: MultiClusterGroupVersion} + + // AddMultiClusterToScheme adds the types in this group-version to the given scheme. + AddMultiClusterToScheme = MultiClusterSchemeBuilder.AddToScheme +) diff --git a/control-plane/api/multicluster/v2/shared_types.go b/control-plane/api/multicluster/v2/shared_types.go new file mode 100644 index 0000000000..04f9fcefa4 --- /dev/null +++ b/control-plane/api/multicluster/v2/shared_types.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2 + +import ( + "github.com/hashicorp/consul-k8s/control-plane/api/common" +) + +func multiClusterConfigMeta() map[string]string { + return map[string]string{ + common.SourceKey: common.SourceValue, + } +} diff --git a/control-plane/api/multicluster/v2/status.go b/control-plane/api/multicluster/v2/status.go new file mode 100644 index 0000000000..070c57a7d9 --- /dev/null +++ b/control-plane/api/multicluster/v2/status.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package v2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Conditions is the schema for the conditions portion of the payload. +type Conditions []Condition + +// ConditionType is a camel-cased condition type. +type ConditionType string + +const ( + // ConditionSynced specifies that the resource has been synced with Consul. + ConditionSynced ConditionType = "Synced" +) + +// Conditions define a readiness condition for a Consul resource. 
+// See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +// +k8s:deepcopy-gen=true +// +k8s:openapi-gen=true +type Condition struct { + // Type of condition. + // +required + Type ConditionType `json:"type" description:"type of status condition"` + + // Status of the condition, one of True, False, Unknown. + // +required + Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" description:"last time the condition transitioned from one status to another"` + + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` +} + +// IsTrue is true if the condition is True. +func (c *Condition) IsTrue() bool { + if c == nil { + return false + } + return c.Status == corev1.ConditionTrue +} + +// IsFalse is true if the condition is False. +func (c *Condition) IsFalse() bool { + if c == nil { + return false + } + return c.Status == corev1.ConditionFalse +} + +// IsUnknown is true if the condition is Unknown. +func (c *Condition) IsUnknown() bool { + if c == nil { + return true + } + return c.Status == corev1.ConditionUnknown +} + +// +k8s:deepcopy-gen=true +// +k8s:openapi-gen=true +type Status struct { + // Conditions indicate the latest available observations of a resource's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions Conditions `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // LastSyncedTime is the last time the resource successfully synced with Consul. + // +optional + LastSyncedTime *metav1.Time `json:"lastSyncedTime,omitempty" description:"last time the condition transitioned from one status to another"` +} + +func (s *Status) GetCondition(t ConditionType) *Condition { + for _, cond := range s.Conditions { + if cond.Type == t { + return &cond + } + } + return nil +} diff --git a/control-plane/api/multicluster/v2/zz_generated.deepcopy.go b/control-plane/api/multicluster/v2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..c52d2bfe81 --- /dev/null +++ b/control-plane/api/multicluster/v2/zz_generated.deepcopy.go @@ -0,0 +1,136 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Conditions) DeepCopyInto(out *Conditions) { + { + in := &in + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions. 
+func (in Conditions) DeepCopy() Conditions { + if in == nil { + return nil + } + out := new(Conditions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportedServices) DeepCopyInto(out *ExportedServices) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportedServices. +func (in *ExportedServices) DeepCopy() *ExportedServices { + if in == nil { + return nil + } + out := new(ExportedServices) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExportedServices) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportedServicesList) DeepCopyInto(out *ExportedServicesList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*ExportedServices, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ExportedServices) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportedServicesList. +func (in *ExportedServicesList) DeepCopy() *ExportedServicesList { + if in == nil { + return nil + } + out := new(ExportedServicesList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ExportedServicesList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Status) DeepCopyInto(out *Status) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LastSyncedTime != nil { + in, out := &in.LastSyncedTime, &out.LastSyncedTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. +func (in *Status) DeepCopy() *Status { + if in == nil { + return nil + } + out := new(Status) + in.DeepCopyInto(out) + return out +} diff --git a/control-plane/api/v1alpha1/registration_types.go b/control-plane/api/v1alpha1/registration_types.go deleted file mode 100644 index e3dd8ba0f0..0000000000 --- a/control-plane/api/v1alpha1/registration_types.go +++ /dev/null @@ -1,575 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package v1alpha1 - -import ( - "errors" - "maps" - "slices" - "time" - - capi "github.com/hashicorp/consul/api" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func init() { - SchemeBuilder.Register(&Registration{}, &RegistrationList{}) -} - -// +genclient -// +kubebuilder:object:root=true -// +kubebuilder:resource:scope=Cluster -// +kubebuilder:subresource:status - -// Registration defines the resource for working with service registrations. -type Registration struct { - // Standard Kubernetes resource metadata. - metav1.TypeMeta `json:",inline"` - - // Standard object's metadata. - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the desired state of Registration. 
- Spec RegistrationSpec `json:"spec,omitempty"` - - Status RegistrationStatus `json:"status,omitempty"` -} - -// RegistrationStatus defines the observed state of Registration. -type RegistrationStatus struct { - // Conditions indicate the latest available observations of a resource's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions Conditions `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` - // LastSyncedTime is the last time the resource successfully synced with Consul. - // +optional - LastSyncedTime *metav1.Time `json:"lastSyncedTime,omitempty" description:"last time the condition transitioned from one status to another"` -} - -// +k8s:deepcopy-gen=true - -// RegistrationSpec specifies the desired state of the Config CRD. -type RegistrationSpec struct { - ID string `json:"id,omitempty"` - Node string `json:"node,omitempty"` - Address string `json:"address,omitempty"` - TaggedAddresses map[string]string `json:"taggedAddresses,omitempty"` - NodeMeta map[string]string `json:"nodeMeta,omitempty"` - Datacenter string `json:"datacenter,omitempty"` - Service Service `json:"service,omitempty"` - SkipNodeUpdate bool `json:"skipNodeUpdate,omitempty"` - Partition string `json:"partition,omitempty"` - HealthCheck *HealthCheck `json:"check,omitempty"` - Locality *Locality `json:"locality,omitempty"` -} - -// +k8s:deepcopy-gen=true - -type Service struct { - ID string `json:"id,omitempty"` - Name string `json:"name"` - Tags []string `json:"tags,omitempty"` - Meta map[string]string `json:"meta,omitempty"` - Port int `json:"port"` - Address string `json:"address,omitempty"` - SocketPath string `json:"socketPath,omitempty"` - TaggedAddresses map[string]ServiceAddress `json:"taggedAddresses,omitempty"` - Weights Weights `json:"weights,omitempty"` - EnableTagOverride bool `json:"enableTagOverride,omitempty"` - Locality *Locality `json:"locality,omitempty"` - Namespace string `json:"namespace,omitempty"` - 
Partition string `json:"partition,omitempty"` -} - -// +k8s:deepcopy-gen=true - -type ServiceAddress struct { - Address string `json:"address"` - Port int `json:"port"` -} - -// +k8s:deepcopy-gen=true - -type Weights struct { - Passing int `json:"passing"` - Warning int `json:"warning"` -} - -// +k8s:deepcopy-gen=true - -type Locality struct { - Region string `json:"region,omitempty"` - Zone string `json:"zone,omitempty"` -} - -// +k8s:deepcopy-gen=true - -// HealthCheck is used to represent a single check. -type HealthCheck struct { - Node string `json:"node,omitempty"` - CheckID string `json:"checkId"` - Name string `json:"name"` - Status string `json:"status"` - Notes string `json:"notes,omitempty"` - Output string `json:"output,omitempty"` - ServiceID string `json:"serviceId"` - ServiceName string `json:"serviceName"` - Type string `json:"type,omitempty"` - ExposedPort int `json:"exposedPort,omitempty"` - Definition HealthCheckDefinition `json:"definition"` - Namespace string `json:"namespace,omitempty"` - Partition string `json:"partition,omitempty"` -} - -// HealthCheckDefinition is used to store the details about -// a health check's execution. 
-type HealthCheckDefinition struct { - HTTP string `json:"http,omitempty"` - Header map[string][]string `json:"header,omitempty"` - Method string `json:"method,omitempty"` - Body string `json:"body,omitempty"` - TLSServerName string `json:"tlsServerName,omitempty"` - TLSSkipVerify bool `json:"tlsSkipVerify,omitempty"` - TCP string `json:"tcp,omitempty"` - TCPUseTLS bool `json:"tcpUseTLS,omitempty"` - UDP string `json:"udp,omitempty"` - GRPC string `json:"grpc,omitempty"` - OSService string `json:"osService,omitempty"` - GRPCUseTLS bool `json:"grpcUseTLS,omitempty"` - IntervalDuration string `json:"intervalDuration"` - TimeoutDuration string `json:"timeoutDuration,omitempty"` - DeregisterCriticalServiceAfterDuration string `json:"deregisterCriticalServiceAfterDuration,omitempty"` -} - -// +kubebuilder:object:root=true - -// RegistrationList is a list of Registration resources. -type RegistrationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - // Items is the list of Registrations. - Items []Registration `json:"items"` -} - -// ToCatalogRegistration converts a Registration to a Consul CatalogRegistration. 
-func (r *Registration) ToCatalogRegistration() (*capi.CatalogRegistration, error) { - check, err := copyHealthCheck(r.Spec.HealthCheck) - if err != nil { - return nil, err - } - - return &capi.CatalogRegistration{ - ID: r.Spec.ID, - Node: r.Spec.Node, - Address: r.Spec.Address, - TaggedAddresses: maps.Clone(r.Spec.TaggedAddresses), - NodeMeta: maps.Clone(r.Spec.NodeMeta), - Datacenter: r.Spec.Datacenter, - Service: &capi.AgentService{ - ID: r.Spec.Service.ID, - Service: r.Spec.Service.Name, - Tags: slices.Clone(r.Spec.Service.Tags), - Meta: maps.Clone(r.Spec.Service.Meta), - Port: r.Spec.Service.Port, - Address: r.Spec.Service.Address, - SocketPath: r.Spec.Service.SocketPath, - TaggedAddresses: copyTaggedAddresses(r.Spec.Service.TaggedAddresses), - Weights: capi.AgentWeights(r.Spec.Service.Weights), - EnableTagOverride: r.Spec.Service.EnableTagOverride, - Namespace: r.Spec.Service.Namespace, - Partition: r.Spec.Service.Partition, - Locality: copyLocality(r.Spec.Service.Locality), - }, - Check: check, - SkipNodeUpdate: r.Spec.SkipNodeUpdate, - Partition: r.Spec.Partition, - Locality: copyLocality(r.Spec.Locality), - }, nil -} - -func copyTaggedAddresses(taggedAddresses map[string]ServiceAddress) map[string]capi.ServiceAddress { - if taggedAddresses == nil { - return nil - } - result := make(map[string]capi.ServiceAddress, len(taggedAddresses)) - for k, v := range taggedAddresses { - result[k] = capi.ServiceAddress(v) - } - return result -} - -func copyLocality(locality *Locality) *capi.Locality { - if locality == nil { - return nil - } - return &capi.Locality{ - Region: locality.Region, - Zone: locality.Zone, - } -} - -var ( - ErrInvalidInterval = errors.New("invalid value for IntervalDuration") - ErrInvalidTimeout = errors.New("invalid value for TimeoutDuration") - ErrInvalidDergisterAfter = errors.New("invalid value for DeregisterCriticalServiceAfterDuration") -) - -func copyHealthCheck(healthCheck *HealthCheck) (*capi.AgentCheck, error) { - if healthCheck == nil 
{ - return nil, nil - } - - // TODO: handle error - intervalDuration, err := time.ParseDuration(healthCheck.Definition.IntervalDuration) - if err != nil { - return nil, ErrInvalidInterval - } - - timeoutDuration, err := time.ParseDuration(healthCheck.Definition.TimeoutDuration) - if err != nil { - return nil, ErrInvalidTimeout - } - - deregisterAfter, err := time.ParseDuration(healthCheck.Definition.DeregisterCriticalServiceAfterDuration) - if err != nil { - return nil, ErrInvalidDergisterAfter - } - - return &capi.AgentCheck{ - Node: healthCheck.Node, - Notes: healthCheck.Notes, - ServiceName: healthCheck.ServiceName, - CheckID: healthCheck.CheckID, - Name: healthCheck.Name, - Type: healthCheck.Type, - Status: healthCheck.Status, - ServiceID: healthCheck.ServiceID, - ExposedPort: healthCheck.ExposedPort, - Output: healthCheck.Output, - Namespace: healthCheck.Namespace, - Partition: healthCheck.Partition, - Definition: capi.HealthCheckDefinition{ - HTTP: healthCheck.Definition.HTTP, - TCP: healthCheck.Definition.TCP, - GRPC: healthCheck.Definition.GRPC, - GRPCUseTLS: healthCheck.Definition.GRPCUseTLS, - Method: healthCheck.Definition.Method, - Header: healthCheck.Definition.Header, - Body: healthCheck.Definition.Body, - TLSServerName: healthCheck.Definition.TLSServerName, - TLSSkipVerify: healthCheck.Definition.TLSSkipVerify, - OSService: healthCheck.Definition.OSService, - IntervalDuration: intervalDuration, - TimeoutDuration: timeoutDuration, - DeregisterCriticalServiceAfterDuration: deregisterAfter, - }, - }, nil -} - -// ToCatalogDeregistration converts a Registration to a Consul CatalogDeregistration. 
-func (r *Registration) ToCatalogDeregistration() *capi.CatalogDeregistration { - checkID := "" - if r.Spec.HealthCheck != nil { - checkID = r.Spec.HealthCheck.CheckID - } - - return &capi.CatalogDeregistration{ - Node: r.Spec.Node, - Address: r.Spec.Address, - Datacenter: r.Spec.Datacenter, - ServiceID: r.Spec.Service.ID, - CheckID: checkID, - Namespace: r.Spec.Service.Namespace, - Partition: r.Spec.Service.Partition, - } -} - -func (r *Registration) EqualExceptStatus(other *Registration) bool { - if r == nil || other == nil { - return false - } - - if r.Spec.ID != other.Spec.ID { - return false - } - - if r.Spec.Node != other.Spec.Node { - return false - } - - if r.Spec.Address != other.Spec.Address { - return false - } - - if !maps.Equal(r.Spec.TaggedAddresses, other.Spec.TaggedAddresses) { - return false - } - - if !maps.Equal(r.Spec.NodeMeta, other.Spec.NodeMeta) { - return false - } - - if r.Spec.Datacenter != other.Spec.Datacenter { - return false - } - - if !r.Spec.Service.Equal(&other.Spec.Service) { - return false - } - - if r.Spec.SkipNodeUpdate != other.Spec.SkipNodeUpdate { - return false - } - - if r.Spec.Partition != other.Spec.Partition { - return false - } - - if !r.Spec.HealthCheck.Equal(other.Spec.HealthCheck) { - return false - } - - if !r.Spec.Locality.Equal(other.Spec.Locality) { - return false - } - - return true -} - -func (s *Service) Equal(other *Service) bool { - if s == nil && other == nil { - return true - } - - if s == nil || other == nil { - return false - } - - if s.ID != other.ID { - return false - } - - if s.Name != other.Name { - return false - } - - if !slices.Equal(s.Tags, other.Tags) { - return false - } - - if !maps.Equal(s.Meta, other.Meta) { - return false - } - - if s.Port != other.Port { - return false - } - - if s.Address != other.Address { - return false - } - - if s.SocketPath != other.SocketPath { - return false - } - - if !maps.Equal(s.TaggedAddresses, other.TaggedAddresses) { - return false - } - - if 
!s.Weights.Equal(other.Weights) { - return false - } - - if s.EnableTagOverride != other.EnableTagOverride { - return false - } - - if s.Namespace != other.Namespace { - return false - } - - if s.Partition != other.Partition { - return false - } - - if !s.Locality.Equal(other.Locality) { - return false - } - return true -} - -func (l *Locality) Equal(other *Locality) bool { - if l == nil && other == nil { - return true - } - - if l == nil || other == nil { - return false - } - if l.Region != other.Region { - return false - } - if l.Zone != other.Zone { - return false - } - return true -} - -func (w Weights) Equal(other Weights) bool { - if w.Passing != other.Passing { - return false - } - - if w.Warning != other.Warning { - return false - } - return true -} - -func (h *HealthCheck) Equal(other *HealthCheck) bool { - if h == nil && other == nil { - return true - } - - if h == nil || other == nil { - return false - } - - if h.Node != other.Node { - return false - } - - if h.CheckID != other.CheckID { - return false - } - - if h.Name != other.Name { - return false - } - - if h.Status != other.Status { - return false - } - - if h.Notes != other.Notes { - return false - } - - if h.Output != other.Output { - return false - } - - if h.ServiceID != other.ServiceID { - return false - } - - if h.ServiceName != other.ServiceName { - return false - } - - if h.Type != other.Type { - return false - } - - if h.ExposedPort != other.ExposedPort { - return false - } - - if h.Namespace != other.Namespace { - return false - } - - if h.Partition != other.Partition { - return false - } - - if !h.Definition.Equal(other.Definition) { - return false - } - - return true -} - -func (h HealthCheckDefinition) Equal(other HealthCheckDefinition) bool { - if h.HTTP != other.HTTP { - return false - } - - if len(h.Header) != len(other.Header) { - return false - } - - for k, v := range h.Header { - otherValues, ok := other.Header[k] - if !ok { - return false - } - - if !slices.Equal(v, otherValues) 
{ - return false - } - } - - if h.Method != other.Method { - return false - } - - if h.Body != other.Body { - return false - } - - if h.TLSServerName != other.TLSServerName { - return false - } - - if h.TLSSkipVerify != other.TLSSkipVerify { - return false - } - - if h.TCP != other.TCP { - return false - } - - if h.TCPUseTLS != other.TCPUseTLS { - return false - } - - if h.UDP != other.UDP { - return false - } - - if h.GRPC != other.GRPC { - return false - } - - if h.OSService != other.OSService { - return false - } - - if h.GRPCUseTLS != other.GRPCUseTLS { - return false - } - - if h.IntervalDuration != other.IntervalDuration { - return false - } - - if h.TimeoutDuration != other.TimeoutDuration { - return false - } - - if h.DeregisterCriticalServiceAfterDuration != other.DeregisterCriticalServiceAfterDuration { - return false - } - - return true -} - -func (r *Registration) KubernetesName() string { - return r.ObjectMeta.Name -} diff --git a/control-plane/api/v1alpha1/registration_types_test.go b/control-plane/api/v1alpha1/registration_types_test.go deleted file mode 100644 index 8c3744efb9..0000000000 --- a/control-plane/api/v1alpha1/registration_types_test.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package v1alpha1 - -import ( - "testing" - "time" - - capi "github.com/hashicorp/consul/api" - - "github.com/stretchr/testify/require" -) - -func TestToCatalogRegistration(tt *testing.T) { - cases := map[string]struct { - registration *Registration - expected *capi.CatalogRegistration - }{ - "minimal registration": { - registration: &Registration{ - Spec: RegistrationSpec{ - ID: "node-id", - Node: "node-virtual", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: Service{ - ID: "service-id", - Name: "service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - expected: &capi.CatalogRegistration{ - ID: "node-id", - Node: "node-virtual", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: &capi.AgentService{ - ID: "service-id", - Service: "service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - "maximal registration": { - registration: &Registration{ - Spec: RegistrationSpec{ - ID: "node-id", - Node: "node-virtual", - Address: "127.0.0.1", - TaggedAddresses: map[string]string{ - "lan": "8080", - }, - NodeMeta: map[string]string{ - "n1": "m1", - }, - Datacenter: "dc1", - Service: Service{ - ID: "service-id", - Name: "service-name", - Tags: []string{"tag1", "tag2"}, - Meta: map[string]string{ - "m1": "1", - "m2": "2", - }, - Port: 8080, - Address: "127.0.0.1", - TaggedAddresses: map[string]ServiceAddress{ - "lan": { - Address: "10.0.0.10", - Port: 5000, - }, - }, - Weights: Weights{ - Passing: 50, - Warning: 100, - }, - EnableTagOverride: true, - Locality: &Locality{ - Region: "us-east-1", - Zone: "auto", - }, - Namespace: "n1", - Partition: "p1", - }, - Partition: "p1", - HealthCheck: &HealthCheck{ - Node: "node-virtual", - CheckID: "service-check", - Name: "service-health", - Status: "passing", - Notes: "all about that service", - Output: "healthy", - ServiceID: "service-id", - ServiceName: "service-name", - Type: "readiness", - ExposedPort: 19000, - Definition: HealthCheckDefinition{ - HTTP: 
"/health", - TCP: "tcp-check", - Header: map[string][]string{ - "Content-Type": {"application/json"}, - }, - Method: "GET", - TLSServerName: "my-secure-tls-server", - TLSSkipVerify: true, - Body: "some-body", - GRPC: "/grpc-health-check", - GRPCUseTLS: true, - OSService: "osservice-name", - IntervalDuration: "5s", - TimeoutDuration: "10s", - DeregisterCriticalServiceAfterDuration: "30s", - }, - Namespace: "n1", - Partition: "p1", - }, - Locality: &Locality{ - Region: "us-east-1", - Zone: "auto", - }, - }, - }, - expected: &capi.CatalogRegistration{ - ID: "node-id", - Node: "node-virtual", - Address: "127.0.0.1", - TaggedAddresses: map[string]string{ - "lan": "8080", - }, - NodeMeta: map[string]string{ - "n1": "m1", - }, - Datacenter: "dc1", - Service: &capi.AgentService{ - ID: "service-id", - Service: "service-name", - Tags: []string{"tag1", "tag2"}, - Meta: map[string]string{ - "m1": "1", - "m2": "2", - }, - Port: 8080, - Address: "127.0.0.1", - TaggedAddresses: map[string]capi.ServiceAddress{ - "lan": { - Address: "10.0.0.10", - Port: 5000, - }, - }, - Weights: capi.AgentWeights{ - Passing: 50, - Warning: 100, - }, - EnableTagOverride: true, - Locality: &capi.Locality{ - Region: "us-east-1", - Zone: "auto", - }, - Namespace: "n1", - Partition: "p1", - }, - Check: &capi.AgentCheck{ - Node: "node-virtual", - CheckID: "service-check", - Name: "service-health", - Status: "passing", - Notes: "all about that service", - Output: "healthy", - ServiceID: "service-id", - ServiceName: "service-name", - Type: "readiness", - ExposedPort: 19000, - Definition: capi.HealthCheckDefinition{ - HTTP: "/health", - TCP: "tcp-check", - Header: map[string][]string{ - "Content-Type": {"application/json"}, - }, - Method: "GET", - TLSServerName: "my-secure-tls-server", - TLSSkipVerify: true, - Body: "some-body", - GRPC: "/grpc-health-check", - GRPCUseTLS: true, - OSService: "osservice-name", - IntervalDuration: toDuration(tt, "5s"), - TimeoutDuration: toDuration(tt, "10s"), - 
DeregisterCriticalServiceAfterDuration: toDuration(tt, "30s"), - }, - Namespace: "n1", - Partition: "p1", - }, - SkipNodeUpdate: false, - Partition: "p1", - Locality: &capi.Locality{ - Region: "us-east-1", - Zone: "auto", - }, - }, - }, - } - - for name, tc := range cases { - tc := tc - tt.Run(name, func(t *testing.T) { - t.Parallel() - actual, err := tc.registration.ToCatalogRegistration() - require.NoError(t, err) - require.Equal(t, tc.expected, actual) - }) - } -} - -func TestToCatalogDeregistration(tt *testing.T) { - cases := map[string]struct { - registration *Registration - expected *capi.CatalogDeregistration - }{ - "with health check": { - registration: &Registration{ - Spec: RegistrationSpec{ - ID: "node-id", - Node: "node-virtual", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: Service{ - ID: "service-id", - Namespace: "n1", - Partition: "p1", - }, - HealthCheck: &HealthCheck{ - CheckID: "checkID", - }, - }, - }, - expected: &capi.CatalogDeregistration{ - Node: "node-virtual", - Address: "127.0.0.1", - Datacenter: "dc1", - ServiceID: "service-id", - CheckID: "checkID", - Namespace: "n1", - Partition: "p1", - }, - }, - "no health check": { - registration: &Registration{ - Spec: RegistrationSpec{ - ID: "node-id", - Node: "node-virtual", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: Service{ - ID: "service-id", - Namespace: "n1", - Partition: "p1", - }, - }, - }, - expected: &capi.CatalogDeregistration{ - Node: "node-virtual", - Address: "127.0.0.1", - Datacenter: "dc1", - ServiceID: "service-id", - CheckID: "", - Namespace: "n1", - Partition: "p1", - }, - }, - } - - for name, tc := range cases { - tc := tc - tt.Run(name, func(t *testing.T) { - t.Parallel() - actual := tc.registration.ToCatalogDeregistration() - require.Equal(t, tc.expected, actual) - }) - } -} - -func toDuration(t *testing.T, d string) time.Duration { - t.Helper() - duration, err := time.ParseDuration(d) - if err != nil { - t.Fatal(err) - } - return duration -} diff --git 
a/control-plane/api/v1alpha1/registration_webhook.go b/control-plane/api/v1alpha1/registration_webhook.go deleted file mode 100644 index e2a69f3b0b..0000000000 --- a/control-plane/api/v1alpha1/registration_webhook.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package v1alpha1 - -import ( - "context" - "errors" - "fmt" - "net/http" - "time" - - "github.com/go-logr/logr" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - - "github.com/hashicorp/consul-k8s/control-plane/api/common" -) - -// +kubebuilder:object:generate=false - -type RegistrationWebhook struct { - Logger logr.Logger - - // ConsulMeta contains metadata specific to the Consul installation. - ConsulMeta common.ConsulMeta - - decoder *admission.Decoder - client.Client -} - -// +kubebuilder:webhook:verbs=create;update,path=/validate-v1alpha1-registration,mutating=false,failurePolicy=fail,groups=consul.hashicorp.com,resources=registrations,versions=v1alpha1,name=validate-registration.consul.hashicorp.com,sideEffects=None,admissionReviewVersions=v1beta1;v1 - -func (v *RegistrationWebhook) Handle(ctx context.Context, req admission.Request) admission.Response { - resource := &Registration{} - decodeErr := v.decoder.Decode(req, resource) - if decodeErr != nil { - return admission.Errored(http.StatusBadRequest, decodeErr) - } - - var err error - - err = errors.Join(err, validateRequiredFields(resource)) - err = errors.Join(err, validateHealthChecks(resource)) - if err != nil { - return admission.Errored(http.StatusBadRequest, err) - } - - return admission.Allowed("registration is valid") -} - -func (v *RegistrationWebhook) SetupWithManager(mgr ctrl.Manager) { - v.Logger.Info("setting up registration webhook") - v.decoder = admission.NewDecoder(mgr.GetScheme()) - mgr.GetWebhookServer().Register("/validate-v1alpha1-registration", &admission.Webhook{Handler: v}) 
-} - -func validateRequiredFields(registration *Registration) error { - var err error - if registration.Spec.Node == "" { - err = errors.Join(err, errors.New("registration.Spec.Node is required")) - } - if registration.Spec.Service.Name == "" { - err = errors.Join(err, errors.New("registration.Spec.Service.Name is required")) - } - if registration.Spec.Address == "" { - err = errors.Join(err, errors.New("registration.Spec.Address is required")) - } - - if err != nil { - return err - } - return nil -} - -var validStatuses = map[string]struct{}{ - "passing": {}, - "warning": {}, - "critical": {}, -} - -func validateHealthChecks(registration *Registration) error { - if registration.Spec.HealthCheck == nil { - return nil - } - - var err error - - if registration.Spec.HealthCheck.Name == "" { - err = errors.Join(err, errors.New("registration.Spec.HealthCheck.Name is required")) - } - - // status must be one "passing", "warning", or "critical" - if _, ok := validStatuses[registration.Spec.HealthCheck.Status]; !ok { - err = errors.Join(err, fmt.Errorf("invalid registration.Spec.HealthCheck.Status value, must be 'passing', 'warning', or 'critical', actual: %q", registration.Spec.HealthCheck.Status)) - } - - // parse all durations and check for any errors - _, parseErr := time.ParseDuration(registration.Spec.HealthCheck.Definition.IntervalDuration) - if parseErr != nil { - err = errors.Join(err, fmt.Errorf("invalid registration.Spec.HealthCheck.Definition.IntervalDuration value: %q", registration.Spec.HealthCheck.Definition.IntervalDuration)) - } - - if registration.Spec.HealthCheck.Definition.TimeoutDuration != "" { - _, timeoutErr := time.ParseDuration(registration.Spec.HealthCheck.Definition.TimeoutDuration) - if timeoutErr != nil { - err = errors.Join(err, fmt.Errorf("invalid registration.Spec.HealthCheck.Definition.TimeoutDuration value: %q", registration.Spec.HealthCheck.Definition.TimeoutDuration)) - } - } - - if 
registration.Spec.HealthCheck.Definition.DeregisterCriticalServiceAfterDuration != "" { - _, deregCriticalErr := time.ParseDuration(registration.Spec.HealthCheck.Definition.DeregisterCriticalServiceAfterDuration) - if deregCriticalErr != nil { - err = errors.Join(err, fmt.Errorf("invalid registration.Spec.HealthCheck.Definition.DeregisterCriticalServiceAfterDuration value: %q", registration.Spec.HealthCheck.Definition.DeregisterCriticalServiceAfterDuration)) - } - } - - if err != nil { - return err - } - - return nil -} diff --git a/control-plane/api/v1alpha1/registration_webhook_test.go b/control-plane/api/v1alpha1/registration_webhook_test.go deleted file mode 100644 index 8883cbda10..0000000000 --- a/control-plane/api/v1alpha1/registration_webhook_test.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package v1alpha1 - -import ( - "context" - "encoding/json" - "testing" - - logrtest "github.com/go-logr/logr/testr" - "github.com/stretchr/testify/require" - admissionv1 "k8s.io/api/admission/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" -) - -func TestValidateRegistration(t *testing.T) { - cases := map[string]struct { - newResource *Registration - expectedToAllow bool - expectedErrMessage string - }{ - "valid with health check, status 'passing'": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - }, - Spec: RegistrationSpec{ - Node: "node-virtual", - Address: "10.2.2.1", - Service: Service{Name: "test-service"}, - HealthCheck: &HealthCheck{ - Name: "check name", - Status: "passing", - Definition: HealthCheckDefinition{ - IntervalDuration: "10s", - }, - }, - }, - }, - expectedToAllow: true, - }, - "valid with health check, status 'warning'": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
"test-registration", - }, - Spec: RegistrationSpec{ - Node: "node-virtual", - Address: "10.2.2.1", - Service: Service{Name: "test-service"}, - HealthCheck: &HealthCheck{ - Name: "check name", - Status: "warning", - Definition: HealthCheckDefinition{ - IntervalDuration: "10s", - }, - }, - }, - }, - expectedToAllow: true, - }, - "valid with health check, status 'critical'": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - }, - Spec: RegistrationSpec{ - Node: "node-virtual", - Address: "10.2.2.1", - Service: Service{Name: "test-service"}, - HealthCheck: &HealthCheck{ - Name: "check name", - Status: "critical", - Definition: HealthCheckDefinition{ - IntervalDuration: "10s", - }, - }, - }, - }, - expectedToAllow: true, - }, - "valid without health check": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - }, - Spec: RegistrationSpec{ - Node: "node-virtual", - Address: "10.2.2.1", - Service: Service{Name: "test-service"}, - HealthCheck: nil, - }, - }, - expectedToAllow: true, - }, - "invalid, missing node field": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - }, - Spec: RegistrationSpec{ - Node: "", - Address: "10.2.2.1", - Service: Service{Name: "test-service"}, - HealthCheck: nil, - }, - }, - expectedToAllow: false, - expectedErrMessage: "registration.Spec.Node is required", - }, - "invalid, missing address field": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - }, - Spec: RegistrationSpec{ - Node: "test-node", - Address: "", - Service: Service{Name: "test-service"}, - HealthCheck: nil, - }, - }, - expectedToAllow: false, - expectedErrMessage: "registration.Spec.Address is required", - }, - "invalid, missing service.name field": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - }, - Spec: RegistrationSpec{ - Node: "test-node", - Address: 
"10.2.2.1", - Service: Service{Name: ""}, - HealthCheck: nil, - }, - }, - expectedToAllow: false, - expectedErrMessage: "registration.Spec.Service.Name is required", - }, - "invalid, health check is set and name is missing": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - }, - Spec: RegistrationSpec{ - Node: "test-node", - Address: "10.2.2.1", - Service: Service{Name: "test-service"}, - HealthCheck: &HealthCheck{ - Name: "", - Status: "passing", - Definition: HealthCheckDefinition{ - IntervalDuration: "10s", - }, - }, - }, - }, - expectedToAllow: false, - expectedErrMessage: "registration.Spec.HealthCheck.Name is required", - }, - "invalid, health check is set and intervalDuration is missing": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - }, - Spec: RegistrationSpec{ - Node: "test-node", - Address: "10.2.2.1", - Service: Service{Name: "test-service"}, - HealthCheck: &HealthCheck{ - Name: "check name", - Status: "passing", - Definition: HealthCheckDefinition{ - IntervalDuration: "", - }, - }, - }, - }, - expectedToAllow: false, - expectedErrMessage: "invalid registration.Spec.HealthCheck.Definition.IntervalDuration value: \"\"", - }, - "invalid, health check is set and intervalDuration is invalid duration type": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - }, - Spec: RegistrationSpec{ - Node: "test-node", - Address: "10.2.2.1", - Service: Service{Name: "test-service"}, - HealthCheck: &HealthCheck{ - Name: "check name", - Status: "passing", - Definition: HealthCheckDefinition{ - IntervalDuration: "150", - }, - }, - }, - }, - expectedToAllow: false, - expectedErrMessage: "invalid registration.Spec.HealthCheck.Definition.IntervalDuration value: \"150\"", - }, - "invalid, health check is set and timeoutDuration is invalid duration type": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
"test-registration", - }, - Spec: RegistrationSpec{ - Node: "test-node", - Address: "10.2.2.1", - Service: Service{Name: "test-service"}, - HealthCheck: &HealthCheck{ - Name: "check name", - Status: "passing", - Definition: HealthCheckDefinition{ - IntervalDuration: "10s", - TimeoutDuration: "150", - }, - }, - }, - }, - expectedToAllow: false, - expectedErrMessage: "invalid registration.Spec.HealthCheck.Definition.TimeoutDuration value: \"150\"", - }, - "invalid, health check is set and deregisterCriticalServiceAfterDuration is invalid duration type": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - }, - Spec: RegistrationSpec{ - Node: "test-node", - Address: "10.2.2.1", - Service: Service{Name: "test-service"}, - HealthCheck: &HealthCheck{ - Name: "check name", - Status: "passing", - Definition: HealthCheckDefinition{ - IntervalDuration: "10s", - TimeoutDuration: "150s", - DeregisterCriticalServiceAfterDuration: "40", - }, - }, - }, - }, - expectedToAllow: false, - expectedErrMessage: "invalid registration.Spec.HealthCheck.Definition.DeregisterCriticalServiceAfterDuration value: \"40\"", - }, - "invalid, health check is set and status is not 'passing', 'critical', or 'warning'": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - }, - Spec: RegistrationSpec{ - Node: "test-node", - Address: "10.2.2.1", - Service: Service{Name: "test-service"}, - HealthCheck: &HealthCheck{ - Name: "check name", - Status: "wrong", - Definition: HealthCheckDefinition{ - IntervalDuration: "10s", - }, - }, - }, - }, - expectedToAllow: false, - expectedErrMessage: "invalid registration.Spec.HealthCheck.Status value, must be 'passing', 'warning', or 'critical', actual: \"wrong\"", - }, - "everything that can go wrong has gone wrong": { - newResource: &Registration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - }, - Spec: RegistrationSpec{ - Node: "", - Address: "", - Service: 
Service{Name: ""}, - HealthCheck: &HealthCheck{ - Name: "", - Status: "wrong", - Definition: HealthCheckDefinition{ - IntervalDuration: "10", - TimeoutDuration: "150", - DeregisterCriticalServiceAfterDuration: "40", - }, - }, - }, - }, - expectedToAllow: false, - expectedErrMessage: "registration.Spec.Node is required\nregistration.Spec.Service.Name is required\nregistration.Spec.Address is required\nregistration.Spec.HealthCheck.Name is required\ninvalid registration.Spec.HealthCheck.Status value, must be 'passing', 'warning', or 'critical', actual: \"wrong\"\ninvalid registration.Spec.HealthCheck.Definition.IntervalDuration value: \"10\"\ninvalid registration.Spec.HealthCheck.Definition.TimeoutDuration value: \"150\"\ninvalid registration.Spec.HealthCheck.Definition.DeregisterCriticalServiceAfterDuration value: \"40\"", - }, - } - for name, c := range cases { - t.Run(name, func(t *testing.T) { - ctx := context.Background() - marshalledRequestObject, err := json.Marshal(c.newResource) - require.NoError(t, err) - s := runtime.NewScheme() - s.AddKnownTypes(GroupVersion, &Registration{}, &RegistrationList{}) - client := fake.NewClientBuilder().WithScheme(s).Build() - decoder := admission.NewDecoder(s) - - validator := &RegistrationWebhook{ - Client: client, - Logger: logrtest.New(t), - decoder: decoder, - } - response := validator.Handle(ctx, admission.Request{ - AdmissionRequest: admissionv1.AdmissionRequest{ - Name: c.newResource.KubernetesName(), - Namespace: "default", - Operation: admissionv1.Create, - Object: runtime.RawExtension{ - Raw: marshalledRequestObject, - }, - }, - }) - - require.Equal(t, c.expectedToAllow, response.Allowed) - if c.expectedErrMessage != "" { - require.Equal(t, c.expectedErrMessage, response.AdmissionResponse.Result.Message) - } - }) - } -} diff --git a/control-plane/api/v1alpha1/zz_generated.deepcopy.go b/control-plane/api/v1alpha1/zz_generated.deepcopy.go index a8aed9b1ff..2a1854d178 100644 --- 
a/control-plane/api/v1alpha1/zz_generated.deepcopy.go +++ b/control-plane/api/v1alpha1/zz_generated.deepcopy.go @@ -881,52 +881,6 @@ func (in *HashPolicy) DeepCopy() *HashPolicy { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HealthCheck) DeepCopyInto(out *HealthCheck) { - *out = *in - in.Definition.DeepCopyInto(&out.Definition) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheck. -func (in *HealthCheck) DeepCopy() *HealthCheck { - if in == nil { - return nil - } - out := new(HealthCheck) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HealthCheckDefinition) DeepCopyInto(out *HealthCheckDefinition) { - *out = *in - if in.Header != nil { - in, out := &in.Header, &out.Header - *out = make(map[string][]string, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make([]string, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckDefinition. -func (in *HealthCheckDefinition) DeepCopy() *HealthCheckDefinition { - if in == nil { - return nil - } - out := new(HealthCheckDefinition) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressGateway) DeepCopyInto(out *IngressGateway) { *out = *in @@ -1781,21 +1735,6 @@ func (in *LocalJWKS) DeepCopy() *LocalJWKS { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Locality) DeepCopyInto(out *Locality) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Locality. -func (in *Locality) DeepCopy() *Locality { - if in == nil { - return nil - } - out := new(Locality) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Mesh) DeepCopyInto(out *Mesh) { *out = *in @@ -2544,131 +2483,6 @@ func (in *ReadWriteRatesConfig) DeepCopy() *ReadWriteRatesConfig { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Registration) DeepCopyInto(out *Registration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Registration. -func (in *Registration) DeepCopy() *Registration { - if in == nil { - return nil - } - out := new(Registration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Registration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RegistrationList) DeepCopyInto(out *RegistrationList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Registration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrationList. 
-func (in *RegistrationList) DeepCopy() *RegistrationList { - if in == nil { - return nil - } - out := new(RegistrationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RegistrationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RegistrationSpec) DeepCopyInto(out *RegistrationSpec) { - *out = *in - if in.TaggedAddresses != nil { - in, out := &in.TaggedAddresses, &out.TaggedAddresses - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.NodeMeta != nil { - in, out := &in.NodeMeta, &out.NodeMeta - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - in.Service.DeepCopyInto(&out.Service) - if in.HealthCheck != nil { - in, out := &in.HealthCheck, &out.HealthCheck - *out = new(HealthCheck) - (*in).DeepCopyInto(*out) - } - if in.Locality != nil { - in, out := &in.Locality, &out.Locality - *out = new(Locality) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrationSpec. -func (in *RegistrationSpec) DeepCopy() *RegistrationSpec { - if in == nil { - return nil - } - out := new(RegistrationSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RegistrationStatus) DeepCopyInto(out *RegistrationStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make(Conditions, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.LastSyncedTime != nil { - in, out := &in.LastSyncedTime, &out.LastSyncedTime - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrationStatus. -func (in *RegistrationStatus) DeepCopy() *RegistrationStatus { - if in == nil { - return nil - } - out := new(RegistrationStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RemoteJWKS) DeepCopyInto(out *RemoteJWKS) { *out = *in @@ -3142,61 +2956,6 @@ func (in *SecretRefStatus) DeepCopy() *SecretRefStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Service) DeepCopyInto(out *Service) { - *out = *in - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Meta != nil { - in, out := &in.Meta, &out.Meta - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.TaggedAddresses != nil { - in, out := &in.TaggedAddresses, &out.TaggedAddresses - *out = make(map[string]ServiceAddress, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.Weights = in.Weights - if in.Locality != nil { - in, out := &in.Locality, &out.Locality - *out = new(Locality) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. 
-func (in *Service) DeepCopy() *Service { - if in == nil { - return nil - } - out := new(Service) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceAddress) DeepCopyInto(out *ServiceAddress) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAddress. -func (in *ServiceAddress) DeepCopy() *ServiceAddress { - if in == nil { - return nil - } - out := new(ServiceAddress) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceConsumer) DeepCopyInto(out *ServiceConsumer) { *out = *in @@ -4275,18 +4034,3 @@ func (in *Upstreams) DeepCopy() *Upstreams { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Weights) DeepCopyInto(out *Weights) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Weights. -func (in *Weights) DeepCopy() *Weights { - if in == nil { - return nil - } - out := new(Weights) - in.DeepCopyInto(out) - return out -} diff --git a/control-plane/build-support/functions/10-util.sh b/control-plane/build-support/functions/10-util.sh index 3f0a140f9c..6cc632f55c 100644 --- a/control-plane/build-support/functions/10-util.sh +++ b/control-plane/build-support/functions/10-util.sh @@ -631,8 +631,7 @@ function update_version_helm { full_consul_version="$5-$3" full_consul_dataplane_version="$7-$3" elif test "$3" == "dev"; then - full_version="${2%.*}-$3" - full_version_k8s_for_chart_version="$2-$3" + full_version="$2-$3" # strip off the last minor patch version so that the consul image can be set to something like 1.16-dev. 
The image # is produced by Consul every night full_consul_version="${5%.*}-$3" @@ -658,7 +657,7 @@ function update_version_helm { fi sed_i ${SED_EXT} -e "s/(imageK8S:.*\/consul-k8s-control-plane:)[^\"]*/imageK8S: $4${full_version}/g" "${vfile}" - sed_i ${SED_EXT} -e "s/(version:[[:space:]]*)[^\"]*/\1${full_version_k8s_for_chart_version}/g" "${cfile}" + sed_i ${SED_EXT} -e "s/(version:[[:space:]]*)[^\"]*/\1${full_version}/g" "${cfile}" sed_i ${SED_EXT} -e "s/(appVersion:[[:space:]]*)[^\"]*/\1${full_consul_version}/g" "${cfile}" sed_i ${SED_EXT} -e "s/(image:.*\/consul-k8s-control-plane:)[^\"]*/image: $4${full_version}/g" "${cfile}" @@ -776,8 +775,7 @@ function set_changelog { fi compatibility_note=" -> NOTE: Consul K8s ${version_short}.x is compatible with Consul ${consul_version_short}.x and Consul Dataplane ${consul_dataplane_version_short}.x. Refer to our [compatibility matrix](https://developer.hashicorp.com/consul/docs/k8s/compatibility) for more info. -" +> NOTE: Consul K8s ${version_short}.x is compatible with Consul ${consul_version_short}.x and Consul Dataplane ${consul_dataplane_version_short}.x. Refer to our [compatibility matrix](https://developer.hashicorp.com/consul/docs/k8s/compatibility) for more info." fi cat <tmp && mv tmp "${curdir}"/CHANGELOG.MD diff --git a/control-plane/catalog/registration/cache.go b/control-plane/catalog/registration/cache.go deleted file mode 100644 index 357a59b33c..0000000000 --- a/control-plane/catalog/registration/cache.go +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package registration - -import ( - "bytes" - "context" - "errors" - "fmt" - "slices" - "strings" - "sync" - "text/template" - - mapset "github.com/deckarep/golang-set/v2" - "github.com/go-logr/logr" - "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/consul" - capi "github.com/hashicorp/consul/api" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const NotInServiceMeshFilter = "ServiceMeta[\"managed-by\"] != \"consul-k8s-endpoints-controller\"" - -func init() { - gatewayTpl = template.Must(template.New("root").Parse(strings.TrimSpace(gatewayRulesTpl))) -} - -type templateArgs struct { - EnablePartitions bool - Partition string - EnableNamespaces bool - Namespace string - ServiceName string -} - -var ( - gatewayTpl *template.Template - gatewayRulesTpl = ` -{{ if .EnablePartitions }} -partition "{{.Partition}}" { -{{- end }} - {{- if .EnableNamespaces }} - namespace "{{.Namespace}}" { - {{- end }} - service "{{.ServiceName}}" { - policy = "write" - } - {{- if .EnableNamespaces }} - } - {{- end }} -{{- if .EnablePartitions }} -} -{{- end }} -` -) - -type RegistrationCache struct { - // we include the context here so that we can use it for cancellation of `run` invocations that are scheduled after the cache is started - // this occurs when registering services in a new namespace as we have an invocation of `run` per namespace that is registered - ctx context.Context - - ConsulClientConfig *consul.Config - ConsulServerConnMgr consul.ServerConnectionManager - k8sClient client.Client - - serviceMtx *sync.Mutex - Services map[string]*v1alpha1.Registration - - namespaces mapset.Set[string] - - synced chan struct{} - UpdateChan chan string - - namespacesEnabled bool - partitionsEnabled bool -} - -func NewRegistrationCache(ctx context.Context, consulClientConfig *consul.Config, 
consulServerConnMgr consul.ServerConnectionManager, k8sClient client.Client, namespacesEnabled, partitionsEnabled bool) *RegistrationCache { - return &RegistrationCache{ - ctx: ctx, - ConsulClientConfig: consulClientConfig, - ConsulServerConnMgr: consulServerConnMgr, - k8sClient: k8sClient, - serviceMtx: &sync.Mutex{}, - Services: make(map[string]*v1alpha1.Registration), - UpdateChan: make(chan string), - synced: make(chan struct{}), - namespaces: mapset.NewSet[string](), - namespacesEnabled: namespacesEnabled, - partitionsEnabled: partitionsEnabled, - } -} - -// waitSynced is used to coordinate with the caller when the cache is initially filled. -func (c *RegistrationCache) waitSynced(ctx context.Context) { - select { - case <-c.synced: - return - case <-ctx.Done(): - return - } -} - -func (c *RegistrationCache) run(log logr.Logger, namespace string) { - once := &sync.Once{} - opts := &capi.QueryOptions{Filter: NotInServiceMeshFilter, Namespace: namespace} - - for { - select { - case <-c.ctx.Done(): - return - default: - - client, err := consul.NewClientFromConnMgr(c.ConsulClientConfig, c.ConsulServerConnMgr) - if err != nil { - log.Error(err, "error initializing consul client") - continue - } - entries, meta, err := client.Catalog().Services(opts.WithContext(c.ctx)) - if err != nil { - // if we timeout we don't care about the error message because it's expected to happen on long polls - // any other error we want to alert on - if !strings.Contains(strings.ToLower(err.Error()), "timeout") && - !strings.Contains(strings.ToLower(err.Error()), "no such host") && - !strings.Contains(strings.ToLower(err.Error()), "connection refused") { - log.Error(err, "error fetching registrations") - } - continue - } - - servicesToRemove := mapset.NewSet[string]() - servicesToAdd := mapset.NewSet[string]() - c.serviceMtx.Lock() - for svc := range c.Services { - if _, ok := entries[svc]; !ok { - servicesToRemove.Add(svc) - } - } - - for svc := range entries { - if _, ok := 
c.Services[svc]; !ok { - servicesToAdd.Add(svc) - } - } - c.serviceMtx.Unlock() - - for _, svc := range servicesToRemove.ToSlice() { - log.Info("consul deregistered service", "svcName", svc) - c.UpdateChan <- svc - } - - for _, svc := range servicesToAdd.ToSlice() { - registration := &v1alpha1.Registration{} - - if err := c.k8sClient.Get(c.ctx, types.NamespacedName{Name: svc, Namespace: namespace}, registration); err != nil { - if !k8serrors.IsNotFound(err) { - log.Error(err, "unable to get registration", "svcName", svc, "namespace", namespace) - } - continue - } - - c.Services[svc] = registration - } - - opts.WaitIndex = meta.LastIndex - once.Do(func() { - log.Info("Initial sync complete") - c.synced <- struct{}{} - }) - } - } -} - -func (c *RegistrationCache) get(svcName string) (*v1alpha1.Registration, bool) { - c.serviceMtx.Lock() - defer c.serviceMtx.Unlock() - val, ok := c.Services[svcName] - return val, ok -} - -func (c *RegistrationCache) aclsEnabled() bool { - return c.ConsulClientConfig.APIClientConfig.Token != "" || c.ConsulClientConfig.APIClientConfig.TokenFile != "" -} - -func (c *RegistrationCache) registerService(log logr.Logger, reg *v1alpha1.Registration) error { - client, err := consul.NewClientFromConnMgr(c.ConsulClientConfig, c.ConsulServerConnMgr) - if err != nil { - return err - } - - regReq, err := reg.ToCatalogRegistration() - if err != nil { - return err - } - - _, err = client.Catalog().Register(regReq, &capi.WriteOptions{Namespace: reg.Spec.Service.Namespace}) - if err != nil { - log.Error(err, "error registering service", "svcName", regReq.Service.Service) - return err - } - - if !c.namespaces.Contains(reg.Spec.Service.Namespace) && !emptyOrDefault(reg.Spec.Service.Namespace) { - c.namespaces.Add(reg.Spec.Service.Namespace) - go c.run(log, reg.Spec.Service.Namespace) - } - - log.Info("Successfully registered service", "svcName", regReq.Service.Service) - - return nil -} - -func emptyOrDefault(s string) bool { - return s == "" || s == 
"default" -} - -func (c *RegistrationCache) updateTermGWACLRole(log logr.Logger, registration *v1alpha1.Registration, termGWsToUpdate []v1alpha1.TerminatingGateway) error { - if len(termGWsToUpdate) == 0 { - log.Info("terminating gateway not found") - return nil - } - - client, err := consul.NewClientFromConnMgr(c.ConsulClientConfig, c.ConsulServerConnMgr) - if err != nil { - return err - } - - var data bytes.Buffer - if err := gatewayTpl.Execute(&data, templateArgs{ - EnablePartitions: c.partitionsEnabled, - Partition: registration.Spec.Service.Partition, - EnableNamespaces: c.namespacesEnabled, - Namespace: registration.Spec.Service.Namespace, - ServiceName: registration.Spec.Service.Name, - }); err != nil { - // just panic if we can't compile the simple template - // as it means something else is going severly wrong. - panic(err) - } - - var mErr error - for _, termGW := range termGWsToUpdate { - // the terminating gateway role is _always_ in the default namespace - roles, _, err := client.ACL().RoleList(&capi.QueryOptions{}) - if err != nil { - log.Error(err, "error reading role list") - return err - } - - policy := &capi.ACLPolicy{ - Name: servicePolicyName(registration.Spec.Service.Name), - Description: "Write policy for terminating gateways for external service", - Rules: data.String(), - Datacenters: []string{registration.Spec.Datacenter}, - } - - existingPolicy, _, err := client.ACL().PolicyReadByName(policy.Name, &capi.QueryOptions{}) - if err != nil { - log.Error(err, "error reading policy") - return err - } - - // we don't need to include the namespace/partition here because all roles and policies are created in the default namespace for consul-k8s managed resources. 
- writeOpts := &capi.WriteOptions{} - - if existingPolicy == nil { - policy, _, err = client.ACL().PolicyCreate(policy, writeOpts) - if err != nil { - return fmt.Errorf("error creating policy: %w", err) - } - } else { - policy = existingPolicy - } - var role *capi.ACLRole - for _, r := range roles { - if strings.HasSuffix(r.Name, fmt.Sprintf("-%s-acl-role", termGW.Name)) { - role = r - break - } - } - - if role == nil { - log.Info("terminating gateway role not found", "terminatingGatewayName", termGW.Name) - mErr = errors.Join(mErr, fmt.Errorf("terminating gateway role not found for %q", termGW.Name)) - continue - } - - role.Policies = append(role.Policies, &capi.ACLRolePolicyLink{Name: policy.Name, ID: policy.ID}) - - _, _, err = client.ACL().RoleUpdate(role, writeOpts) - if err != nil { - log.Error(err, "error updating role", "roleName", role.Name) - mErr = errors.Join(mErr, fmt.Errorf("error updating role %q", role.Name)) - continue - } - } - - return mErr -} - -func (c *RegistrationCache) deregisterService(log logr.Logger, reg *v1alpha1.Registration) error { - client, err := consul.NewClientFromConnMgr(c.ConsulClientConfig, c.ConsulServerConnMgr) - if err != nil { - return err - } - - deRegReq := reg.ToCatalogDeregistration() - _, err = client.Catalog().Deregister(deRegReq, nil) - if err != nil { - log.Error(err, "error deregistering service", "svcID", deRegReq.ServiceID) - return err - } - - c.serviceMtx.Lock() - defer c.serviceMtx.Unlock() - delete(c.Services, reg.Spec.Service.Name) - - log.Info("Successfully deregistered service", "svcID", deRegReq.ServiceID) - return nil -} - -func (c *RegistrationCache) removeTermGWACLRole(log logr.Logger, registration *v1alpha1.Registration, termGWsToUpdate []v1alpha1.TerminatingGateway) error { - if len(termGWsToUpdate) == 0 { - log.Info("terminating gateway not found") - return nil - } - - client, err := consul.NewClientFromConnMgr(c.ConsulClientConfig, c.ConsulServerConnMgr) - if err != nil { - return err - } - - var 
mErr error - for _, termGW := range termGWsToUpdate { - - // we don't need to include the namespace/partition here because all roles and policies are created in the default namespace for consul-k8s managed resources. - queryOpts := &capi.QueryOptions{} - writeOpts := &capi.WriteOptions{} - - roles, _, err := client.ACL().RoleList(queryOpts) - if err != nil { - return err - } - var role *capi.ACLRole - for _, r := range roles { - if strings.HasSuffix(r.Name, fmt.Sprintf("-%s-acl-role", termGW.Name)) { - role = r - break - } - } - - if role == nil { - log.Info("terminating gateway role not found", "terminatingGatewayName", termGW.Name) - mErr = errors.Join(mErr, fmt.Errorf("terminating gateway role not found for %q", termGW.Name)) - continue - } - - var policyID string - - expectedPolicyName := servicePolicyName(registration.Spec.Service.Name) - role.Policies = slices.DeleteFunc(role.Policies, func(i *capi.ACLRolePolicyLink) bool { - if i.Name == expectedPolicyName { - policyID = i.ID - return true - } - return false - }) - - if policyID == "" { - log.Info("policy not found on terminating gateway role", "policyName", expectedPolicyName, "terminatingGatewayName", termGW.Name) - continue - } - - _, _, err = client.ACL().RoleUpdate(role, writeOpts) - if err != nil { - log.Error(err, "error updating role", "roleName", role.Name) - mErr = errors.Join(mErr, fmt.Errorf("error updating role %q", role.Name)) - continue - } - - _, err = client.ACL().PolicyDelete(policyID, writeOpts) - if err != nil { - log.Error(err, "error deleting service policy", "policyID", policyID, "policyName", expectedPolicyName) - mErr = errors.Join(mErr, fmt.Errorf("error deleting service ACL policy %q", policyID)) - continue - } - } - - return mErr -} - -func servicePolicyName(name string) string { - return fmt.Sprintf("%s-write-policy", name) -} diff --git a/control-plane/catalog/registration/registrations_controller.go b/control-plane/catalog/registration/registrations_controller.go deleted file 
mode 100644 index 0b6b4bc24e..0000000000 --- a/control-plane/catalog/registration/registrations_controller.go +++ /dev/null @@ -1,297 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package registration - -import ( - "context" - "fmt" - "slices" - "time" - - "github.com/go-logr/logr" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/controllers/configentries" -) - -const ( - RegistrationFinalizer = "registration.finalizers.consul.hashicorp.com" - registrationByServiceNameIndex = "registrationName" -) - -var ( - ErrRegisteringService = fmt.Errorf("error registering service") - ErrDeregisteringService = fmt.Errorf("error deregistering service") - ErrUpdatingACLRoles = fmt.Errorf("error updating ACL roles") - ErrRemovingACLRoles = fmt.Errorf("error removing ACL roles") -) - -// RegistrationsController is the controller for Registrations resources. 
-type RegistrationsController struct { - client.Client - configentries.FinalizerPatcher - Scheme *runtime.Scheme - Cache *RegistrationCache - Log logr.Logger -} - -// +kubebuilder:rbac:groups=consul.hashicorp.com,resources=servicerouters,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=consul.hashicorp.com,resources=servicerouters/status,verbs=get;update;patch - -func (r *RegistrationsController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - log := r.Log.V(1).WithValues("registration", req.NamespacedName) - log.Info("Reconciling Registaration") - - registration := &v1alpha1.Registration{} - // get the registration - if err := r.Client.Get(ctx, req.NamespacedName, registration); err != nil { - if !k8serrors.IsNotFound(err) { - log.Error(err, "unable to get registration") - } - return ctrl.Result{}, client.IgnoreNotFound(err) - } - - cachedRegistration, ok := r.Cache.get(registration.Spec.Service.Name) - if slices.ContainsFunc(registration.Status.Conditions, func(c v1alpha1.Condition) bool { return c.Type == ConditionDeregistered }) { - if ok && registration.EqualExceptStatus(cachedRegistration) { - log.Info("Registration is in sync") - // registration is already in sync so we do nothing, this happens when consul deregisters a service - // and we update the status to show that consul deregistered it - return ctrl.Result{}, nil - } - } - - log.Info("need to reconcile") - - // deletion request - if !registration.ObjectMeta.DeletionTimestamp.IsZero() { - result := r.handleDeletion(ctx, log, registration) - - if result.hasErrors() { - err := r.UpdateStatus(ctx, log, registration, result) - if err != nil { - log.Error(err, "failed to update Registration status", "name", registration.Name, "namespace", registration.Namespace) - } - return ctrl.Result{}, result.errors() - } - return ctrl.Result{}, nil - } - - // registration request - result := r.handleRegistration(ctx, log, registration) - err := r.UpdateStatus(ctx, 
log, registration, result) - if err != nil { - log.Error(err, "failed to update Registration status", "name", registration.Name, "namespace", registration.Namespace) - } - if result.hasErrors() { - return ctrl.Result{}, result.errors() - } - - return ctrl.Result{}, nil -} - -func (c *RegistrationsController) watchForDeregistrations(ctx context.Context) { - for { - select { - case <-ctx.Done(): - return - case svc := <-c.Cache.UpdateChan: - // get all registrations for the service - regList := &v1alpha1.RegistrationList{} - err := c.Client.List(context.Background(), regList, client.MatchingFields{registrationByServiceNameIndex: svc}) - if err != nil { - c.Log.Error(err, "error listing registrations by service name", "serviceName", svc) - continue - } - for _, reg := range regList.Items { - - err := c.UpdateStatus(context.Background(), c.Log, ®, Result{Registering: false, ConsulDeregistered: true}) - if err != nil { - c.Log.Error(err, "failed to update Registration status", "name", reg.Name, "namespace", reg.Namespace) - } - } - } - } -} - -func (r *RegistrationsController) handleRegistration(ctx context.Context, log logr.Logger, registration *v1alpha1.Registration) Result { - log.Info("Registering service") - - result := Result{Registering: true} - - patch := r.AddFinalizersPatch(registration, RegistrationFinalizer) - err := r.Patch(ctx, registration, patch) - if err != nil { - err = fmt.Errorf("error adding finalizer: %w", err) - result.Finalizer = err - return result - } - - err = r.Cache.registerService(log, registration) - if err != nil { - result.Sync = err - result.Registration = fmt.Errorf("%w: %s", ErrRegisteringService, err) - return result - } - - if r.Cache.aclsEnabled() { - termGWsToUpdate, err := r.terminatingGatewaysToUpdate(ctx, log, registration) - if err != nil { - result.Sync = err - result.ACLUpdate = fmt.Errorf("%w: %s", ErrUpdatingACLRoles, err) - return result - } - - err = r.Cache.updateTermGWACLRole(log, registration, termGWsToUpdate) - if 
err != nil { - result.Sync = err - result.ACLUpdate = fmt.Errorf("%w: %s", ErrUpdatingACLRoles, err) - return result - } - } - return result -} - -func (r *RegistrationsController) terminatingGatewaysToUpdate(ctx context.Context, log logr.Logger, registration *v1alpha1.Registration) ([]v1alpha1.TerminatingGateway, error) { - termGWList := &v1alpha1.TerminatingGatewayList{} - err := r.Client.List(ctx, termGWList) - if err != nil { - log.Error(err, "error listing terminating gateways") - return nil, err - } - - termGWsToUpdate := make([]v1alpha1.TerminatingGateway, 0, len(termGWList.Items)) - for _, termGW := range termGWList.Items { - if slices.ContainsFunc(termGW.Spec.Services, termGWContainsService(registration)) { - termGWsToUpdate = append(termGWsToUpdate, termGW) - } - } - - return termGWsToUpdate, nil -} - -func termGWContainsService(registration *v1alpha1.Registration) func(v1alpha1.LinkedService) bool { - return func(svc v1alpha1.LinkedService) bool { - return svc.Name == registration.Spec.Service.Name - } -} - -func (r *RegistrationsController) handleDeletion(ctx context.Context, log logr.Logger, registration *v1alpha1.Registration) Result { - log.Info("Deregistering service") - result := Result{Registering: false} - err := r.Cache.deregisterService(log, registration) - if err != nil { - result.Sync = err - result.Deregistration = fmt.Errorf("%w: %s", ErrDeregisteringService, err) - return result - } - - if r.Cache.aclsEnabled() { - termGWsToUpdate, err := r.terminatingGatewaysToUpdate(ctx, log, registration) - if err != nil { - result.Sync = err - result.ACLUpdate = fmt.Errorf("%w: %s", ErrRemovingACLRoles, err) - return result - } - - err = r.Cache.removeTermGWACLRole(log, registration, termGWsToUpdate) - if err != nil { - result.Sync = err - result.ACLUpdate = fmt.Errorf("%w: %s", ErrRemovingACLRoles, err) - return result - } - } - - patch := r.RemoveFinalizersPatch(registration, RegistrationFinalizer) - err = r.Patch(ctx, registration, patch) - if err 
!= nil { - result.Finalizer = err - return result - } - - return result -} - -func (r *RegistrationsController) UpdateStatus(ctx context.Context, log logr.Logger, registration *v1alpha1.Registration, result Result) error { - registration.Status.LastSyncedTime = &metav1.Time{Time: time.Now()} - registration.Status.Conditions = v1alpha1.Conditions{ - syncedCondition(result), - } - - if result.Registering { - registration.Status.Conditions = append(registration.Status.Conditions, registrationCondition(result)) - } else { - registration.Status.Conditions = append(registration.Status.Conditions, deregistrationCondition(result)) - } - - if r.Cache.aclsEnabled() { - registration.Status.Conditions = append(registration.Status.Conditions, aclCondition(result)) - } - - err := r.Status().Update(ctx, registration) - if err != nil { - return err - } - return nil -} - -func (r *RegistrationsController) Logger(name types.NamespacedName) logr.Logger { - return r.Log.WithValues("request", name) -} - -func (r *RegistrationsController) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { - // setup the cache - go r.Cache.run(r.Log, "") - r.Cache.waitSynced(ctx) - - go r.watchForDeregistrations(ctx) - - // setup the index to lookup registrations by service name - if err := mgr.GetFieldIndexer().IndexField(ctx, &v1alpha1.Registration{}, registrationByServiceNameIndex, indexerFn); err != nil { - return err - } - - return ctrl.NewControllerManagedBy(mgr). - For(&v1alpha1.Registration{}). - Watches(&v1alpha1.TerminatingGateway{}, handler.EnqueueRequestsFromMapFunc(r.transformTerminatingGateways)). 
- Complete(r) -} - -func indexerFn(o client.Object) []string { - reg := o.(*v1alpha1.Registration) - return []string{reg.Spec.Service.Name} -} - -func (r *RegistrationsController) transformTerminatingGateways(ctx context.Context, o client.Object) []reconcile.Request { - termGW := o.(*v1alpha1.TerminatingGateway) - reqs := make([]reconcile.Request, 0, len(termGW.Spec.Services)) - for _, svc := range termGW.Spec.Services { - // lookup registrationList by service name and add it to the reconcile request - registrationList := &v1alpha1.RegistrationList{} - - err := r.Client.List(ctx, registrationList, client.MatchingFields{registrationByServiceNameIndex: svc.Name}) - if err != nil { - r.Log.Error(err, "error listing registrations by service name", "serviceName", svc.Name) - continue - } - - for _, reg := range registrationList.Items { - reqs = append(reqs, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: reg.Name, - Namespace: reg.Namespace, - }, - }) - } - } - return reqs -} diff --git a/control-plane/catalog/registration/registrations_controller_test.go b/control-plane/catalog/registration/registrations_controller_test.go deleted file mode 100644 index ce3dab14f5..0000000000 --- a/control-plane/catalog/registration/registrations_controller_test.go +++ /dev/null @@ -1,1087 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package registration_test - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strconv" - "strings" - "testing" - - logrtest "github.com/go-logr/logr/testing" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - - capi "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-uuid" - - "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/catalog/registration" - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" -) - -type serverResponseConfig struct { - registering bool - aclEnabled bool - errOnRegister bool - errOnDeregister bool - errOnPolicyRead bool - errOnPolicyWrite bool - errOnPolicyDelete bool - errOnRoleUpdate bool - policyExists bool - temGWRoleMissing bool -} - -func TestReconcile_Success(tt *testing.T) { - deletionTime := metav1.Now() - cases := map[string]struct { - registration *v1alpha1.Registration - terminatingGateways []runtime.Object - serverResponseConfig serverResponseConfig - expectedFinalizers []string - expectedConditions []v1alpha1.Condition - }{ - "registering - success on registration": { - registration: &v1alpha1.Registration{ - TypeMeta: metav1.TypeMeta{ - Kind: "Registration", - APIVersion: "consul.hashicorp.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - Finalizers: []string{registration.RegistrationFinalizer}, - }, - Spec: v1alpha1.RegistrationSpec{ - ID: "node-id", - Node: "virtual-node", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: v1alpha1.Service{ - ID: "service-id", - Name: 
"service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - terminatingGateways: []runtime.Object{ - &v1alpha1.TerminatingGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - }, - Spec: v1alpha1.TerminatingGatewaySpec{ - Services: []v1alpha1.LinkedService{ - { - Name: "service-name", - }, - }, - }, - }, - }, - serverResponseConfig: serverResponseConfig{registering: true}, - expectedFinalizers: []string{registration.RegistrationFinalizer}, - expectedConditions: []v1alpha1.Condition{ - { - Type: v1alpha1.ConditionSynced, - Status: v1.ConditionTrue, - Reason: "", - Message: "", - }, - { - Type: registration.ConditionRegistered, - Status: v1.ConditionTrue, - Reason: "", - Message: "", - }, - }, - }, - "registering -- ACLs enabled and policy does not exist": { - registration: &v1alpha1.Registration{ - TypeMeta: metav1.TypeMeta{ - Kind: "Registration", - APIVersion: "consul.hashicorp.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - Finalizers: []string{registration.RegistrationFinalizer}, - }, - Spec: v1alpha1.RegistrationSpec{ - ID: "node-id", - Node: "virtual-node", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: v1alpha1.Service{ - ID: "service-id", - Name: "service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - terminatingGateways: []runtime.Object{ - &v1alpha1.TerminatingGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - }, - Spec: v1alpha1.TerminatingGatewaySpec{ - Services: []v1alpha1.LinkedService{ - { - Name: "service-name", - }, - }, - }, - }, - }, - serverResponseConfig: serverResponseConfig{ - registering: true, - aclEnabled: true, - }, - expectedFinalizers: []string{registration.RegistrationFinalizer}, - expectedConditions: []v1alpha1.Condition{ - { - Type: v1alpha1.ConditionSynced, - Status: v1.ConditionTrue, - Reason: "", - Message: "", - }, - { - Type: registration.ConditionRegistered, - Status: v1.ConditionTrue, - Reason: "", - Message: "", 
- }, - { - Type: registration.ConditionACLsUpdated, - Status: v1.ConditionTrue, - Reason: "", - Message: "", - }, - }, - }, - "registering -- ACLs enabled and policy does exists": { - registration: &v1alpha1.Registration{ - TypeMeta: metav1.TypeMeta{ - Kind: "Registration", - APIVersion: "consul.hashicorp.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - Finalizers: []string{registration.RegistrationFinalizer}, - }, - Spec: v1alpha1.RegistrationSpec{ - ID: "node-id", - Node: "virtual-node", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: v1alpha1.Service{ - ID: "service-id", - Name: "service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - terminatingGateways: []runtime.Object{ - &v1alpha1.TerminatingGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - }, - Spec: v1alpha1.TerminatingGatewaySpec{ - Services: []v1alpha1.LinkedService{ - { - Name: "service-name", - }, - }, - }, - }, - }, - serverResponseConfig: serverResponseConfig{ - registering: true, - aclEnabled: true, - policyExists: true, - }, - expectedFinalizers: []string{registration.RegistrationFinalizer}, - expectedConditions: []v1alpha1.Condition{ - { - Type: v1alpha1.ConditionSynced, - Status: v1.ConditionTrue, - Reason: "", - Message: "", - }, - { - Type: registration.ConditionRegistered, - Status: v1.ConditionTrue, - Reason: "", - Message: "", - }, - { - Type: registration.ConditionACLsUpdated, - Status: v1.ConditionTrue, - Reason: "", - Message: "", - }, - }, - }, - "deregistering": { - registration: &v1alpha1.Registration{ - TypeMeta: metav1.TypeMeta{ - Kind: "Registration", - APIVersion: "consul.hashicorp.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - Finalizers: []string{registration.RegistrationFinalizer}, - DeletionTimestamp: &deletionTime, - }, - Spec: v1alpha1.RegistrationSpec{ - ID: "node-id", - Node: "virtual-node", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: 
v1alpha1.Service{ - ID: "service-id", - Name: "service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - terminatingGateways: []runtime.Object{ - &v1alpha1.TerminatingGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - }, - Spec: v1alpha1.TerminatingGatewaySpec{ - Services: []v1alpha1.LinkedService{ - { - Name: "service-name", - }, - }, - }, - }, - }, - serverResponseConfig: serverResponseConfig{ - registering: false, - aclEnabled: false, - }, - expectedConditions: []v1alpha1.Condition{}, - }, - "deregistering - ACLs enabled": { - registration: &v1alpha1.Registration{ - TypeMeta: metav1.TypeMeta{ - Kind: "Registration", - APIVersion: "consul.hashicorp.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - Finalizers: []string{registration.RegistrationFinalizer}, - DeletionTimestamp: &deletionTime, - }, - Spec: v1alpha1.RegistrationSpec{ - ID: "node-id", - Node: "virtual-node", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: v1alpha1.Service{ - ID: "service-id", - Name: "service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - terminatingGateways: []runtime.Object{ - &v1alpha1.TerminatingGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - }, - Spec: v1alpha1.TerminatingGatewaySpec{ - Services: []v1alpha1.LinkedService{ - { - Name: "service-name", - }, - }, - }, - }, - }, - serverResponseConfig: serverResponseConfig{ - registering: false, - aclEnabled: true, - }, - expectedConditions: []v1alpha1.Condition{}, - }, - } - - for name, tc := range cases { - tc := tc - tt.Run(name, func(t *testing.T) { - t.Parallel() - s := runtime.NewScheme() - s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.Registration{}, &v1alpha1.TerminatingGateway{}, &v1alpha1.TerminatingGatewayList{}) - ctx := context.Background() - - consulServer, testClient := fakeConsulServer(t, tc.serverResponseConfig, tc.registration.Spec.Service.Name) - defer consulServer.Close() - - runtimeObjs := 
[]runtime.Object{tc.registration} - runtimeObjs = append(runtimeObjs, tc.terminatingGateways...) - fakeClient := fake.NewClientBuilder(). - WithScheme(s). - WithRuntimeObjects(runtimeObjs...). - WithStatusSubresource(&v1alpha1.Registration{}). - Build() - - controller := ®istration.RegistrationsController{ - Client: fakeClient, - Log: logrtest.NewTestLogger(t), - Scheme: s, - Cache: registration.NewRegistrationCache(context.Background(), testClient.Cfg, testClient.Watcher, fakeClient, false, false), - } - - _, err := controller.Reconcile(ctx, ctrl.Request{ - NamespacedName: types.NamespacedName{Name: tc.registration.Name, Namespace: tc.registration.Namespace}, - }) - require.NoError(t, err) - - fetchedReg := &v1alpha1.Registration{TypeMeta: metav1.TypeMeta{APIVersion: "consul.hashicorp.com/v1alpha1", Kind: "Registration"}} - fakeClient.Get(ctx, types.NamespacedName{Name: tc.registration.Name}, fetchedReg) - - require.Len(t, fetchedReg.Status.Conditions, len(tc.expectedConditions)) - - for i, c := range fetchedReg.Status.Conditions { - if diff := cmp.Diff(c, tc.expectedConditions[i], cmpopts.IgnoreFields(v1alpha1.Condition{}, "LastTransitionTime", "Message")); diff != "" { - t.Errorf("unexpected condition diff: %s", diff) - } - } - - require.ElementsMatch(t, fetchedReg.Finalizers, tc.expectedFinalizers) - }) - } -} - -func TestReconcile_Failure(tt *testing.T) { - deletionTime := metav1.Now() - cases := map[string]struct { - registration *v1alpha1.Registration - terminatingGateways []runtime.Object - serverResponseConfig serverResponseConfig - expectedConditions []v1alpha1.Condition - }{ - "registering - registration call to consul fails": { - registration: &v1alpha1.Registration{ - TypeMeta: metav1.TypeMeta{ - Kind: "Registration", - APIVersion: "consul.hashicorp.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - Finalizers: []string{registration.RegistrationFinalizer}, - }, - Spec: v1alpha1.RegistrationSpec{ - ID: "node-id", - Node: 
"virtual-node", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: v1alpha1.Service{ - ID: "service-id", - Name: "service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - terminatingGateways: []runtime.Object{ - &v1alpha1.TerminatingGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - }, - Spec: v1alpha1.TerminatingGatewaySpec{ - Services: []v1alpha1.LinkedService{ - { - Name: "service-name", - }, - }, - }, - }, - }, - serverResponseConfig: serverResponseConfig{ - registering: true, - errOnRegister: true, - }, - expectedConditions: []v1alpha1.Condition{ - { - Type: v1alpha1.ConditionSynced, - Status: v1.ConditionFalse, - Reason: registration.SyncError, - }, - { - Type: registration.ConditionRegistered, - Status: v1.ConditionFalse, - Reason: registration.ConsulErrorRegistration, - }, - }, - }, - "registering - terminating gateway acl role not found": { - registration: &v1alpha1.Registration{ - TypeMeta: metav1.TypeMeta{ - Kind: "Registration", - APIVersion: "consul.hashicorp.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - Finalizers: []string{registration.RegistrationFinalizer}, - }, - Spec: v1alpha1.RegistrationSpec{ - ID: "node-id", - Node: "virtual-node", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: v1alpha1.Service{ - ID: "service-id", - Name: "service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - terminatingGateways: []runtime.Object{ - &v1alpha1.TerminatingGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - }, - Spec: v1alpha1.TerminatingGatewaySpec{ - Services: []v1alpha1.LinkedService{ - { - Name: "service-name", - }, - }, - }, - }, - }, - serverResponseConfig: serverResponseConfig{ - registering: true, - aclEnabled: true, - temGWRoleMissing: true, - }, - expectedConditions: []v1alpha1.Condition{ - { - Type: v1alpha1.ConditionSynced, - Status: v1.ConditionFalse, - Reason: registration.SyncError, - }, - { - Type: 
registration.ConditionRegistered, - Status: v1.ConditionTrue, - }, - { - Type: registration.ConditionACLsUpdated, - Status: v1.ConditionFalse, - Reason: registration.ConsulErrorACL, - }, - }, - }, - "registering - error reading policy": { - registration: &v1alpha1.Registration{ - TypeMeta: metav1.TypeMeta{ - Kind: "Registration", - APIVersion: "consul.hashicorp.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - Finalizers: []string{registration.RegistrationFinalizer}, - }, - Spec: v1alpha1.RegistrationSpec{ - ID: "node-id", - Node: "virtual-node", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: v1alpha1.Service{ - ID: "service-id", - Name: "service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - terminatingGateways: []runtime.Object{ - &v1alpha1.TerminatingGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - }, - Spec: v1alpha1.TerminatingGatewaySpec{ - Services: []v1alpha1.LinkedService{ - { - Name: "service-name", - }, - }, - }, - }, - }, - serverResponseConfig: serverResponseConfig{ - registering: true, - aclEnabled: true, - errOnPolicyRead: true, - policyExists: true, - }, - expectedConditions: []v1alpha1.Condition{ - { - Type: v1alpha1.ConditionSynced, - Status: v1.ConditionFalse, - Reason: registration.SyncError, - }, - { - Type: registration.ConditionRegistered, - Status: v1.ConditionTrue, - }, - { - Type: registration.ConditionACLsUpdated, - Status: v1.ConditionFalse, - Reason: registration.ConsulErrorACL, - }, - }, - }, - "registering - policy does not exist - error creating policy": { - registration: &v1alpha1.Registration{ - TypeMeta: metav1.TypeMeta{ - Kind: "Registration", - APIVersion: "consul.hashicorp.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - Finalizers: []string{registration.RegistrationFinalizer}, - }, - Spec: v1alpha1.RegistrationSpec{ - ID: "node-id", - Node: "virtual-node", - Address: "127.0.0.1", - Datacenter: "dc1", - 
Service: v1alpha1.Service{ - ID: "service-id", - Name: "service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - terminatingGateways: []runtime.Object{ - &v1alpha1.TerminatingGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - }, - Spec: v1alpha1.TerminatingGatewaySpec{ - Services: []v1alpha1.LinkedService{ - { - Name: "service-name", - }, - }, - }, - }, - }, - serverResponseConfig: serverResponseConfig{ - registering: true, - aclEnabled: true, - errOnPolicyWrite: true, - }, - expectedConditions: []v1alpha1.Condition{ - { - Type: v1alpha1.ConditionSynced, - Status: v1.ConditionFalse, - Reason: registration.SyncError, - }, - { - Type: registration.ConditionRegistered, - Status: v1.ConditionTrue, - }, - { - Type: registration.ConditionACLsUpdated, - Status: v1.ConditionFalse, - Reason: registration.ConsulErrorACL, - }, - }, - }, - "registering - error updating role": { - registration: &v1alpha1.Registration{ - TypeMeta: metav1.TypeMeta{ - Kind: "Registration", - APIVersion: "consul.hashicorp.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - Finalizers: []string{registration.RegistrationFinalizer}, - }, - Spec: v1alpha1.RegistrationSpec{ - ID: "node-id", - Node: "virtual-node", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: v1alpha1.Service{ - ID: "service-id", - Name: "service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - terminatingGateways: []runtime.Object{ - &v1alpha1.TerminatingGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - }, - Spec: v1alpha1.TerminatingGatewaySpec{ - Services: []v1alpha1.LinkedService{ - { - Name: "service-name", - }, - }, - }, - }, - }, - serverResponseConfig: serverResponseConfig{ - registering: true, - aclEnabled: true, - errOnRoleUpdate: true, - }, - expectedConditions: []v1alpha1.Condition{ - { - Type: v1alpha1.ConditionSynced, - Status: v1.ConditionFalse, - Reason: registration.SyncError, - }, - { - Type: 
registration.ConditionRegistered, - Status: v1.ConditionTrue, - }, - { - Type: registration.ConditionACLsUpdated, - Status: v1.ConditionFalse, - Reason: registration.ConsulErrorACL, - }, - }, - }, - "deregistering": { - registration: &v1alpha1.Registration{ - TypeMeta: metav1.TypeMeta{ - Kind: "Registration", - APIVersion: "consul.hashicorp.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - Finalizers: []string{registration.RegistrationFinalizer}, - DeletionTimestamp: &deletionTime, - }, - Spec: v1alpha1.RegistrationSpec{ - ID: "node-id", - Node: "virtual-node", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: v1alpha1.Service{ - ID: "service-id", - Name: "service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - terminatingGateways: []runtime.Object{ - &v1alpha1.TerminatingGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - }, - Spec: v1alpha1.TerminatingGatewaySpec{ - Services: []v1alpha1.LinkedService{ - { - Name: "service-name", - }, - }, - }, - }, - }, - serverResponseConfig: serverResponseConfig{ - errOnDeregister: true, - }, - expectedConditions: []v1alpha1.Condition{ - { - Type: v1alpha1.ConditionSynced, - Status: v1.ConditionFalse, - Reason: registration.SyncError, - }, - { - Type: registration.ConditionDeregistered, - Status: v1.ConditionFalse, - Reason: registration.ConsulErrorDeregistration, - }, - }, - }, - "deregistering - ACLs enabled - terminating-gateway error updating role": { - registration: &v1alpha1.Registration{ - TypeMeta: metav1.TypeMeta{ - Kind: "Registration", - APIVersion: "consul.hashicorp.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - Finalizers: []string{registration.RegistrationFinalizer}, - DeletionTimestamp: &deletionTime, - }, - Spec: v1alpha1.RegistrationSpec{ - ID: "node-id", - Node: "virtual-node", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: v1alpha1.Service{ - ID: "service-id", - Name: "service-name", - 
Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - terminatingGateways: []runtime.Object{ - &v1alpha1.TerminatingGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - }, - Spec: v1alpha1.TerminatingGatewaySpec{ - Services: []v1alpha1.LinkedService{ - { - Name: "service-name", - }, - }, - }, - }, - }, - serverResponseConfig: serverResponseConfig{ - aclEnabled: true, - errOnRoleUpdate: true, - }, - expectedConditions: []v1alpha1.Condition{ - { - Type: v1alpha1.ConditionSynced, - Status: v1.ConditionFalse, - Reason: registration.SyncError, - }, - { - Type: registration.ConditionDeregistered, - Status: v1.ConditionTrue, - }, - { - Type: registration.ConditionACLsUpdated, - Status: v1.ConditionFalse, - Reason: registration.ConsulErrorACL, - }, - }, - }, - "deregistering - ACLs enabled - terminating-gateway error deleting policy": { - registration: &v1alpha1.Registration{ - TypeMeta: metav1.TypeMeta{ - Kind: "Registration", - APIVersion: "consul.hashicorp.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-registration", - Finalizers: []string{registration.RegistrationFinalizer}, - DeletionTimestamp: &deletionTime, - }, - Spec: v1alpha1.RegistrationSpec{ - ID: "node-id", - Node: "virtual-node", - Address: "127.0.0.1", - Datacenter: "dc1", - Service: v1alpha1.Service{ - ID: "service-id", - Name: "service-name", - Port: 8080, - Address: "127.0.0.1", - }, - }, - }, - terminatingGateways: []runtime.Object{ - &v1alpha1.TerminatingGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - }, - Spec: v1alpha1.TerminatingGatewaySpec{ - Services: []v1alpha1.LinkedService{ - { - Name: "service-name", - }, - }, - }, - }, - }, - serverResponseConfig: serverResponseConfig{ - aclEnabled: true, - errOnPolicyDelete: true, - }, - expectedConditions: []v1alpha1.Condition{ - { - Type: v1alpha1.ConditionSynced, - Status: v1.ConditionFalse, - Reason: registration.SyncError, - }, - { - Type: registration.ConditionDeregistered, - Status: 
v1.ConditionTrue, - }, - { - Type: registration.ConditionACLsUpdated, - Status: v1.ConditionFalse, - Reason: registration.ConsulErrorACL, - }, - }, - }, - } - - for name, tc := range cases { - tc := tc - tt.Run(name, func(t *testing.T) { - t.Parallel() - s := runtime.NewScheme() - s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.Registration{}, &v1alpha1.TerminatingGateway{}, &v1alpha1.TerminatingGatewayList{}) - ctx := context.Background() - - consulServer, testClient := fakeConsulServer(t, tc.serverResponseConfig, tc.registration.Spec.Service.Name) - defer consulServer.Close() - - runtimeObjs := []runtime.Object{tc.registration} - runtimeObjs = append(runtimeObjs, tc.terminatingGateways...) - fakeClient := fake.NewClientBuilder(). - WithScheme(s). - WithRuntimeObjects(runtimeObjs...). - WithStatusSubresource(&v1alpha1.Registration{}). - Build() - - controller := ®istration.RegistrationsController{ - Client: fakeClient, - Log: logrtest.NewTestLogger(t), - Scheme: s, - Cache: registration.NewRegistrationCache(context.Background(), testClient.Cfg, testClient.Watcher, fakeClient, false, false), - } - - _, err := controller.Reconcile(ctx, ctrl.Request{ - NamespacedName: types.NamespacedName{Name: tc.registration.Name, Namespace: tc.registration.Namespace}, - }) - require.Error(t, err) - - fetchedReg := &v1alpha1.Registration{TypeMeta: metav1.TypeMeta{APIVersion: "consul.hashicorp.com/v1alpha1", Kind: "Registration"}} - fakeClient.Get(ctx, types.NamespacedName{Name: tc.registration.Name}, fetchedReg) - - require.Len(t, fetchedReg.Status.Conditions, len(tc.expectedConditions)) - - for i, c := range fetchedReg.Status.Conditions { - if diff := cmp.Diff(c, tc.expectedConditions[i], cmpopts.IgnoreFields(v1alpha1.Condition{}, "LastTransitionTime", "Message")); diff != "" { - t.Errorf("unexpected condition diff: %s", diff) - } - } - - require.ElementsMatch(t, fetchedReg.Finalizers, []string{registration.RegistrationFinalizer}) - }) - } -} - -func fakeConsulServer(t 
*testing.T, serverResponseConfig serverResponseConfig, serviceName string) (*httptest.Server, *test.TestServerClient) { - t.Helper() - mux := buildMux(t, serverResponseConfig, serviceName) - consulServer := httptest.NewServer(mux) - - parsedURL, err := url.Parse(consulServer.URL) - require.NoError(t, err) - host := strings.Split(parsedURL.Host, ":")[0] - - port, err := strconv.Atoi(parsedURL.Port()) - require.NoError(t, err) - - cfg := &consul.Config{APIClientConfig: &capi.Config{Address: host}, HTTPPort: port} - if serverResponseConfig.aclEnabled { - cfg.APIClientConfig.Token = "test-token" - } - - testClient := &test.TestServerClient{ - Cfg: cfg, - Watcher: test.MockConnMgrForIPAndPort(t, host, port, false), - } - - return consulServer, testClient -} - -func buildMux(t *testing.T, cfg serverResponseConfig, serviceName string) http.Handler { - t.Helper() - mux := http.NewServeMux() - mux.HandleFunc("/v1/catalog/register", func(w http.ResponseWriter, r *http.Request) { - if cfg.errOnRegister { - w.WriteHeader(500) - return - } - w.WriteHeader(200) - }) - - mux.HandleFunc("/v1/catalog/deregister", func(w http.ResponseWriter, r *http.Request) { - if cfg.errOnDeregister { - w.WriteHeader(500) - return - } - w.WriteHeader(200) - }) - - policyID, err := uuid.GenerateUUID() - require.NoError(t, err) - - mux.HandleFunc("/v1/acl/roles", func(w http.ResponseWriter, r *http.Request) { - entries := []*capi.ACLRole{ - { - ID: "754a8717-46e9-9f18-7f76-28dc0afafd19", - Name: "consul-consul-connect-inject-acl-role", - Description: "ACL Role for consul-consul-connect-injector", - Policies: []*capi.ACLLink{ - { - ID: "38511a9f-a309-11e2-7f67-7fea12056e7c", - Name: "connect-inject-policy", - }, - }, - }, - } - - if cfg.temGWRoleMissing { - val, err := json.Marshal(entries) - if err != nil { - w.WriteHeader(500) - return - } - w.WriteHeader(200) - w.Write(val) - return - } - - termGWPolicies := []*capi.ACLLink{ - { - ID: "b7e377d9-5e2b-b99c-3f06-139584cf47f8", - Name: 
"terminating-gateway-policy", - }, - } - - if !cfg.registering { - termGWPolicies = append(termGWPolicies, &capi.ACLLink{ - ID: policyID, - Name: fmt.Sprintf("%s-write-policy", serviceName), - }) - } - - termGWRole := &capi.ACLRole{ - ID: "61fc5051-96e9-7b67-69b5-98f7f6682563", - Name: "consul-consul-terminating-gateway-acl-role", - Description: "ACL Role for consul-consul-terminating-gateway", - Policies: termGWPolicies, - } - - entries = append(entries, termGWRole) - - val, err := json.Marshal(entries) - if err != nil { - w.WriteHeader(500) - return - } - w.WriteHeader(200) - w.Write(val) - }) - - mux.HandleFunc("/v1/acl/role/", func(w http.ResponseWriter, r *http.Request) { - if cfg.errOnRoleUpdate { - w.WriteHeader(500) - return - } - - role := &capi.ACLRole{ - ID: "61fc5051-96e9-7b67-69b5-98f7f6682563", - Name: "consul-consul-terminating-gateway-acl-role", - Description: "ACL Role for consul-consul-terminating-gateway", - Policies: []*capi.ACLLink{ - { - ID: "b7e377d9-5e2b-b99c-3f06-139584cf47f8", - Name: "terminating-gateway-policy", - }, - { - ID: policyID, - Name: fmt.Sprintf("%s-write-policy", serviceName), - }, - }, - } - val, err := json.Marshal(role) - if err != nil { - w.WriteHeader(500) - return - } - w.WriteHeader(200) - w.Write(val) - }) - - mux.HandleFunc("/v1/acl/policy/name/", func(w http.ResponseWriter, r *http.Request) { - if cfg.errOnPolicyRead { - w.WriteHeader(500) - return - } - - if !cfg.policyExists { - w.WriteHeader(404) - return - } - - policy := &capi.ACLPolicy{ - ID: policyID, - Name: fmt.Sprintf("%s-write-policy", serviceName), - } - - val, err := json.Marshal(policy) - if err != nil { - w.WriteHeader(500) - return - } - w.WriteHeader(200) - w.Write(val) - }) - - mux.HandleFunc("/v1/acl/policy/", func(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case "GET": - if cfg.errOnPolicyWrite { - w.WriteHeader(500) - return - } - - policy := &capi.ACLPolicy{ - ID: policyID, - Name: fmt.Sprintf("%s-write-policy", 
serviceName), - } - - val, err := json.Marshal(policy) - if err != nil { - w.WriteHeader(500) - return - } - w.WriteHeader(200) - w.Write(val) - case "DELETE": - if cfg.errOnPolicyDelete { - w.WriteHeader(500) - return - } - w.WriteHeader(200) - } - }) - - return mux -} diff --git a/control-plane/catalog/registration/result.go b/control-plane/catalog/registration/result.go deleted file mode 100644 index 176855c330..0000000000 --- a/control-plane/catalog/registration/result.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package registration - -import ( - "errors" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" -) - -// Conditions. -const ( - ConditionSynced = "Synced" - ConditionRegistered = "Registered" - ConditionDeregistered = "Deregistered" - ConditionACLsUpdated = "ACLsUpdated" -) - -// Status Reasons. -const ( - SyncError = "SyncError" - ConsulErrorRegistration = "ConsulErrorRegistration" - ConsulErrorDeregistration = "ConsulErrorDeregistration" - ConsulErrorACL = "ConsulErrorACL" - ConsulDeregistration = "ConsulDeregistration" -) - -type Result struct { - Registering bool - ConsulDeregistered bool - Sync error - Registration error - Deregistration error - ACLUpdate error - Finalizer error -} - -func (r Result) hasErrors() bool { - return r.Sync != nil || r.Registration != nil || r.ACLUpdate != nil || r.Finalizer != nil -} - -func (r Result) errors() error { - var err error - err = errors.Join(err, r.Sync, r.Registration, r.ACLUpdate, r.Finalizer) - return err -} - -func syncedCondition(result Result) v1alpha1.Condition { - if result.Sync != nil { - return v1alpha1.Condition{ - Type: ConditionSynced, - Status: corev1.ConditionFalse, - Reason: SyncError, - Message: result.Sync.Error(), - LastTransitionTime: metav1.Now(), - } - } - return v1alpha1.Condition{ - Type: ConditionSynced, - Status: 
corev1.ConditionTrue, - LastTransitionTime: metav1.Now(), - } -} - -func registrationCondition(result Result) v1alpha1.Condition { - if result.Registration != nil { - return v1alpha1.Condition{ - Type: ConditionRegistered, - Status: corev1.ConditionFalse, - Reason: ConsulErrorRegistration, - Message: result.Registration.Error(), - LastTransitionTime: metav1.Now(), - } - } - return v1alpha1.Condition{ - Type: ConditionRegistered, - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Now(), - } -} - -func deregistrationCondition(result Result) v1alpha1.Condition { - if result.Deregistration != nil { - return v1alpha1.Condition{ - Type: ConditionDeregistered, - Status: corev1.ConditionFalse, - Reason: ConsulErrorDeregistration, - Message: result.Deregistration.Error(), - LastTransitionTime: metav1.Now(), - } - } - - var ( - reason string - message string - ) - if result.ConsulDeregistered { - reason = ConsulDeregistration - message = "Consul deregistered this service" - } - return v1alpha1.Condition{ - Type: ConditionDeregistered, - Status: corev1.ConditionTrue, - Reason: reason, - Message: message, - LastTransitionTime: metav1.Now(), - } -} - -func aclCondition(result Result) v1alpha1.Condition { - if result.ACLUpdate != nil { - return v1alpha1.Condition{ - Type: ConditionACLsUpdated, - Status: corev1.ConditionFalse, - Reason: ConsulErrorACL, - Message: result.ACLUpdate.Error(), - LastTransitionTime: metav1.Now(), - } - } - - if result.ConsulDeregistered { - return v1alpha1.Condition{ - Type: ConditionACLsUpdated, - Status: corev1.ConditionFalse, - Reason: ConsulDeregistration, - Message: "Consul deregistered this service, acls were not removed", - LastTransitionTime: metav1.Now(), - } - } - - return v1alpha1.Condition{ - Type: ConditionACLsUpdated, - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Now(), - } -} diff --git a/control-plane/catalog/to-consul/resource_test.go b/control-plane/catalog/to-consul/resource_test.go index 
3272849bd3..8ccc54780f 100644 --- a/control-plane/catalog/to-consul/resource_test.go +++ b/control-plane/catalog/to-consul/resource_test.go @@ -2091,6 +2091,9 @@ func createNodes(t *testing.T, client *fake.Clientset) (*corev1.Node, *corev1.No node1 := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName1, + Labels: map[string]string{ + corev1.LabelTopologyRegion: "us-west-2", + }, }, Status: corev1.NodeStatus{ @@ -2107,6 +2110,9 @@ func createNodes(t *testing.T, client *fake.Clientset) (*corev1.Node, *corev1.No node2 := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName2, + Labels: map[string]string{ + corev1.LabelTopologyRegion: "us-west-2", + }, }, Status: corev1.NodeStatus{ diff --git a/control-plane/cni/go.mod b/control-plane/cni/go.mod index e27eb9ab3f..9944d670e3 100644 --- a/control-plane/cni/go.mod +++ b/control-plane/cni/go.mod @@ -6,7 +6,7 @@ require ( github.com/containernetworking/cni v1.1.2 github.com/containernetworking/plugins v1.2.0 github.com/hashicorp/consul-k8s/version v0.0.0 - github.com/hashicorp/consul/sdk v0.16.1 + github.com/hashicorp/consul/sdk v0.16.0 github.com/hashicorp/go-hclog v1.5.0 github.com/stretchr/testify v1.8.4 k8s.io/api v0.28.9 diff --git a/control-plane/cni/go.sum b/control-plane/cni/go.sum index 25d890cdd4..cad80e31b8 100644 --- a/control-plane/cni/go.sum +++ b/control-plane/cni/go.sum @@ -59,8 +59,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= -github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= +github.com/hashicorp/consul/sdk v0.16.0 
h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8= +github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= diff --git a/control-plane/cni/main.go b/control-plane/cni/main.go index 86cc211f71..b49dacd3ce 100644 --- a/control-plane/cni/main.go +++ b/control-plane/cni/main.go @@ -269,7 +269,7 @@ func main() { } // createK8sClient configures the command's Kubernetes API client if it doesn't -// already exist. +// already exist func (c *Command) createK8sClient(cfg *PluginConf) error { restConfig, err := clientcmd.BuildConfigFromFlags("", filepath.Join(cfg.CNINetDir, cfg.Kubeconfig)) if err != nil { diff --git a/control-plane/commands.go b/control-plane/commands.go index 87cf77ffc5..16dbc670f7 100644 --- a/control-plane/commands.go +++ b/control-plane/commands.go @@ -20,6 +20,7 @@ import ( cmdGossipEncryptionAutogenerate "github.com/hashicorp/consul-k8s/control-plane/subcommand/gossip-encryption-autogenerate" cmdInjectConnect "github.com/hashicorp/consul-k8s/control-plane/subcommand/inject-connect" cmdInstallCNI "github.com/hashicorp/consul-k8s/control-plane/subcommand/install-cni" + cmdMeshInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/mesh-init" cmdPartitionInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/partition-init" cmdServerACLInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/server-acl-init" cmdSyncCatalog "github.com/hashicorp/consul-k8s/control-plane/subcommand/sync-catalog" @@ -44,6 +45,10 @@ func init() { return &cmdConnectInit.Command{UI: ui}, nil }, + "mesh-init": func() (cli.Command, error) { + return &cmdMeshInit.Command{UI: ui}, nil + }, + "inject-connect": func() (cli.Command, error) { return 
&cmdInjectConnect.Command{UI: ui}, nil }, diff --git a/control-plane/config/crd/bases/auth.consul.hashicorp.com_trafficpermissions.yaml b/control-plane/config/crd/bases/auth.consul.hashicorp.com_trafficpermissions.yaml index ca29923851..3a7699dce4 100644 --- a/control-plane/config/crd/bases/auth.consul.hashicorp.com_trafficpermissions.yaml +++ b/control-plane/config/crd/bases/auth.consul.hashicorp.com_trafficpermissions.yaml @@ -97,25 +97,23 @@ spec: when evaluating rules for the incoming connection. items: properties: - headers: - items: - properties: - exact: - type: string - invert: - type: boolean - name: - type: string - prefix: - type: string - present: - type: boolean - regex: - type: string - suffix: - type: string - type: object - type: array + header: + properties: + exact: + type: string + invert: + type: boolean + name: + type: string + prefix: + type: string + present: + type: boolean + regex: + type: string + suffix: + type: string + type: object methods: description: Methods is the list of HTTP methods. items: @@ -136,25 +134,23 @@ spec: type: array type: object type: array - headers: - items: - properties: - exact: - type: string - invert: - type: boolean - name: - type: string - prefix: - type: string - present: - type: boolean - regex: - type: string - suffix: - type: string - type: object - type: array + header: + properties: + exact: + type: string + invert: + type: boolean + name: + type: string + prefix: + type: string + present: + type: boolean + regex: + type: string + suffix: + type: string + type: object methods: description: Methods is the list of HTTP methods. If no methods are specified, this rule will apply to all methods. 
diff --git a/control-plane/config/crd/bases/consul.hashicorp.com_gatewayclassconfigs.yaml b/control-plane/config/crd/bases/consul.hashicorp.com_gatewayclassconfigs.yaml index 5f6e3a990b..c2a857db34 100644 --- a/control-plane/config/crd/bases/consul.hashicorp.com_gatewayclassconfigs.yaml +++ b/control-plane/config/crd/bases/consul.hashicorp.com_gatewayclassconfigs.yaml @@ -118,8 +118,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object diff --git a/control-plane/config/crd/bases/consul.hashicorp.com_registrations.yaml b/control-plane/config/crd/bases/consul.hashicorp.com_registrations.yaml deleted file mode 100644 index c36167b5a2..0000000000 --- a/control-plane/config/crd/bases/consul.hashicorp.com_registrations.yaml +++ /dev/null @@ -1,245 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.12.1 - name: registrations.consul.hashicorp.com -spec: - group: consul.hashicorp.com - names: - kind: Registration - listKind: RegistrationList - plural: registrations - singular: registration - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: Registration defines the resource for working with service registrations. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the desired state of Registration. - properties: - address: - type: string - check: - description: HealthCheck is used to represent a single check. - properties: - checkId: - type: string - definition: - description: HealthCheckDefinition is used to store the details - about a health check's execution. - properties: - body: - type: string - deregisterCriticalServiceAfterDuration: - type: string - grpc: - type: string - grpcUseTLS: - type: boolean - header: - additionalProperties: - items: - type: string - type: array - type: object - http: - type: string - intervalDuration: - type: string - method: - type: string - osService: - type: string - tcp: - type: string - tcpUseTLS: - type: boolean - timeoutDuration: - type: string - tlsServerName: - type: string - tlsSkipVerify: - type: boolean - udp: - type: string - required: - - intervalDuration - type: object - exposedPort: - type: integer - name: - type: string - namespace: - type: string - node: - type: string - notes: - type: string - output: - type: string - partition: - type: string - serviceId: - type: string - serviceName: - type: string - status: - type: string - type: - type: string - required: - - checkId - - definition - - name - - serviceId - - serviceName - - status - type: object - datacenter: - type: string - id: - type: string - locality: - properties: - region: - type: string - zone: 
- type: string - type: object - node: - type: string - nodeMeta: - additionalProperties: - type: string - type: object - partition: - type: string - service: - properties: - address: - type: string - enableTagOverride: - type: boolean - id: - type: string - locality: - properties: - region: - type: string - zone: - type: string - type: object - meta: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - partition: - type: string - port: - type: integer - socketPath: - type: string - taggedAddresses: - additionalProperties: - properties: - address: - type: string - port: - type: integer - required: - - address - - port - type: object - type: object - tags: - items: - type: string - type: array - weights: - properties: - passing: - type: integer - warning: - type: integer - required: - - passing - - warning - type: object - required: - - name - - port - type: object - skipNodeUpdate: - type: boolean - taggedAddresses: - additionalProperties: - type: string - type: object - type: object - status: - description: RegistrationStatus defines the observed state of Registration. - properties: - conditions: - description: Conditions indicate the latest available observations - of a resource's current state. - items: - description: 'Conditions define a readiness condition for a Consul - resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' - properties: - lastTransitionTime: - description: LastTransitionTime is the last time the condition - transitioned from one status to another. - format: date-time - type: string - message: - description: A human readable message indicating details about - the transition. - type: string - reason: - description: The reason for the condition's last transition. - type: string - status: - description: Status of the condition, one of True, False, Unknown. 
- type: string - type: - description: Type of condition. - type: string - required: - - status - - type - type: object - type: array - lastSyncedTime: - description: LastSyncedTime is the last time the resource successfully - synced with Consul. - format: date-time - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/control-plane/config/crd/bases/mesh.consul.hashicorp.com_apigateways.yaml b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_apigateways.yaml new file mode 100644 index 0000000000..44713c234f --- /dev/null +++ b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_apigateways.yaml @@ -0,0 +1,235 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: apigateways.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: APIGateway + listKind: APIGatewayList + plural: apigateways + singular: apigateway + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: APIGateway is the Schema for the API Gateway + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + gatewayClassName: + description: GatewayClassName is the name of the GatewayClass used + by the APIGateway + type: string + listeners: + items: + properties: + hostname: + description: Hostname is the host name that a listener should + be bound to, if unspecified, the listener accepts requests + for all hostnames. + type: string + name: + description: Name is the name of the listener in a given gateway. + This must be unique within a gateway. + type: string + port: + format: int32 + maximum: 65535 + minimum: 0 + type: integer + protocol: + description: Protocol is the protocol that a listener should + use, it must either be "http" or "tcp" + type: string + tls: + description: TLS is the TLS settings for the listener. + properties: + certificates: + description: Certificates is a set of references to certificates + that a gateway listener uses for TLS termination. + items: + description: Reference identifies which resource a condition + relates to, when it is not the core resource itself. + properties: + name: + description: Name is the user-given name of the resource + (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of the + resource the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units + (i.e. partition, namespace) in which the resource + resides. + properties: + namespace: + description: "Namespace further isolates resources + within a partition. 
https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list resources + across all namespaces." + type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list resources + across all partitions." + type: string + peerName: + description: "PeerName identifies which peer the + resource is imported from. https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list resources + across all peers." + type: string + type: object + type: + description: Type identifies the resource's type. + properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. "catalog", + "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when + sweeping or backward-incompatible changes are + made to the group's resource types. + type: string + kind: + description: Kind identifies the specific resource + type within the group. + type: string + type: object + type: object + type: array + tlsParameters: + description: TLSParameters contains optional configuration + for running TLS termination. 
+ properties: + cipherSuites: + items: + enum: + - TLS_CIPHER_SUITE_ECDHE_ECDSA_AES128_GCM_SHA256 + - TLS_CIPHER_SUITE_AES256_SHA + - TLS_CIPHER_SUITE_ECDHE_ECDSA_CHACHA20_POLY1305 + - TLS_CIPHER_SUITE_ECDHE_RSA_AES128_GCM_SHA256 + - TLS_CIPHER_SUITE_ECDHE_RSA_CHACHA20_POLY1305 + - TLS_CIPHER_SUITE_ECDHE_ECDSA_AES128_SHA + - TLS_CIPHER_SUITE_ECDHE_RSA_AES128_SHA + - TLS_CIPHER_SUITE_AES128_GCM_SHA256 + - TLS_CIPHER_SUITE_AES128_SHA + - TLS_CIPHER_SUITE_ECDHE_ECDSA_AES256_GCM_SHA384 + - TLS_CIPHER_SUITE_ECDHE_RSA_AES256_GCM_SHA384 + - TLS_CIPHER_SUITE_ECDHE_ECDSA_AES256_SHA + - TLS_CIPHER_SUITE_ECDHE_RSA_AES256_SHA + - TLS_CIPHER_SUITE_AES256_GCM_SHA384 + format: int32 + type: string + type: array + maxVersion: + enum: + - TLS_VERSION_AUTO + - TLS_VERSION_1_0 + - TLS_VERSION_1_1 + - TLS_VERSION_1_2 + - TLS_VERSION_1_3 + - TLS_VERSION_INVALID + - TLS_VERSION_UNSPECIFIED + format: int32 + type: string + minVersion: + enum: + - TLS_VERSION_AUTO + - TLS_VERSION_1_0 + - TLS_VERSION_1_1 + - TLS_VERSION_1_2 + - TLS_VERSION_1_3 + - TLS_VERSION_INVALID + - TLS_VERSION_UNSPECIFIED + format: int32 + type: string + type: object + type: object + type: object + minItems: 1 + type: array + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. 
+ type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/control-plane/config/crd/bases/mesh.consul.hashicorp.com_gatewayclassconfigs.yaml b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_gatewayclassconfigs.yaml new file mode 100644 index 0000000000..e7f560861b --- /dev/null +++ b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_gatewayclassconfigs.yaml @@ -0,0 +1,1821 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: gatewayclassconfigs.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: GatewayClassConfig + listKind: GatewayClassConfigList + plural: gatewayclassconfigs + singular: gatewayclassconfig + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: GatewayClassConfig is the Schema for the Mesh Gateway API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GatewayClassConfigSpec specifies the desired state of the + GatewayClassConfig CRD. + properties: + annotations: + description: Annotations are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key included + here will override those in Set if specified on the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included here + will be overridden if present in InheritFromGateway and set + on the Gateway. + type: object + type: object + deployment: + description: Deployment contains config specific to the Deployment + created from this GatewayClass + properties: + affinity: + description: Affinity specifies the affinity to use on the created + Deployment. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. 
for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects + (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from + its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term + matches no objects. The requirements of them are + ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to a pod label update), + the system may or may not try to eventually evict the + pod from its node. When there are multiple elements, + the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any node + on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. 
The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node that + violates one or more of the expressions. The node that + is most preferred is the one with the greatest sum of + weights, i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + anti-affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. 
+ items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. 
+ type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the pod + will not be scheduled onto the node. If the anti-affinity + requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod + label update), the system may or may not try to eventually + evict the pod from its node. When there are multiple + elements, the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any node + on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + description: Annotations are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + container: + description: Container contains config specific to the created + Deployment's container. 
+ properties: + consul: + description: Consul specifies configuration for the consul-dataplane + container + properties: + logging: + description: Logging specifies the logging configuration + for Consul Dataplane + properties: + level: + description: Level sets the logging level for Consul + Dataplane (debug, info, etc.) + type: string + type: object + type: object + hostPort: + description: HostPort specifies a port to be exposed to the + external host network + format: int32 + type: integer + portModifier: + description: PortModifier specifies the value to be added + to every port value for listeners on this gateway. This + is generally used to avoid binding to privileged ports in + the container. + format: int32 + type: integer + resources: + description: Resources specifies the resource requirements + for the created Deployment's container + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + dnsPolicy: + description: DNSPolicy specifies the dns policy to use. These + are set on a per pod basis. + enum: + - Default + - ClusterFirst + - ClusterFirstWithHostNet + - None + type: string + hostNetwork: + description: HostNetwork specifies whether the gateway pods should + run on the host network. + type: boolean + initContainer: + description: InitContainer contains config specific to the created + Deployment's init container. + properties: + consul: + description: Consul specifies configuration for the consul-k8s-control-plane + init container + properties: + logging: + description: Logging specifies the logging configuration + for Consul Dataplane + properties: + level: + description: Level sets the logging level for Consul + Dataplane (debug, info, etc.) + type: string + type: object + type: object + resources: + description: Resources specifies the resource requirements + for the created Deployment's init container + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + labels: + description: Labels are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. 
Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a feature that constrains the scheduling + of a pod to nodes that match specified labels. By defining NodeSelector + in a pod''s configuration, you can ensure that the pod is only + scheduled to nodes with the corresponding labels, providing + a way to influence the placement of workloads based on node + attributes. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: PriorityClassName specifies the priority class name + to use on the created Deployment. + type: string + replicas: + description: Replicas specifies the configuration to control the + number of replicas for the created Deployment. + properties: + default: + description: Default is the number of replicas assigned to + the Deployment when created + format: int32 + type: integer + max: + description: Max is the maximum number of replicas allowed + for a gateway with this class. If the replica count exceeds + this value due to manual or automated scaling, the replica + count will be restored to this value. + format: int32 + type: integer + min: + description: Min is the minimum number of replicas allowed + for a gateway with this class. If the replica count drops + below this value due to manual or automated scaling, the + replica count will be restored to this value. + format: int32 + type: integer + type: object + securityContext: + description: SecurityContext specifies the security context for + the created Deployment's Pod. + properties: + fsGroup: + description: "A special supplemental group that applies to + all containers in a pod. Some volume types allow the Kubelet + to change the ownership of that volume to be owned by the + pod: \n 1. The owning GID will be the FSGroup 2. 
The setgid + bit is set (new files created in the volume will be owned + by FSGroup) 3. The permission bits are OR'd with rw-rw---- + \n If unset, the Kubelet will not modify the ownership and + permissions of any volume. Note that this field cannot be + set when spec.os.name is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing + ownership and permission of the volume before being exposed + inside Pod. This field will only apply to volume types which + support fsGroup based ownership(and permissions). It will + have no effect on ephemeral volume types such as: secret, + configmaps and emptydir. Valid values are "OnRootMismatch" + and "Always". If not specified, "Always" is used. Note that + this field cannot be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. Note that this field + cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in SecurityContext. 
If set + in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is + windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + SecurityContext. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence + for that container. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers + in this pod. Note that this field cannot be set when spec.os.name + is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must + be preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a + profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile + should be used. Unconfined - no profile should be applied." 
+ type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process + run in each container, in addition to the container's primary + GID, the fsGroup (if specified), and group memberships defined + in the container image for the uid of the container process. + If unspecified, no additional groups are added to any container. + Note that group memberships defined in the container image + for the uid of the container process are still effective, + even if they are not included in this list. Note that this + field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used + for the pod. Pods with unsupported sysctls (by the container + runtime) might fail to launch. Note that this field cannot + be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options within a container's + SecurityContext will be used. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. 
+ type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components that + enable the WindowsHostProcessContainers feature flag. + Setting this field without the feature flag will result + in errors when validating the Pod. All of a Pod's containers + must have the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + tolerations: + description: Tolerations specifies the tolerations to use on the + created Deployment. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, allowed + values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match + all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to + the value. Valid operators are Exists and Equal. Defaults + to Equal. Exists is equivalent to wildcard for value, + so that a pod can tolerate all taints of a particular + category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the taint + forever (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: 'TopologySpreadConstraints is a feature that controls + how pods are spead across your topology. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/' + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine + the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. 
+ This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to + select the pods over which spreading will be calculated. + The keys are used to lookup values from the incoming pod + labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading + will be calculated for the incoming pod. Keys that don't + exist in the incoming pod labels will be ignored. A null + or empty list means only match against labelSelector. + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods + may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global + minimum. The global minimum is the minimum number of matching + pods in an eligible domain or zero if the number of eligible + domains is less than MinDomains. For example, in a 3-zone + cluster, MaxSkew is set to 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | | P P | P P | P | - + if MaxSkew is 1, incoming pod can only be scheduled to + zone3 to become 2/2/2; scheduling it onto zone1(zone2) + would make the ActualSkew(3-1) on zone1(zone2) violate + MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled + onto any zone. 
When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that + satisfy it. It''s a required field. Default value is 1 + and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible + domains. When the number of eligible domains with matching + topology keys is less than minDomains, Pod Topology Spread + treats \"global minimum\" as 0, and then the calculation + of Skew is performed. And when the number of eligible + domains with matching topology keys equals or greater + than minDomains, this value has no effect on scheduling. + As a result, when the number of eligible domains is less + than minDomains, scheduler won't schedule more than maxSkew + Pods to those domains. If value is nil, the constraint + behaves as if MinDomains is equal to 1. Valid values are + integers greater than 0. When value is not nil, WhenUnsatisfiable + must be DoNotSchedule. \n For example, in a 3-zone cluster, + MaxSkew is set to 2, MinDomains is set to 5 and pods with + the same labelSelector spread as 2/2/2: | zone1 | zone2 + | zone3 | | P P | P P | P P | The number of domains + is less than 5(MinDomains), so \"global minimum\" is treated + as 0. In this situation, new pod with the same labelSelector + cannot be scheduled, because computed skew will be 3(3 + - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. \n This is a beta field and requires + the MinDomainsInPodTopologySpread feature gate to be enabled + (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat + Pod's nodeAffinity/nodeSelector when calculating pod topology + spread skew. Options are: - Honor: only nodes matching + nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes + are included in the calculations. 
\n If this value is + nil, the behavior is equivalent to the Honor policy. This + is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat + node taints when calculating pod topology spread skew. + Options are: - Honor: nodes without taints, along with + tainted nodes for which the incoming pod has a toleration, + are included. - Ignore: node taints are ignored. All nodes + are included. \n If this value is nil, the behavior is + equivalent to the Ignore policy. This is a beta-level + feature default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes + that have a label with this key and identical values are + considered to be in the same topology. We consider each + as a "bucket", and try to put balanced number + of pods into each bucket. We define a domain as a particular + instance of a topology. Also, we define an eligible domain + as a domain whose nodes meet the requirements of nodeAffinityPolicy + and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is a domain + of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with + a pod if it doesn''t satisfy the spread constraint. - + DoNotSchedule (default) tells the scheduler not to schedule + it. - ScheduleAnyway tells the scheduler to schedule the + pod in any location, but giving higher precedence to topologies + that would help reduce the skew. A constraint is considered + "Unsatisfiable" for an incoming pod if and only if every + possible node assignment for that pod would violate "MaxSkew" + on some topology. 
For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector spread + as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming + pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) + as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). + In other words, the cluster can still be imbalanced, but + scheduler won''t make it *more* imbalanced. It''s a required + field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + labels: + description: Labels are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key included + here will override those in Set if specified on the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included here + will be overridden if present in InheritFromGateway and set + on the Gateway. + type: object + type: object + role: + description: Role contains config specific to the Role created from + this GatewayClass + properties: + annotations: + description: Annotations are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. 
+ type: object + type: object + labels: + description: Labels are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + type: object + roleBinding: + description: RoleBinding contains config specific to the RoleBinding + created from this GatewayClass + properties: + annotations: + description: Annotations are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + labels: + description: Labels are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. 
Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + type: object + service: + description: Service contains config specific to the Service created + from this GatewayClass + properties: + annotations: + description: Annotations are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + labels: + description: Labels are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + type: + description: Type specifies the type of Service to use (LoadBalancer, + ClusterIP, etc.) 
+ enum: + - ClusterIP + - NodePort + - LoadBalancer + type: string + type: object + serviceAccount: + description: ServiceAccount contains config specific to the corev1.ServiceAccount + created from this GatewayClass + properties: + annotations: + description: Annotations are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + labels: + description: Labels are applied to the created resource + properties: + inheritFromGateway: + description: InheritFromGateway lists the names/keys of annotations + or labels to copy from the Gateway resource. Any name/key + included here will override those in Set if specified on + the Gateway. + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Set lists the names/keys and values of annotations + or labels to set on the resource. Any name/key included + here will be overridden if present in InheritFromGateway + and set on the Gateway. + type: object + type: object + type: object + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. 
See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/control-plane/config/crd/bases/mesh.consul.hashicorp.com_gatewayclasses.yaml b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_gatewayclasses.yaml new file mode 100644 index 0000000000..ca2b05d062 --- /dev/null +++ b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_gatewayclasses.yaml @@ -0,0 +1,117 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: gatewayclasses.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: GatewayClass + listKind: GatewayClassList + plural: gatewayclasses + singular: gatewayclass + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: GatewayClass is the Schema for the Gateway Class API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + controllerName: + description: ControllerName is the name of the Kubernetes controller + that manages Gateways of this class + type: string + description: + description: Description of GatewayClass + type: string + parametersRef: + description: ParametersRef refers to a resource responsible for configuring + the behavior of the GatewayClass. 
+ properties: + group: + description: The Kubernetes Group that the referred object belongs + to + type: string + kind: + description: The Kubernetes Kind that the referred object is + type: string + name: + description: The Name of the referred object + type: string + namespace: + description: The kubernetes namespace that the referred object + is in + type: string + required: + - name + type: object + required: + - controllerName + - parametersRef + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. 
+ format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/control-plane/config/crd/bases/mesh.consul.hashicorp.com_grpcroutes.yaml b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_grpcroutes.yaml new file mode 100644 index 0000000000..fda3e4255e --- /dev/null +++ b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_grpcroutes.yaml @@ -0,0 +1,612 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: grpcroutes.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: GRPCRoute + listKind: GRPCRouteList + plural: grpcroutes + shortNames: + - grpc-route + singular: grpcroute + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: GRPCRoute is the Schema for the GRPC Route API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: "NOTE: this should align to the GAMMA/gateway-api version, + or at least be easily translatable. \n https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.GRPCRoute + \n This is a Resource type." + properties: + hostnames: + description: "Hostnames are the hostnames for which this GRPCRoute + should respond to requests. \n This is only valid for north/south." + items: + type: string + type: array + parentRefs: + description: "ParentRefs references the resources (usually Services) + that a Route wants to be attached to. \n It is invalid to reference + an identical parent more than once. It is valid to reference multiple + distinct sections within the same parent resource." + items: + description: 'NOTE: roughly equivalent to structs.ResourceReference' + properties: + port: + description: For east/west this is the name of the Consul Service + port to direct traffic to or empty to imply all. For north/south + this is TBD. + type: string + ref: + description: For east/west configuration, this should point + to a Service. For north/south it should point to a Gateway. + properties: + name: + description: Name is the user-given name of the resource + (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of the resource + the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units (i.e. + partition, namespace) in which the resource resides. + properties: + namespace: + description: "Namespace further isolates resources within + a partition. https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all namespaces." 
+ type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all partitions." + type: string + peerName: + description: "PeerName identifies which peer the resource + is imported from. https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all peers." + type: string + type: object + type: + description: Type identifies the resource's type. + properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. "catalog", + "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when sweeping + or backward-incompatible changes are made to the group's + resource types. + type: string + kind: + description: Kind identifies the specific resource type + within the group. + type: string + type: object + type: object + type: object + type: array + rules: + description: Rules are a list of GRPC matchers, filters and actions. + items: + properties: + backendRefs: + description: "BackendRefs defines the backend(s) where matching + requests should be sent. Failure behavior here depends on + how many BackendRefs are specified and how many are invalid. + \n If all entries in BackendRefs are invalid, and there are + also no filters specified in this route rule, all traffic + which matches this rule MUST receive a 500 status code. \n + See the GRPCBackendRef definition for the rules about what + makes a single GRPCBackendRef invalid. \n When a GRPCBackendRef + is invalid, 500 status codes MUST be returned for requests + that would have otherwise been routed to an invalid backend. 
+ If multiple backends are specified, and some are invalid, + the proportion of requests that would otherwise have been + routed to an invalid backend MUST receive a 500 status code. + \n For example, if two backends are specified with equal weights, + and one is invalid, 50 percent of traffic must receive a 500. + Implementations may choose how that 50 percent is determined." + items: + properties: + backendRef: + properties: + datacenter: + type: string + port: + description: "For east/west this is the name of the + Consul Service port to direct traffic to or empty + to imply using the same value as the parent ref. + \n For north/south this is TBD." + type: string + ref: + description: For east/west configuration, this should + point to a Service. + properties: + name: + description: Name is the user-given name of the + resource (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of + the resource the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units + (i.e. partition, namespace) in which the resource + resides. + properties: + namespace: + description: "Namespace further isolates resources + within a partition. https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all namespaces." + type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all partitions." + type: string + peerName: + description: "PeerName identifies which peer + the resource is imported from. 
https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all peers." + type: string + type: object + type: + description: Type identifies the resource's type. + properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. + "catalog", "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when + sweeping or backward-incompatible changes + are made to the group's resource types. + type: string + kind: + description: Kind identifies the specific + resource type within the group. + type: string + type: object + type: object + type: object + filters: + description: Filters defined at this level should be executed + if and only if the request is being forwarded to the + backend defined here. + items: + properties: + requestHeaderModifier: + description: RequestHeaderModifier defines a schema + for a filter that modifies request headers. + properties: + add: + description: Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. + Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with + the given header (name, value) before the + action. 
+ items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaderModifier: + description: ResponseHeaderModifier defines a schema + for a filter that modifies response headers. + properties: + add: + description: Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. + Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with + the given header (name, value) before the + action. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + urlRewrite: + description: URLRewrite defines a schema for a filter + that modifies a request during forwarding. + properties: + pathPrefix: + type: string + type: object + type: object + type: array + weight: + description: "Weight specifies the proportion of requests + forwarded to the referenced backend. This is computed + as weight/(sum of all weights in this BackendRefs list). + For non-zero values, there may be some epsilon from + the exact proportion defined here depending on the precision + an implementation supports. Weight is not a percentage + and the sum of weights does not need to equal 100. \n + If only one backend is specified and it has a weight + greater than 0, 100% of the traffic is forwarded to + that backend. If weight is set to 0, no traffic should + be forwarded for this entry. If unspecified, weight + defaults to 1." 
+ format: int32 + type: integer + type: object + type: array + filters: + items: + properties: + requestHeaderModifier: + description: RequestHeaderModifier defines a schema for + a filter that modifies request headers. + properties: + add: + description: Add adds the given header(s) (name, value) + to the request before the action. It appends to + any existing values associated with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from the HTTP + request before the action. The value of Remove is + a list of HTTP header names. Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with the given + header (name, value) before the action. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaderModifier: + description: ResponseHeaderModifier defines a schema for + a filter that modifies response headers. + properties: + add: + description: Add adds the given header(s) (name, value) + to the request before the action. It appends to + any existing values associated with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from the HTTP + request before the action. The value of Remove is + a list of HTTP header names. Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with the given + header (name, value) before the action. 
+ items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + urlRewrite: + description: URLRewrite defines a schema for a filter + that modifies a request during forwarding. + properties: + pathPrefix: + type: string + type: object + type: object + type: array + matches: + items: + properties: + headers: + description: Headers specifies gRPC request header matchers. + Multiple match values are ANDed together, meaning, a + request MUST match all the specified headers to select + the route. + items: + properties: + name: + type: string + type: + description: "HeaderMatchType specifies the semantics + of how HTTP header values should be compared. + Valid HeaderMatchType values, along with their + conformance levels, are: \n Note that values may + be added to this enum, implementations must ensure + that unknown values will not cause a crash. \n + Unknown values here must result in the implementation + setting the Accepted Condition for the Route to + status: False, with a Reason of UnsupportedValue." + enum: + - HEADER_MATCH_TYPE_UNSPECIFIED + - HEADER_MATCH_TYPE_EXACT + - HEADER_MATCH_TYPE_REGEX + - HEADER_MATCH_TYPE_PRESENT + - HEADER_MATCH_TYPE_PREFIX + - HEADER_MATCH_TYPE_SUFFIX + format: int32 + type: string + value: + type: string + type: object + type: array + method: + description: Method specifies a gRPC request service/method + matcher. If this field is not specified, all services + and methods will match. + properties: + method: + description: "Value of the method to match against. + If left empty or omitted, will match all services. + \n At least one of Service and Method MUST be a + non-empty string.}" + type: string + service: + description: "Value of the service to match against. + If left empty or omitted, will match any service. + \n At least one of Service and Method MUST be a + non-empty string." 
+ type: string + type: + description: 'Type specifies how to match against + the service and/or method. Support: Core (Exact + with service and method specified)' + enum: + - GRPC_METHOD_MATCH_TYPE_UNSPECIFIED + - GRPC_METHOD_MATCH_TYPE_EXACT + - GRPC_METHOD_MATCH_TYPE_REGEX + format: int32 + type: string + type: object + type: object + type: array + retries: + properties: + number: + description: Number is the number of times to retry the + request when a retryable result occurs. + properties: + value: + description: The uint32 value. + format: int32 + type: integer + type: object + onConditions: + description: RetryOn allows setting envoy specific conditions + when a request should be automatically retried. + items: + type: string + type: array + onConnectFailure: + description: RetryOnConnectFailure allows for connection + failure errors to trigger a retry. + type: boolean + onStatusCodes: + description: RetryOnStatusCodes is a flat list of http response + status codes that are eligible for retry. This again should + be feasible in any reasonable proxy. + items: + format: int32 + type: integer + type: array + type: object + timeouts: + description: HTTPRouteTimeouts defines timeouts that can be + configured for an HTTPRoute or GRPCRoute. + properties: + idle: + description: Idle specifies the total amount of time permitted + for the request stream to be idle. + format: duration + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less than + one second are represented with a 0 `seconds` field + and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. Must + be from -315,576,000,000 to +315,576,000,000 inclusive. 
+ Note: these bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + request: + description: RequestTimeout is the total amount of time + permitted for the entire downstream request (and retries) + to be processed. + format: duration + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less than + one second are represented with a 0 `seconds` field + and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. Must + be from -315,576,000,000 to +315,576,000,000 inclusive. + Note: these bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + type: object + type: object + type: array + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. 
+ type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/control-plane/config/crd/bases/mesh.consul.hashicorp.com_httproutes.yaml b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_httproutes.yaml new file mode 100644 index 0000000000..46bf7162a6 --- /dev/null +++ b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_httproutes.yaml @@ -0,0 +1,668 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: httproutes.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: HTTPRoute + listKind: HTTPRouteList + plural: httproutes + shortNames: + - http-route + singular: httproute + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: HTTPRoute is the Schema for the HTTP Route API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: "NOTE: this should align to the GAMMA/gateway-api version, + or at least be easily translatable. \n https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.HTTPRoute + \n This is a Resource type." + properties: + hostnames: + description: "Hostnames are the hostnames for which this HTTPRoute + should respond to requests. \n This is only valid for north/south." + items: + type: string + type: array + parentRefs: + description: "ParentRefs references the resources (usually Services) + that a Route wants to be attached to. \n It is invalid to reference + an identical parent more than once. It is valid to reference multiple + distinct sections within the same parent resource." + items: + description: 'NOTE: roughly equivalent to structs.ResourceReference' + properties: + port: + description: For east/west this is the name of the Consul Service + port to direct traffic to or empty to imply all. For north/south + this is TBD. + type: string + ref: + description: For east/west configuration, this should point + to a Service. For north/south it should point to a Gateway. + properties: + name: + description: Name is the user-given name of the resource + (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of the resource + the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units (i.e. + partition, namespace) in which the resource resides. 
+ properties: + namespace: + description: "Namespace further isolates resources within + a partition. https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all namespaces." + type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all partitions." + type: string + peerName: + description: "PeerName identifies which peer the resource + is imported from. https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all peers." + type: string + type: object + type: + description: Type identifies the resource's type. + properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. "catalog", + "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when sweeping + or backward-incompatible changes are made to the group's + resource types. + type: string + kind: + description: Kind identifies the specific resource type + within the group. + type: string + type: object + type: object + type: object + type: array + rules: + description: Rules are a list of HTTP-based routing rules that this + route should use for constructing a routing table. + items: + description: HTTPRouteRule specifies the routing rules used to determine + what upstream service an HTTP request is routed to. + properties: + backendRefs: + description: "BackendRefs defines the backend(s) where matching + requests should be sent. \n Failure behavior here depends + on how many BackendRefs are specified and how many are invalid. 
+ \n If all entries in BackendRefs are invalid, and there are + also no filters specified in this route rule, all traffic + which matches this rule MUST receive a 500 status code. \n + See the HTTPBackendRef definition for the rules about what + makes a single HTTPBackendRef invalid. \n When a HTTPBackendRef + is invalid, 500 status codes MUST be returned for requests + that would have otherwise been routed to an invalid backend. + If multiple backends are specified, and some are invalid, + the proportion of requests that would otherwise have been + routed to an invalid backend MUST receive a 500 status code. + \n For example, if two backends are specified with equal weights, + and one is invalid, 50 percent of traffic must receive a 500. + Implementations may choose how that 50 percent is determined." + items: + properties: + backendRef: + properties: + datacenter: + type: string + port: + description: "For east/west this is the name of the + Consul Service port to direct traffic to or empty + to imply using the same value as the parent ref. + \n For north/south this is TBD." + type: string + ref: + description: For east/west configuration, this should + point to a Service. + properties: + name: + description: Name is the user-given name of the + resource (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of + the resource the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units + (i.e. partition, namespace) in which the resource + resides. + properties: + namespace: + description: "Namespace further isolates resources + within a partition. https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all namespaces." + type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. 
https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all partitions." + type: string + peerName: + description: "PeerName identifies which peer + the resource is imported from. https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all peers." + type: string + type: object + type: + description: Type identifies the resource's type. + properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. + "catalog", "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when + sweeping or backward-incompatible changes + are made to the group's resource types. + type: string + kind: + description: Kind identifies the specific + resource type within the group. + type: string + type: object + type: object + type: object + filters: + description: Filters defined at this level should be executed + if and only if the request is being forwarded to the + backend defined here. + items: + properties: + requestHeaderModifier: + description: RequestHeaderModifier defines a schema + for a filter that modifies request headers. + properties: + add: + description: Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. + Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). 
+ items: + type: string + type: array + set: + description: Set overwrites the request with + the given header (name, value) before the + action. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaderModifier: + description: ResponseHeaderModifier defines a schema + for a filter that modifies response headers. + properties: + add: + description: Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. + Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with + the given header (name, value) before the + action. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + urlRewrite: + description: URLRewrite defines a schema for a filter + that modifies a request during forwarding. + properties: + pathPrefix: + type: string + type: object + type: object + type: array + weight: + description: "Weight specifies the proportion of requests + forwarded to the referenced backend. This is computed + as weight/(sum of all weights in this BackendRefs list). + For non-zero values, there may be some epsilon from + the exact proportion defined here depending on the precision + an implementation supports. Weight is not a percentage + and the sum of weights does not need to equal 100. \n + If only one backend is specified and it has a weight + greater than 0, 100% of the traffic is forwarded to + that backend. 
If weight is set to 0, no traffic should + be forwarded for this entry. If unspecified, weight + defaults to 1." + format: int32 + type: integer + type: object + type: array + filters: + items: + properties: + requestHeaderModifier: + description: RequestHeaderModifier defines a schema for + a filter that modifies request headers. + properties: + add: + description: Add adds the given header(s) (name, value) + to the request before the action. It appends to + any existing values associated with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from the HTTP + request before the action. The value of Remove is + a list of HTTP header names. Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with the given + header (name, value) before the action. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + responseHeaderModifier: + description: ResponseHeaderModifier defines a schema for + a filter that modifies response headers. + properties: + add: + description: Add adds the given header(s) (name, value) + to the request before the action. It appends to + any existing values associated with the header name. + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + remove: + description: Remove the given header(s) from the HTTP + request before the action. The value of Remove is + a list of HTTP header names. Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + items: + type: string + type: array + set: + description: Set overwrites the request with the given + header (name, value) before the action. 
+ items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + urlRewrite: + description: URLRewrite defines a schema for a filter + that modifies a request during forwarding. + properties: + pathPrefix: + type: string + type: object + type: object + type: array + matches: + items: + properties: + headers: + description: Headers specifies HTTP request header matchers. + Multiple match values are ANDed together, meaning, a + request must match all the specified headers to select + the route. + items: + properties: + invert: + description: 'NOTE: not in gamma; service-router + compat' + type: boolean + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case insensitive. + (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent header + names, only the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST be + ignored. Due to the case-insensitivity of header + names, “foo” and “Foo” are considered equivalent. + \n When a header is repeated in an HTTP request, + it is implementation-specific behavior as to how + this is represented. Generally, proxies should + follow the guidance from the RFC: https://www.rfc-editor.org/rfc/rfc7230.html#section-3.2.2 + regarding processing a repeated header, with special + handling for “Set-Cookie”." + type: string + type: + description: Type specifies how to match against + the value of the header. + enum: + - HEADER_MATCH_TYPE_UNSPECIFIED + - HEADER_MATCH_TYPE_EXACT + - HEADER_MATCH_TYPE_REGEX + - HEADER_MATCH_TYPE_PRESENT + - HEADER_MATCH_TYPE_PREFIX + - HEADER_MATCH_TYPE_SUFFIX + format: int32 + type: string + value: + description: Value is the value of HTTP Header to + be matched. + type: string + type: object + type: array + method: + description: Method specifies HTTP method matcher. 
When + specified, this route will be matched only if the request + has the specified method. + type: string + path: + description: Path specifies a HTTP request path matcher. + If this field is not specified, a default prefix match + on the “/” path is provided. + properties: + type: + description: Type specifies how to match against the + path Value. + enum: + - PATH_MATCH_TYPE_UNSPECIFIED + - PATH_MATCH_TYPE_EXACT + - PATH_MATCH_TYPE_PREFIX + - PATH_MATCH_TYPE_REGEX + format: int32 + type: string + value: + description: Value of the HTTP path to match against. + type: string + type: object + queryParams: + description: QueryParams specifies HTTP query parameter + matchers. Multiple match values are ANDed together, + meaning, a request must match all the specified query + parameters to select the route. + items: + properties: + name: + description: "Name is the name of the HTTP query + param to be matched. This must be an exact string + match. (See https://tools.ietf.org/html/rfc7230#section-2.7.3). + \n If multiple entries specify equivalent query + param names, only the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent query param name MUST + be ignored. \n If a query param is repeated in + an HTTP request, the behavior is purposely left + undefined, since different data planes have different + capabilities. However, it is recommended that + implementations should match against the first + value of the param if the data plane supports + it, as this behavior is expected in other load + balancing contexts outside of the Gateway API. + \n Users SHOULD NOT route traffic based on repeated + query params to guard themselves against potential + differences in the implementations." + type: string + type: + description: Type specifies how to match against + the value of the query parameter. 
+ enum: + - QUERY_PARAM_MATCH_TYPE_UNSPECIFIED + - QUERY_PARAM_MATCH_TYPE_EXACT + - QUERY_PARAM_MATCH_TYPE_REGEX + - QUERY_PARAM_MATCH_TYPE_PRESENT + format: int32 + type: string + value: + description: Value is the value of HTTP query param + to be matched. + type: string + type: object + type: array + type: object + type: array + retries: + properties: + number: + description: Number is the number of times to retry the + request when a retryable result occurs. + properties: + value: + description: The uint32 value. + format: int32 + type: integer + type: object + onConditions: + description: RetryOn allows setting envoy specific conditions + when a request should be automatically retried. + items: + type: string + type: array + onConnectFailure: + description: RetryOnConnectFailure allows for connection + failure errors to trigger a retry. + type: boolean + onStatusCodes: + description: RetryOnStatusCodes is a flat list of http response + status codes that are eligible for retry. This again should + be feasible in any reasonable proxy. + items: + format: int32 + type: integer + type: array + type: object + timeouts: + description: HTTPRouteTimeouts defines timeouts that can be + configured for an HTTPRoute or GRPCRoute. + properties: + idle: + description: Idle specifies the total amount of time permitted + for the request stream to be idle. + format: duration + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less than + one second are represented with a 0 `seconds` field + and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. Must + be from -315,576,000,000 to +315,576,000,000 inclusive. 
+ Note: these bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + request: + description: RequestTimeout is the total amount of time + permitted for the entire downstream request (and retries) + to be processed. + format: duration + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less than + one second are represented with a 0 `seconds` field + and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. Must + be from -315,576,000,000 to +315,576,000,000 inclusive. + Note: these bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + type: object + type: object + type: array + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. 
+ type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/control-plane/config/crd/bases/mesh.consul.hashicorp.com_meshconfigurations.yaml b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_meshconfigurations.yaml new file mode 100644 index 0000000000..eb044ecb6c --- /dev/null +++ b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_meshconfigurations.yaml @@ -0,0 +1,95 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: meshconfigurations.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: MeshConfiguration + listKind: MeshConfigurationList + plural: meshconfigurations + singular: meshconfiguration + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: MeshConfiguration is the Schema for the Mesh Configuration + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MeshConfiguration is responsible for configuring the default + behavior of Mesh Gateways. This is a Resource type. + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. 
+ format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/control-plane/config/crd/bases/mesh.consul.hashicorp.com_meshgateways.yaml b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_meshgateways.yaml new file mode 100644 index 0000000000..47f2fcfba8 --- /dev/null +++ b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_meshgateways.yaml @@ -0,0 +1,129 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: meshgateways.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: MeshGateway + listKind: MeshGatewayList + plural: meshgateways + singular: meshgateway + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: MeshGateway is the Schema for the Mesh Gateway API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + gatewayClassName: + description: GatewayClassName is the name of the GatewayClass used + by the MeshGateway + type: string + listeners: + items: + properties: + name: + type: string + port: + format: int32 + maximum: 65535 + minimum: 0 + type: integer + protocol: + enum: + - TCP + type: string + type: object + minItems: 1 + type: array + workloads: + description: Selection of workloads to be configured as mesh gateways + properties: + filter: + type: string + names: + items: + type: string + type: array + prefixes: + items: + type: string + type: array + type: object + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. 
+ format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/control-plane/config/crd/bases/mesh.consul.hashicorp.com_proxyconfigurations.yaml b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_proxyconfigurations.yaml new file mode 100644 index 0000000000..4a505adeb9 --- /dev/null +++ b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_proxyconfigurations.yaml @@ -0,0 +1,400 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: proxyconfigurations.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: ProxyConfiguration + listKind: ProxyConfigurationList + plural: proxyconfigurations + shortNames: + - proxy-configuration + singular: proxyconfiguration + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: ProxyConfiguration is the Schema for the TCP Routes API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: This is a Resource type. + properties: + bootstrapConfig: + description: bootstrap_config is the configuration that requires proxies + to be restarted to be applied. + properties: + dogstatsdUrl: + type: string + overrideJsonTpl: + type: string + prometheusBindAddr: + type: string + readyBindAddr: + type: string + staticClustersJson: + type: string + staticListenersJson: + type: string + statsBindAddr: + type: string + statsConfigJson: + type: string + statsFlushInterval: + type: string + statsSinksJson: + type: string + statsTags: + items: + type: string + type: array + statsdUrl: + type: string + telemetryCollectorBindSocketDir: + type: string + tracingConfigJson: + type: string + type: object + dynamicConfig: + description: dynamic_config is the configuration that could be changed + dynamically (i.e. without needing restart). + properties: + accessLogs: + description: AccessLogs configures the output and format of Envoy + access logs + properties: + disableListenerLogs: + description: DisableListenerLogs turns off just listener logs + for connections rejected by Envoy because they don't have + a matching listener filter. + type: boolean + enabled: + description: Enabled turns off all access logging + type: boolean + jsonFormat: + description: The presence of one format string or the other + implies the access log string encoding. Defining both is + invalid. + type: string + path: + description: Path is the output file to write logs + type: string + textFormat: + type: string + type: + description: 'Type selects the output for logs: "file", "stderr". 
+ "stdout"' + enum: + - LOG_SINK_TYPE_DEFAULT + - LOG_SINK_TYPE_FILE + - LOG_SINK_TYPE_STDERR + - LOG_SINK_TYPE_STDOUT + format: int32 + type: string + type: object + exposeConfig: + properties: + exposePaths: + items: + properties: + listenerPort: + format: int32 + type: integer + localPathPort: + format: int32 + type: integer + path: + type: string + protocol: + enum: + - EXPOSE_PATH_PROTOCOL_HTTP + - EXPOSE_PATH_PROTOCOL_HTTP2 + format: int32 + type: string + type: object + type: array + type: object + inboundConnections: + description: inbound_connections configures inbound connections + to the proxy. + properties: + balanceInboundConnections: + enum: + - BALANCE_CONNECTIONS_DEFAULT + - BALANCE_CONNECTIONS_EXACT + format: int32 + type: string + maxInboundConnections: + format: int32 + type: integer + type: object + listenerTracingJson: + type: string + localClusterJson: + type: string + localConnection: + additionalProperties: + description: Referenced by ProxyConfiguration + properties: + connectTimeout: + description: "A Duration represents a signed, fixed-length + span of time represented as a count of seconds and fractions + of seconds at nanosecond resolution. It is independent + of any calendar and concepts like \"day\" or \"month\". + It is related to Timestamp in that the difference between + two Timestamp values is a Duration and it can be added + or subtracted from a Timestamp. Range is approximately + +-10,000 years. \n # Examples \n Example 1: Compute Duration + from two Timestamps in pseudo code. 
\n Timestamp start + = ...; Timestamp end = ...; Duration duration = ...; \n + duration.seconds = end.seconds - start.seconds; duration.nanos + = end.nanos - start.nanos; \n if (duration.seconds < 0 + && duration.nanos > 0) { duration.seconds += 1; duration.nanos + -= 1000000000; } else if (duration.seconds > 0 && duration.nanos + < 0) { duration.seconds -= 1; duration.nanos += 1000000000; + } \n Example 2: Compute Timestamp from Timestamp + Duration + in pseudo code. \n Timestamp start = ...; Duration duration + = ...; Timestamp end = ...; \n end.seconds = start.seconds + + duration.seconds; end.nanos = start.nanos + duration.nanos; + \n if (end.nanos < 0) { end.seconds -= 1; end.nanos += + 1000000000; } else if (end.nanos >= 1000000000) { end.seconds + += 1; end.nanos -= 1000000000; } \n Example 3: Compute + Duration from datetime.timedelta in Python. \n td = datetime.timedelta(days=3, + minutes=10) duration = Duration() duration.FromTimedelta(td) + \n # JSON Mapping \n In JSON format, the Duration type + is encoded as a string rather than an object, where the + string ends in the suffix \"s\" (indicating seconds) and + is preceded by the number of seconds, with nanoseconds + expressed as fractional seconds. For example, 3 seconds + with 0 nanoseconds should be encoded in JSON format as + \"3s\", while 3 seconds and 1 nanosecond should be expressed + in JSON format as \"3.000000001s\", and 3 seconds and + 1 microsecond should be expressed in JSON format as \"3.000001s\"." + format: duration + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less than + one second are represented with a 0 `seconds` field + and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 inclusive. 
+ format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. Must + be from -315,576,000,000 to +315,576,000,000 inclusive. + Note: these bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + requestTimeout: + description: "A Duration represents a signed, fixed-length + span of time represented as a count of seconds and fractions + of seconds at nanosecond resolution. It is independent + of any calendar and concepts like \"day\" or \"month\". + It is related to Timestamp in that the difference between + two Timestamp values is a Duration and it can be added + or subtracted from a Timestamp. Range is approximately + +-10,000 years. \n # Examples \n Example 1: Compute Duration + from two Timestamps in pseudo code. \n Timestamp start + = ...; Timestamp end = ...; Duration duration = ...; \n + duration.seconds = end.seconds - start.seconds; duration.nanos + = end.nanos - start.nanos; \n if (duration.seconds < 0 + && duration.nanos > 0) { duration.seconds += 1; duration.nanos + -= 1000000000; } else if (duration.seconds > 0 && duration.nanos + < 0) { duration.seconds -= 1; duration.nanos += 1000000000; + } \n Example 2: Compute Timestamp from Timestamp + Duration + in pseudo code. \n Timestamp start = ...; Duration duration + = ...; Timestamp end = ...; \n end.seconds = start.seconds + + duration.seconds; end.nanos = start.nanos + duration.nanos; + \n if (end.nanos < 0) { end.seconds -= 1; end.nanos += + 1000000000; } else if (end.nanos >= 1000000000) { end.seconds + += 1; end.nanos -= 1000000000; } \n Example 3: Compute + Duration from datetime.timedelta in Python. 
\n td = datetime.timedelta(days=3, + minutes=10) duration = Duration() duration.FromTimedelta(td) + \n # JSON Mapping \n In JSON format, the Duration type + is encoded as a string rather than an object, where the + string ends in the suffix \"s\" (indicating seconds) and + is preceded by the number of seconds, with nanoseconds + expressed as fractional seconds. For example, 3 seconds + with 0 nanoseconds should be encoded in JSON format as + \"3s\", while 3 seconds and 1 nanosecond should be expressed + in JSON format as \"3.000000001s\", and 3 seconds and + 1 microsecond should be expressed in JSON format as \"3.000001s\"." + format: duration + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less than + one second are represented with a 0 `seconds` field + and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. Must + be from -315,576,000,000 to +315,576,000,000 inclusive. + Note: these bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + type: object + description: local_connection is the configuration that should + be used to connect to the local application provided per-port. + The map keys should correspond to port names on the workload. + type: object + localWorkloadAddress: + description: "deprecated: local_workload_address, local_workload_port, + and local_workload_socket_path are deprecated and are only needed + for migration of existing resources. \n Deprecated: Marked as + deprecated in pbmesh/v2beta1/proxy_configuration.proto." 
+ type: string + localWorkloadPort: + description: 'Deprecated: Marked as deprecated in pbmesh/v2beta1/proxy_configuration.proto.' + format: int32 + type: integer + localWorkloadSocketPath: + description: 'Deprecated: Marked as deprecated in pbmesh/v2beta1/proxy_configuration.proto.' + type: string + meshGatewayMode: + enum: + - MESH_GATEWAY_MODE_UNSPECIFIED + - MESH_GATEWAY_MODE_NONE + - MESH_GATEWAY_MODE_LOCAL + - MESH_GATEWAY_MODE_REMOTE + format: int32 + type: string + mode: + description: mode indicates the proxy's mode. This will default + to 'transparent'. + enum: + - PROXY_MODE_DEFAULT + - PROXY_MODE_TRANSPARENT + - PROXY_MODE_DIRECT + format: int32 + type: string + mutualTlsMode: + enum: + - MUTUAL_TLS_MODE_DEFAULT + - MUTUAL_TLS_MODE_STRICT + - MUTUAL_TLS_MODE_PERMISSIVE + format: int32 + type: string + publicListenerJson: + type: string + transparentProxy: + properties: + dialedDirectly: + description: dialed_directly indicates whether this proxy + should be dialed using original destination IP in the connection + rather than load balance between all endpoints. + type: boolean + outboundListenerPort: + description: outbound_listener_port is the port for the proxy's + outbound listener. This defaults to 15001. + format: int32 + type: integer + type: object + type: object + opaqueConfig: + description: "deprecated: prevent usage when using v2 APIs directly. + needed for backwards compatibility \n Deprecated: Marked as deprecated + in pbmesh/v2beta1/proxy_configuration.proto." + type: object + x-kubernetes-preserve-unknown-fields: true + workloads: + description: Selection of workloads this proxy configuration should + apply to. These can be prefixes or specific workload names. 
+ properties: + filter: + type: string + names: + items: + type: string + type: array + prefixes: + items: + type: string + type: array + type: object + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/control-plane/config/crd/bases/mesh.consul.hashicorp.com_tcproutes.yaml b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_tcproutes.yaml new file mode 100644 index 0000000000..21a3a9c5ec --- /dev/null +++ b/control-plane/config/crd/bases/mesh.consul.hashicorp.com_tcproutes.yaml @@ -0,0 +1,273 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: tcproutes.mesh.consul.hashicorp.com +spec: + group: mesh.consul.hashicorp.com + names: + kind: TCPRoute + listKind: TCPRouteList + plural: tcproutes + shortNames: + - tcp-route + singular: tcproute + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2beta1 + schema: + openAPIV3Schema: + description: TCPRoute is the Schema for the TCP Route API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: "NOTE: this should align to the GAMMA/gateway-api version, + or at least be easily translatable. \n https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.TCPRoute + \n This is a Resource type." 
+ properties: + parentRefs: + description: "ParentRefs references the resources (usually Services) + that a Route wants to be attached to. \n It is invalid to reference + an identical parent more than once. It is valid to reference multiple + distinct sections within the same parent resource." + items: + description: 'NOTE: roughly equivalent to structs.ResourceReference' + properties: + port: + description: For east/west this is the name of the Consul Service + port to direct traffic to or empty to imply all. For north/south + this is TBD. + type: string + ref: + description: For east/west configuration, this should point + to a Service. For north/south it should point to a Gateway. + properties: + name: + description: Name is the user-given name of the resource + (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of the resource + the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units (i.e. + partition, namespace) in which the resource resides. + properties: + namespace: + description: "Namespace further isolates resources within + a partition. https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all namespaces." + type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all partitions." + type: string + peerName: + description: "PeerName identifies which peer the resource + is imported from. https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, provide + the wildcard value \"*\" to list resources across + all peers." 
+ type: string + type: object + type: + description: Type identifies the resource's type. + properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. "catalog", + "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when sweeping + or backward-incompatible changes are made to the group's + resource types. + type: string + kind: + description: Kind identifies the specific resource type + within the group. + type: string + type: object + type: object + type: object + type: array + rules: + description: Rules are a list of TCP matchers and actions. + items: + properties: + backendRefs: + description: BackendRefs defines the backend(s) where matching + requests should be sent. If unspecified or invalid (refers + to a non-existent resource or a Service with no endpoints), + the underlying implementation MUST actively reject connection + attempts to this backend. Connection rejections must respect + weight; if an invalid backend is requested to have 80% of + connections, then 80% of connections must be rejected instead. + items: + properties: + backendRef: + properties: + datacenter: + type: string + port: + description: "For east/west this is the name of the + Consul Service port to direct traffic to or empty + to imply using the same value as the parent ref. + \n For north/south this is TBD." + type: string + ref: + description: For east/west configuration, this should + point to a Service. + properties: + name: + description: Name is the user-given name of the + resource (e.g. the "billing" service). + type: string + section: + description: Section identifies which part of + the resource the condition relates to. + type: string + tenancy: + description: Tenancy identifies the tenancy units + (i.e. partition, namespace) in which the resource + resides. + properties: + namespace: + description: "Namespace further isolates resources + within a partition. 
https://developer.hashicorp.com/consul/docs/enterprise/namespaces + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all namespaces." + type: string + partition: + description: "Partition is the topmost administrative + boundary within a cluster. https://developer.hashicorp.com/consul/docs/enterprise/admin-partitions + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all partitions." + type: string + peerName: + description: "PeerName identifies which peer + the resource is imported from. https://developer.hashicorp.com/consul/docs/connect/cluster-peering + \n When using the List and WatchList endpoints, + provide the wildcard value \"*\" to list + resources across all peers." + type: string + type: object + type: + description: Type identifies the resource's type. + properties: + group: + description: Group describes the area of functionality + to which this resource type relates (e.g. + "catalog", "authorization"). + type: string + groupVersion: + description: GroupVersion is incremented when + sweeping or backward-incompatible changes + are made to the group's resource types. + type: string + kind: + description: Kind identifies the specific + resource type within the group. + type: string + type: object + type: object + type: object + weight: + description: "Weight specifies the proportion of requests + forwarded to the referenced backend. This is computed + as weight/(sum of all weights in this BackendRefs list). + For non-zero values, there may be some epsilon from + the exact proportion defined here depending on the precision + an implementation supports. Weight is not a percentage + and the sum of weights does not need to equal 100. \n + If only one backend is specified and it has a weight + greater than 0, 100% of the traffic is forwarded to + that backend. If weight is set to 0, no traffic should + be forwarded for this entry. 
If unspecified, weight + defaults to 1." + format: int32 + type: integer + type: object + type: array + type: object + type: array + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/control-plane/config/crd/bases/multicluster.consul.hashicorp.com_exportedservices.yaml b/control-plane/config/crd/bases/multicluster.consul.hashicorp.com_exportedservices.yaml new file mode 100644 index 0000000000..36020e3639 --- /dev/null +++ b/control-plane/config/crd/bases/multicluster.consul.hashicorp.com_exportedservices.yaml @@ -0,0 +1,103 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: exportedservices.multicluster.consul.hashicorp.com +spec: + group: multicluster.consul.hashicorp.com + names: + kind: ExportedServices + listKind: ExportedServicesList + plural: exportedservices + singular: exportedservices + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The sync status of the resource with Consul + jsonPath: .status.conditions[?(@.type=="Synced")].status + name: Synced + type: string + - description: The last successful synced time of the resource with Consul + jsonPath: .status.lastSyncedTime + name: Last Synced + type: date + - description: The age of the resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2 + schema: + openAPIV3Schema: + description: ExportedServices is the Schema for the Exported Services API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + consumers: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + services: + items: + type: string + type: array + type: object + status: + properties: + conditions: + description: Conditions indicate the latest available observations + of a resource's current state. + items: + description: 'Conditions define a readiness condition for a Consul + resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + lastSyncedTime: + description: LastSyncedTime is the last time the resource successfully + synced with Consul. 
+ format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/control-plane/config/rbac/role.yaml b/control-plane/config/rbac/role.yaml index 4fac4ac9b8..c2ad591c4f 100644 --- a/control-plane/config/rbac/role.yaml +++ b/control-plane/config/rbac/role.yaml @@ -25,6 +25,26 @@ rules: - secrets/status verbs: - get +- apiGroups: + - auth.consul.hashicorp.com + resources: + - trafficpermissions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - auth.consul.hashicorp.com + resources: + - trafficpermissions/status + verbs: + - get + - patch + - update - apiGroups: - consul.hashicorp.com resources: @@ -325,3 +345,183 @@ rules: - get - patch - update +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - gatewayclass + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - gatewayclass/status + verbs: + - get + - patch + - update +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - gatewayclassconfig + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - gatewayclassconfig/status + verbs: + - get + - patch + - update +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - grpcroute + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - grpcroute/status + verbs: + - get + - patch + - update +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - httproute + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - httproute/status + verbs: + - get + - patch + - update +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - meshconfiguration + verbs: + - create + - delete + - get + - list + - patch + - 
update + - watch +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - meshconfiguration/status + verbs: + - get + - patch + - update +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - meshgateway + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - meshgateway/status + verbs: + - get + - patch + - update +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - proxyconfiguration + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - proxyconfiguration/status + verbs: + - get + - patch + - update +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - tcproute + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - mesh.consul.hashicorp.com + resources: + - tcproute/status + verbs: + - get + - patch + - update +- apiGroups: + - multicluster.consul.hashicorp.com + resources: + - exportedservices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - multicluster.consul.hashicorp.com + resources: + - exportedservices/status + verbs: + - get + - patch + - update diff --git a/control-plane/config/webhook/manifests.yaml b/control-plane/config/webhook/manifests.yaml index 8194b4fc12..a4b3aaadd0 100644 --- a/control-plane/config/webhook/manifests.yaml +++ b/control-plane/config/webhook/manifests.yaml @@ -7,6 +7,90 @@ kind: MutatingWebhookConfiguration metadata: name: mutating-webhook-configuration webhooks: +- admissionReviewVersions: + - v1beta1 + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-v2beta1-grpcroute + failurePolicy: Fail + name: mutate-grpcroute.auth.consul.hashicorp.com + rules: + - apiGroups: + - auth.consul.hashicorp.com + apiVersions: + - v2beta1 + operations: + - CREATE + - UPDATE + resources: + - grpcroute + sideEffects: None +- 
admissionReviewVersions: + - v1beta1 + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-v2beta1-httproute + failurePolicy: Fail + name: mutate-httproute.auth.consul.hashicorp.com + rules: + - apiGroups: + - auth.consul.hashicorp.com + apiVersions: + - v2beta1 + operations: + - CREATE + - UPDATE + resources: + - httproute + sideEffects: None +- admissionReviewVersions: + - v1beta1 + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-v2beta1-proxyconfiguration + failurePolicy: Fail + name: mutate-proxyconfiguration.auth.consul.hashicorp.com + rules: + - apiGroups: + - auth.consul.hashicorp.com + apiVersions: + - v2beta1 + operations: + - CREATE + - UPDATE + resources: + - proxyconfiguration + sideEffects: None +- admissionReviewVersions: + - v1beta1 + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-v2beta1-tcproute + failurePolicy: Fail + name: mutate-tcproute.auth.consul.hashicorp.com + rules: + - apiGroups: + - auth.consul.hashicorp.com + apiVersions: + - v2beta1 + operations: + - CREATE + - UPDATE + resources: + - tcproute + sideEffects: None - admissionReviewVersions: - v1beta1 - v1 @@ -322,12 +406,6 @@ webhooks: resources: - terminatinggateways sideEffects: None ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - name: validating-webhook-configuration -webhooks: - admissionReviewVersions: - v1beta1 - v1 @@ -335,20 +413,26 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-v1alpha1-gatewaypolicy + path: /mutate-v2beta1-trafficpermissions failurePolicy: Fail - name: validate-gatewaypolicy.consul.hashicorp.com + name: mutate-trafficpermissions.auth.consul.hashicorp.com rules: - apiGroups: - - consul.hashicorp.com + - auth.consul.hashicorp.com apiVersions: - - v1alpha1 + - v2beta1 operations: - CREATE - UPDATE resources: - - gatewaypolicies + - trafficpermissions 
sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration +webhooks: - admissionReviewVersions: - v1beta1 - v1 @@ -356,9 +440,9 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-v1alpha1-registration + path: /validate-v1alpha1-gatewaypolicy failurePolicy: Fail - name: validate-registration.consul.hashicorp.com + name: validate-gatewaypolicy.consul.hashicorp.com rules: - apiGroups: - consul.hashicorp.com @@ -368,5 +452,5 @@ webhooks: - CREATE - UPDATE resources: - - registrations + - gatewaypolicies sideEffects: None diff --git a/control-plane/connect-inject/common/annotation_processor.go b/control-plane/connect-inject/common/annotation_processor.go new file mode 100644 index 0000000000..e9ef6d2eff --- /dev/null +++ b/control-plane/connect-inject/common/annotation_processor.go @@ -0,0 +1,266 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package common + +import ( + "fmt" + "strings" + + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + corev1 "k8s.io/api/core/v1" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" +) + +const ( + ConsulNodeAddress = "127.0.0.1" +) + +// ProcessPodDestinationsForMeshWebhook reads the list of destinations from the Pod annotation and converts them into a pbmesh.Destinations +// object. +func ProcessPodDestinationsForMeshWebhook(pod corev1.Pod) (*pbmesh.Destinations, error) { + return ProcessPodDestinations(pod, true, true) +} + +// ProcessPodDestinations reads the list of destinations from the Pod annotation and converts them into a pbmesh.Destinations +// object. 
+func ProcessPodDestinations(pod corev1.Pod, enablePartitions, enableNamespaces bool) (*pbmesh.Destinations, error) { + destinations := &pbmesh.Destinations{} + raw, ok := pod.Annotations[constants.AnnotationMeshDestinations] + if !ok || raw == "" { + return nil, nil + } + + destinations.Workloads = &pbcatalog.WorkloadSelector{ + Names: []string{pod.Name}, + } + + for _, raw := range strings.Split(raw, ",") { + var destination *pbmesh.Destination + + // Determine the type of processing required unlabeled or labeled + // [service-port-name].[service-name].[service-namespace].[service-partition]:[port]:[optional datacenter] + // or + // [service-port-name].port.[service-name].svc.[service-namespace].ns.[service-peer].peer:[port] + // [service-port-name].port.[service-name].svc.[service-namespace].ns.[service-partition].ap:[port] + // [service-port-name].port.[service-name].svc.[service-namespace].ns.[service-datacenter].dc:[port] + + // Scan the string for the annotation keys. + // Even if the first key is missing, and the order is unexpected, we should let the processing + // provide us with errors + labeledFormat := false + keys := []string{"port", "svc", "ns", "ap", "peer", "dc"} + for _, v := range keys { + if strings.Contains(raw, fmt.Sprintf(".%s.", v)) || strings.Contains(raw, fmt.Sprintf(".%s:", v)) { + labeledFormat = true + break + } + } + + if labeledFormat { + var err error + destination, err = processPodLabeledDestination(pod, raw, enablePartitions, enableNamespaces) + if err != nil { + return nil, err + } + } else { + var err error + destination, err = processPodUnlabeledDestination(pod, raw, enablePartitions, enableNamespaces) + if err != nil { + return nil, err + } + } + + destinations.Destinations = append(destinations.Destinations, destination) + } + + return destinations, nil +} + +// processPodLabeledDestination processes a destination in the format: +// [service-port-name].port.[service-name].svc.[service-namespace].ns.[service-peer].peer:[port] 
+// [service-port-name].port.[service-name].svc.[service-namespace].ns.[service-partition].ap:[port] +// [service-port-name].port.[service-name].svc.[service-namespace].ns.[service-datacenter].dc:[port]. +// peer/ap/dc are mutually exclusive. At minimum service-port-name and service-name are required. +// The ordering matters for labeled as well as unlabeled. The ordering of the labeled parameters should follow +// the order and requirements of the unlabeled parameters. +// TODO: enable dc and peer support when ready, currently return errors if set. +func processPodLabeledDestination(pod corev1.Pod, rawUpstream string, enablePartitions, enableNamespaces bool) (*pbmesh.Destination, error) { + parts := strings.SplitN(rawUpstream, ":", 3) + var port int32 + port, _ = PortValue(pod, strings.TrimSpace(parts[1])) + if port <= 0 { + return nil, fmt.Errorf("port value %d in destination is invalid: %s", port, rawUpstream) + } + + service := parts[0] + pieces := strings.Split(service, ".") + + var portName, datacenter, svcName, namespace, partition string + if enablePartitions || enableNamespaces { + switch len(pieces) { + case 8: + end := strings.TrimSpace(pieces[7]) + switch end { + case "peer": + // TODO: uncomment and remove error when peers supported + // peer = strings.TrimSpace(pieces[6]) + return nil, fmt.Errorf("destination currently does not support peers: %s", rawUpstream) + case "ap": + partition = strings.TrimSpace(pieces[6]) + case "dc": + // TODO: uncomment and remove error when datacenters are supported + // datacenter = strings.TrimSpace(pieces[6]) + return nil, fmt.Errorf("destination currently does not support datacenters: %s", rawUpstream) + default: + return nil, fmt.Errorf("destination structured incorrectly: %s", rawUpstream) + } + fallthrough + case 6: + if strings.TrimSpace(pieces[5]) == "ns" { + namespace = strings.TrimSpace(pieces[4]) + } else { + return nil, fmt.Errorf("destination structured incorrectly: %s", rawUpstream) + } + fallthrough + case 
4: + if strings.TrimSpace(pieces[3]) == "svc" { + svcName = strings.TrimSpace(pieces[2]) + } else { + return nil, fmt.Errorf("destination structured incorrectly: %s", rawUpstream) + } + if strings.TrimSpace(pieces[1]) == "port" { + portName = strings.TrimSpace(pieces[0]) + } else { + return nil, fmt.Errorf("destination structured incorrectly: %s", rawUpstream) + } + default: + return nil, fmt.Errorf("destination structured incorrectly: %s", rawUpstream) + } + } else { + switch len(pieces) { + case 6: + end := strings.TrimSpace(pieces[5]) + switch end { + case "peer": + // TODO: uncomment and remove error when peers supported + // peer = strings.TrimSpace(pieces[4]) + return nil, fmt.Errorf("destination currently does not support peers: %s", rawUpstream) + case "dc": + // TODO: uncomment and remove error when datacenter supported + // datacenter = strings.TrimSpace(pieces[4]) + return nil, fmt.Errorf("destination currently does not support datacenters: %s", rawUpstream) + default: + return nil, fmt.Errorf("destination structured incorrectly: %s", rawUpstream) + } + // TODO: uncomment and remove error when datacenter and/or peers supported + // fallthrough + case 4: + if strings.TrimSpace(pieces[3]) == "svc" { + svcName = strings.TrimSpace(pieces[2]) + } else { + return nil, fmt.Errorf("destination structured incorrectly: %s", rawUpstream) + } + if strings.TrimSpace(pieces[1]) == "port" { + portName = strings.TrimSpace(pieces[0]) + } else { + return nil, fmt.Errorf("destination structured incorrectly: %s", rawUpstream) + } + default: + return nil, fmt.Errorf("destination structured incorrectly: %s", rawUpstream) + } + } + + destination := pbmesh.Destination{ + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(partition), + Namespace: constants.GetNormalizedConsulNamespace(namespace), + }, + Name: svcName, + }, + DestinationPort: portName, + Datacenter: datacenter, 
+ ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(port), + Ip: ConsulNodeAddress, + }, + }, + } + + return &destination, nil +} + +// processPodUnlabeledDestination processes a destination in the format: +// [service-port-name].[service-name].[service-namespace].[service-partition]:[port]:[optional datacenter]. +// There is no unlabeled field for peering. +// TODO: enable dc and peer support when ready, currently return errors if set. +func processPodUnlabeledDestination(pod corev1.Pod, rawUpstream string, enablePartitions, enableNamespaces bool) (*pbmesh.Destination, error) { + var portName, datacenter, svcName, namespace, partition string + var port int32 + var destination pbmesh.Destination + + parts := strings.SplitN(rawUpstream, ":", 3) + + port, _ = PortValue(pod, strings.TrimSpace(parts[1])) + + // If Consul Namespaces or Admin Partitions are enabled, attempt to parse the + // destination for a namespace. + if enableNamespaces || enablePartitions { + pieces := strings.SplitN(parts[0], ".", 4) + switch len(pieces) { + case 4: + partition = strings.TrimSpace(pieces[3]) + fallthrough + case 3: + namespace = strings.TrimSpace(pieces[2]) + fallthrough + case 2: + svcName = strings.TrimSpace(pieces[1]) + portName = strings.TrimSpace(pieces[0]) + default: + return nil, fmt.Errorf("destination structured incorrectly: %s", rawUpstream) + } + } else { + pieces := strings.SplitN(parts[0], ".", 2) + if len(pieces) < 2 { + return nil, fmt.Errorf("destination structured incorrectly: %s", rawUpstream) + } + svcName = strings.TrimSpace(pieces[1]) + portName = strings.TrimSpace(pieces[0]) + } + + // parse the optional datacenter + if len(parts) > 2 { + // TODO: uncomment and remove error when datacenters supported + // datacenter = strings.TrimSpace(parts[2]) + return nil, fmt.Errorf("destination currently does not support datacenters: %s", rawUpstream) + } + + if port > 0 { + destination = pbmesh.Destination{ + DestinationRef: 
&pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(partition), + Namespace: constants.GetNormalizedConsulNamespace(namespace), + }, + Name: svcName, + }, + DestinationPort: portName, + Datacenter: datacenter, + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(port), + Ip: ConsulNodeAddress, + }, + }, + } + } + return &destination, nil +} diff --git a/control-plane/connect-inject/common/annotation_processor_test.go b/control-plane/connect-inject/common/annotation_processor_test.go new file mode 100644 index 0000000000..3757e26154 --- /dev/null +++ b/control-plane/connect-inject/common/annotation_processor_test.go @@ -0,0 +1,1001 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package common + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/consul/api" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/testing/protocmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" +) + +func TestProcessUpstreams(t *testing.T) { + t.Parallel() + + const podName = "pod1" + + cases := []struct { + name string + pod func() *corev1.Pod + expected *pbmesh.Destinations + expErr string + configEntry func() api.ConfigEntry + consulUnavailable bool + consulNamespacesEnabled bool + consulPartitionsEnabled bool + }{ + { + name: "labeled annotated destination with svc only", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc:1234") + return pod1 + }, + expected: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + 
Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(""), + Namespace: constants.GetNormalizedConsulNamespace(""), + }, + Name: "upstream1", + }, + DestinationPort: "myPort", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: ConsulNodeAddress, + }, + }, + }, + }, + }, + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "labeled annotated destination with svc and dc", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.dc1.dc:1234") + return pod1 + }, + expErr: "destination currently does not support datacenters: myPort.port.upstream1.svc.dc1.dc:1234", + // TODO: uncomment this and remove expErr when datacenters is supported + //expected: &pbmesh.Destinations{ + // Workloads: &pbcatalog.WorkloadSelector{ + // Names: []string{podName}, + // }, + // Upstreams: []*pbmesh.Destination{ + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: constants.GetNormalizedConsulPartition(""), + // Namespace: constants.GetNormalizedConsulNamespace(""), + // PeerName: constants.GetNormalizedConsulPeer(""), + // }, + // Name: "upstream1", + // }, + // DestinationPort: "myPort", + // Datacenter: "dc1", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(1234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // }, + //}, + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "labeled annotated destination with svc and peer", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.peer1.peer:1234") + return pod1 + }, + expErr: "destination currently does not support peers: myPort.port.upstream1.svc.peer1.peer:1234", + // 
TODO: uncomment this and remove expErr when peers is supported + //expected: &pbmesh.Destinations{ + // Workloads: &pbcatalog.WorkloadSelector{ + // Names: []string{podName}, + // }, + // Upstreams: []*pbmesh.Destination{ + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: constants.GetNormalizedConsulPartition(""), + // Namespace: constants.GetNormalizedConsulNamespace(""), + // PeerName: "peer1", + // }, + // Name: "upstream1", + // }, + // DestinationPort: "myPort", + // Datacenter: "", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(1234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // }, + //}, + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "labeled annotated destination with svc, ns, and peer", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.ns1.ns.peer1.peer:1234") + return pod1 + }, + expErr: "destination currently does not support peers: myPort.port.upstream1.svc.ns1.ns.peer1.peer:1234", + // TODO: uncomment this and remove expErr when peers is supported + //expected: &pbmesh.Destinations{ + // Workloads: &pbcatalog.WorkloadSelector{ + // Names: []string{podName}, + // }, + // Upstreams: []*pbmesh.Destination{ + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: constants.GetNormalizedConsulPartition(""), + // Namespace: "ns1", + // PeerName: "peer1", + // }, + // Name: "upstream1", + // }, + // DestinationPort: "myPort", + // Datacenter: "", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(1234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // }, + //}, + consulNamespacesEnabled: true, + consulPartitionsEnabled: false, + }, + { + name: "labeled annotated destination with svc, ns, and partition", + 
pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.ns1.ns.part1.ap:1234") + return pod1 + }, + expected: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: "part1", + Namespace: "ns1", + }, + Name: "upstream1", + }, + DestinationPort: "myPort", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: ConsulNodeAddress, + }, + }, + }, + }, + }, + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + { + name: "labeled annotated destination with svc, ns, and dc", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.ns1.ns.dc1.dc:1234") + return pod1 + }, + expErr: "destination currently does not support datacenters: myPort.port.upstream1.svc.ns1.ns.dc1.dc:1234", + // TODO: uncomment this and remove expErr when datacenters is supported + //expected: &pbmesh.Destinations{ + // Workloads: &pbcatalog.WorkloadSelector{ + // Names: []string{podName}, + // }, + // Upstreams: []*pbmesh.Destination{ + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: constants.GetNormalizedConsulPartition(""), + // Namespace: "ns1", + // PeerName: constants.GetNormalizedConsulPeer(""), + // }, + // Name: "upstream1", + // }, + // DestinationPort: "myPort", + // Datacenter: "dc1", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(1234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // }, + //}, + consulNamespacesEnabled: true, + consulPartitionsEnabled: false, + }, + { + name: "labeled multiple annotated destinations", + pod: func() *corev1.Pod { + pod1 := createPod(podName, 
"myPort.port.upstream1.svc.ns1.ns:1234, myPort2.port.upstream2.svc:2234, myPort4.port.upstream4.svc.ns1.ns.ap1.ap:4234") + return pod1 + }, + expected: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(""), + Namespace: "ns1", + }, + Name: "upstream1", + }, + DestinationPort: "myPort", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: ConsulNodeAddress, + }, + }, + }, + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(""), + Namespace: constants.GetNormalizedConsulNamespace(""), + }, + Name: "upstream2", + }, + DestinationPort: "myPort2", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(2234), + Ip: ConsulNodeAddress, + }, + }, + }, + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: "ap1", + Namespace: "ns1", + }, + Name: "upstream4", + }, + DestinationPort: "myPort4", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(4234), + Ip: ConsulNodeAddress, + }, + }, + }, + }, + }, + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + { + name: "labeled multiple annotated destinations with dcs and peers", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.ns1.ns.dc1.dc:1234, myPort2.port.upstream2.svc:2234, myPort3.port.upstream3.svc.ns1.ns:3234, myPort4.port.upstream4.svc.ns1.ns.peer1.peer:4234") + return pod1 + }, + expErr: "destination currently does not support datacenters: 
myPort.port.upstream1.svc.ns1.ns.dc1.dc:1234", + // TODO: uncomment this and remove expErr when datacenters is supported + //expected: &pbmesh.Destinations{ + // Workloads: &pbcatalog.WorkloadSelector{ + // Names: []string{podName}, + // }, + // Upstreams: []*pbmesh.Destination{ + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: constants.GetNormalizedConsulPartition(""), + // Namespace: "ns1", + // PeerName: constants.GetNormalizedConsulPeer(""), + // }, + // Name: "upstream1", + // }, + // DestinationPort: "myPort", + // Datacenter: "dc1", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(1234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: constants.GetNormalizedConsulPartition(""), + // Namespace: constants.GetNormalizedConsulNamespace(""), + // PeerName: constants.GetNormalizedConsulPeer(""), + // }, + // Name: "upstream2", + // }, + // DestinationPort: "myPort2", + // Datacenter: "", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(2234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: constants.GetNormalizedConsulPartition(""), + // Namespace: "ns1", + // PeerName: constants.GetNormalizedConsulPeer(""), + // }, + // Name: "upstream3", + // }, + // DestinationPort: "myPort3", + // Datacenter: "", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(3234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: 
constants.GetNormalizedConsulPartition(""), + // Namespace: "ns1", + // PeerName: "peer1", + // }, + // Name: "upstream4", + // }, + // DestinationPort: "myPort4", + // Datacenter: "", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(4234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // }, + //}, + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + { + name: "error labeled annotated destination error: invalid partition/dc/peer", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.ns1.ns.part1.err:1234") + return pod1 + }, + expErr: "destination structured incorrectly: myPort.port.upstream1.svc.ns1.ns.part1.err:1234", + consulNamespacesEnabled: true, + consulPartitionsEnabled: false, + }, + { + name: "error labeled annotated destination with svc and peer, needs ns before peer if namespaces enabled", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.peer1.peer:1234") + return pod1 + }, + expErr: "destination structured incorrectly: myPort.port.upstream1.svc.peer1.peer:1234", + consulNamespacesEnabled: true, + consulPartitionsEnabled: false, + }, + { + name: "error labeled annotated destination error: invalid namespace", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.ns1.err:1234") + return pod1 + }, + expErr: "destination structured incorrectly: myPort.port.upstream1.svc.ns1.err:1234", + consulNamespacesEnabled: true, + consulPartitionsEnabled: false, + }, + { + name: "error labeled annotated destination error: invalid number of pieces in the address", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.err:1234") + return pod1 + }, + expErr: "destination structured incorrectly: myPort.port.upstream1.svc.err:1234", + consulNamespacesEnabled: true, + consulPartitionsEnabled: false, + }, + { + name: "error labeled annotated destination error: invalid peer", 
+ pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.peer1.err:1234") + return pod1 + }, + expErr: "destination structured incorrectly: myPort.port.upstream1.svc.peer1.err:1234", + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "error labeled annotated destination error: invalid number of pieces in the address without namespaces and partitions", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.err:1234") + return pod1 + }, + expErr: "destination structured incorrectly: myPort.port.upstream1.svc.err:1234", + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "error labeled annotated destination error: both peer and partition provided", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.ns1.ns.part1.partition.peer1.peer:1234") + return pod1 + }, + expErr: "destination structured incorrectly: myPort.port.upstream1.svc.ns1.ns.part1.partition.peer1.peer:1234", + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + { + name: "error labeled annotated destination error: both peer and dc provided", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.ns1.ns.peer1.peer.dc1.dc:1234") + return pod1 + }, + expErr: "destination structured incorrectly: myPort.port.upstream1.svc.ns1.ns.peer1.peer.dc1.dc:1234", + consulNamespacesEnabled: true, + consulPartitionsEnabled: false, + }, + { + name: "error labeled annotated destination error: both dc and partition provided", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.ns1.ns.part1.partition.dc1.dc:1234") + return pod1 + }, + expErr: "destination structured incorrectly: myPort.port.upstream1.svc.ns1.ns.part1.partition.dc1.dc:1234", + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + { + name: "error labeled annotated destination error: wrong ordering for port and svc 
with namespace partition enabled", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "upstream1.svc.myPort.port.ns1.ns.part1.partition.dc1.dc:1234") + return pod1 + }, + expErr: "destination structured incorrectly: upstream1.svc.myPort.port.ns1.ns.part1.partition.dc1.dc:1234", + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + { + name: "error labeled annotated destination error: wrong ordering for port and svc with namespace partition disabled", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "upstream1.svc.myPort.port:1234") + return pod1 + }, + expErr: "destination structured incorrectly: upstream1.svc.myPort.port:1234", + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "error labeled annotated destination error: incorrect key name namespace partition enabled", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.portage.upstream1.svc.ns1.ns.part1.partition.dc1.dc:1234") + return pod1 + }, + expErr: "destination structured incorrectly: myPort.portage.upstream1.svc.ns1.ns.part1.partition.dc1.dc:1234", + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + { + name: "error labeled annotated destination error: incorrect key name namespace partition disabled", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.portage.upstream1.svc:1234") + return pod1 + }, + expErr: "destination structured incorrectly: myPort.portage.upstream1.svc:1234", + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "error labeled missing port name", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "upstream1.svc:1234") + return pod1 + }, + expErr: "destination structured incorrectly: upstream1.svc:1234", + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "error labeled missing port name namespace partition enabled", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "upstream1.svc:1234") + return 
pod1 + }, + expErr: "destination structured incorrectly: upstream1.svc:1234", + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + { + name: "unlabeled and labeled multiple annotated destinations", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.port.upstream1.svc.ns1.ns:1234, myPort2.upstream2:2234, myPort4.port.upstream4.svc.ns1.ns.ap1.ap:4234") + return pod1 + }, + expected: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(""), + Namespace: "ns1", + }, + Name: "upstream1", + }, + DestinationPort: "myPort", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: ConsulNodeAddress, + }, + }, + }, + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(""), + Namespace: constants.GetNormalizedConsulNamespace(""), + }, + Name: "upstream2", + }, + DestinationPort: "myPort2", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(2234), + Ip: ConsulNodeAddress, + }, + }, + }, + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: "ap1", + Namespace: "ns1", + }, + Name: "upstream4", + }, + DestinationPort: "myPort4", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(4234), + Ip: ConsulNodeAddress, + }, + }, + }, + }, + }, + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + { + name: "unlabeled single destination", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.upstream:1234") + return pod1 + }, + expected: 
&pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(""), + Namespace: constants.GetNormalizedConsulNamespace(""), + }, + Name: "upstream", + }, + DestinationPort: "myPort", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: ConsulNodeAddress, + }, + }, + }, + }, + }, + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "unlabeled single destination with namespace", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.upstream.foo:1234") + return pod1 + }, + expected: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(""), + Namespace: "foo", + }, + Name: "upstream", + }, + DestinationPort: "myPort", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: ConsulNodeAddress, + }, + }, + }, + }, + }, + consulNamespacesEnabled: true, + consulPartitionsEnabled: false, + }, + { + name: "unlabeled single destination with namespace and partition", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.upstream.foo.bar:1234") + return pod1 + }, + expected: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: "bar", + Namespace: "foo", + }, + Name: "upstream", + }, + DestinationPort: "myPort", + Datacenter: 
"", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: ConsulNodeAddress, + }, + }, + }, + }, + }, + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + { + name: "unlabeled multiple destinations", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.upstream1:1234, myPort2.upstream2:2234") + return pod1 + }, + expected: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(""), + Namespace: constants.GetNormalizedConsulNamespace(""), + }, + Name: "upstream1", + }, + DestinationPort: "myPort", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: ConsulNodeAddress, + }, + }, + }, + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(""), + Namespace: constants.GetNormalizedConsulNamespace(""), + }, + Name: "upstream2", + }, + DestinationPort: "myPort2", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(2234), + Ip: ConsulNodeAddress, + }, + }, + }, + }, + }, + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "unlabeled multiple destinations with consul namespaces, partitions and datacenters", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "myPort.upstream1:1234, myPort2.upstream2.bar:2234, myPort3.upstream3.foo.baz:3234:dc2") + return pod1 + }, + configEntry: func() api.ConfigEntry { + ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "global") + pd := ce.(*api.ProxyConfigEntry) + pd.MeshGateway.Mode = "remote" + return pd + }, + expErr: "destination currently does not support 
datacenters: myPort3.upstream3.foo.baz:3234:dc2", + // TODO: uncomment this and remove expErr when datacenters is supported + //expected: &pbmesh.Destinations{ + // Workloads: &pbcatalog.WorkloadSelector{ + // Names: []string{podName}, + // }, + // Upstreams: []*pbmesh.Destination{ + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: constants.GetNormalizedConsulPartition(""), + // Namespace: constants.GetNormalizedConsulNamespace(""), + // PeerName: constants.GetNormalizedConsulPeer(""), + // }, + // Name: "upstream1", + // }, + // DestinationPort: "myPort", + // Datacenter: "", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(1234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: constants.GetNormalizedConsulPartition(""), + // Namespace: "bar", + // PeerName: constants.GetNormalizedConsulPeer(""), + // }, + // Name: "upstream2", + // }, + // DestinationPort: "myPort2", + // Datacenter: "", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(2234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: "baz", + // Namespace: "foo", + // PeerName: constants.GetNormalizedConsulPeer(""), + // }, + // Name: "upstream3", + // }, + // DestinationPort: "myPort3", + // Datacenter: "dc2", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(3234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // }, + //}, + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + { + name: "unlabeled multiple destinations with consul namespaces and datacenters", + pod: func() 
*corev1.Pod { + pod1 := createPod(podName, "myPort.upstream1:1234, myPort2.upstream2.bar:2234, myPort3.upstream3.foo:3234:dc2") + return pod1 + }, + configEntry: func() api.ConfigEntry { + ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "global") + pd := ce.(*api.ProxyConfigEntry) + pd.MeshGateway.Mode = "remote" + return pd + }, + expErr: "destination currently does not support datacenters: myPort3.upstream3.foo:3234:dc2", + // TODO: uncomment this and remove expErr when datacenters is supported + //expected: &pbmesh.Destinations{ + // Workloads: &pbcatalog.WorkloadSelector{ + // Names: []string{podName}, + // }, + // Upstreams: []*pbmesh.Destination{ + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: constants.GetNormalizedConsulPartition(""), + // Namespace: constants.GetNormalizedConsulNamespace(""), + // PeerName: constants.GetNormalizedConsulPeer(""), + // }, + // Name: "upstream1", + // }, + // DestinationPort: "myPort", + // Datacenter: "", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(1234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: constants.GetNormalizedConsulPartition(""), + // Namespace: "bar", + // PeerName: constants.GetNormalizedConsulPeer(""), + // }, + // Name: "upstream2", + // }, + // DestinationPort: "myPort2", + // Datacenter: "", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(2234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // { + // DestinationRef: &pbresource.Reference{ + // Type: pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: constants.GetNormalizedConsulPartition(""), + // Namespace: "foo", + // PeerName: constants.GetNormalizedConsulPeer(""), + // }, + // Name: 
"upstream3", + // }, + // DestinationPort: "myPort3", + // Datacenter: "dc2", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(3234), + // Ip: ConsulNodeAddress, + // }, + // }, + // }, + // }, + //}, + consulNamespacesEnabled: true, + }, + { + name: "error unlabeled missing port name with namespace and partition disabled", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "upstream1:1234") + return pod1 + }, + expErr: "destination structured incorrectly: upstream1:1234", + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "error unlabeled missing port name with namespace and partition enabled", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "upstream1:1234") + return pod1 + }, + expErr: "destination structured incorrectly: upstream1:1234", + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + destinations, err := ProcessPodDestinations(*tt.pod(), tt.consulNamespacesEnabled, tt.consulPartitionsEnabled) + if tt.expErr != "" { + require.EqualError(t, err, tt.expErr) + } else { + require.NoError(t, err) + require.Equal(t, tt.expected, destinations) + + if diff := cmp.Diff(tt.expected, destinations, protocmp.Transform()); diff != "" { + t.Errorf("unexpected difference:\n%v", diff) + } + } + }) + } +} + +// createPod creates a multi-port pod as a base for tests. 
+func createPod(name string, annotation string) *corev1.Pod { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + pod.Annotations = map[string]string{ + constants.AnnotationMeshDestinations: annotation, + } + return pod +} diff --git a/control-plane/connect-inject/common/common.go b/control-plane/connect-inject/common/common.go index 63d0e4314a..569b4d96e6 100644 --- a/control-plane/connect-inject/common/common.go +++ b/control-plane/connect-inject/common/common.go @@ -9,10 +9,14 @@ import ( "strings" mapset "github.com/deckarep/golang-set" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" ) @@ -69,6 +73,20 @@ func PortValue(pod corev1.Pod, value string) (int32, error) { return int32(raw), err } +// WorkloadPortName returns the container port's name if it has one, and if not, constructs a name from the port number +// and adds a constant prefix. The port name must be 1-15 characters and must have at least 1 alpha character. +func WorkloadPortName(port *corev1.ContainerPort) string { + name := port.Name + var isNum bool + if _, err := strconv.Atoi(name); err == nil { + isNum = true + } + if name == "" || isNum { + name = constants.UnnamedWorkloadPortNamePrefix + strconv.Itoa(int(port.ContainerPort)) + } + return name +} + // TransparentProxyEnabled returns true if transparent proxy should be enabled for this pod. // It returns an error when the annotation value cannot be parsed by strconv.ParseBool or if we are unable // to read the pod's namespace label when it exists. 
@@ -119,6 +137,85 @@ func ConsulNodeNameFromK8sNode(nodeName string) string { return fmt.Sprintf("%s-virtual", nodeName) } +// ******************** +// V2 Exclusive Common Code +// ******************** + +// ToProtoAny is a convenience function for converting proto.Message values to anypb.Any without error handling. +// This should _only_ be used in cases where a nil or valid proto.Message value is _guaranteed_, else it will panic. +// If the type of m is *anypb.Any, that value will be returned unmodified. +func ToProtoAny(m proto.Message) *anypb.Any { + switch v := m.(type) { + case nil: + return nil + case *anypb.Any: + return v + } + a, err := anypb.New(m) + if err != nil { + panic(fmt.Errorf("unexpected error: failed to convert proto message to anypb.Any: %w", err)) + } + return a +} + +// GetPortProtocol matches the Kubernetes EndpointPort.AppProtocol or ServicePort.AppProtocol (*string) to a supported +// Consul catalog port protocol. If nil or unrecognized, the default of `PROTOCOL_UNSPECIFIED` is returned. +func GetPortProtocol(appProtocol *string) pbcatalog.Protocol { + if appProtocol == nil { + return pbcatalog.Protocol_PROTOCOL_UNSPECIFIED + } + switch *appProtocol { + case "tcp": + return pbcatalog.Protocol_PROTOCOL_TCP + case "http": + return pbcatalog.Protocol_PROTOCOL_HTTP + case "http2": + return pbcatalog.Protocol_PROTOCOL_HTTP2 + case "grpc": + return pbcatalog.Protocol_PROTOCOL_GRPC + } + // If unrecognized or empty string, return default + return pbcatalog.Protocol_PROTOCOL_UNSPECIFIED +} + +// PortValueFromIntOrString returns the integer port value from the port that can be +// a named port, an integer string (e.g. "80"), or an integer. If the port is a named port, +// this function will attempt to find the value from the containers of the pod. 
+func PortValueFromIntOrString(pod corev1.Pod, port intstr.IntOrString) (uint32, error) { + if port.Type == intstr.Int { + return uint32(port.IntValue()), nil + } + + // Otherwise, find named port or try to parse the string as an int. + portVal, err := PortValue(pod, port.StrVal) + if err != nil { + return 0, err + } + return uint32(portVal), nil +} + +// HasBeenMeshInjected checks the value of the status annotation and returns true if the Pod has been injected. +// Does not apply to V1 pods, which use a different key (`constants.KeyInjectStatus`). +func HasBeenMeshInjected(pod corev1.Pod) bool { + if pod.Annotations == nil { + return false + } + if anno, ok := pod.Annotations[constants.KeyMeshInjectStatus]; ok && anno == constants.Injected { + return true + } + return false +} + +func IsGateway(pod corev1.Pod) bool { + if pod.Annotations == nil { + return false + } + if anno, ok := pod.Annotations[constants.AnnotationGatewayKind]; ok && anno != "" { + return true + } + return false +} + // ConsulNamespaceIsNotFound checks the gRPC error code and message to determine // if a namespace does not exist. If the namespace exists this function returns false, true otherwise. 
func ConsulNamespaceIsNotFound(err error) bool { diff --git a/control-plane/connect-inject/common/common_test.go b/control-plane/connect-inject/common/common_test.go index 4cd58fea81..2c35315b89 100644 --- a/control-plane/connect-inject/common/common_test.go +++ b/control-plane/connect-inject/common/common_test.go @@ -4,17 +4,26 @@ package common import ( + "context" "fmt" "testing" mapset "github.com/deckarep/golang-set" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/consul/sdk/testutil" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul-k8s/control-plane/namespaces" ) @@ -158,6 +167,46 @@ func TestCommonDetermineAndValidatePort(t *testing.T) { } } +func TestWorkloadPortName(t *testing.T) { + cases := []struct { + Name string + Port *corev1.ContainerPort + Expected string + }{ + { + Name: "named port", + Port: &corev1.ContainerPort{ + Name: "http", + ContainerPort: 8080, + }, + Expected: "http", + }, + { + Name: "unnamed port", + Port: &corev1.ContainerPort{ + Name: "", + ContainerPort: 8080, + }, + Expected: "cslport-8080", + }, + { + Name: "number port name", + Port: &corev1.ContainerPort{ + Name: "8080", + ContainerPort: 8080, + }, + Expected: "cslport-8080", + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + name := WorkloadPortName(tt.Port) + require.Equal(t, tt.Expected, name) + }) + } +} + func TestPortValue(t *testing.T) { cases := []struct { Name string @@ -318,6 +367,156 @@ func TestShouldIgnore(t *testing.T) 
{ } } +func TestToProtoAny(t *testing.T) { + t.Parallel() + + t.Run("nil gets nil", func(t *testing.T) { + require.Nil(t, ToProtoAny(nil)) + }) + + t.Run("anypb.Any gets same value", func(t *testing.T) { + testMsg := &pbresource.Resource{Id: &pbresource.ID{Name: "foo"}} + testAny, err := anypb.New(testMsg) + require.NoError(t, err) + + require.Equal(t, testAny, ToProtoAny(testAny)) + }) + + t.Run("valid proto is successfully serialized", func(t *testing.T) { + testMsg := &pbresource.Resource{Id: &pbresource.ID{Name: "foo"}} + testAny, err := anypb.New(testMsg) + require.NoError(t, err) + + if diff := cmp.Diff(testAny, ToProtoAny(testMsg), protocmp.Transform()); diff != "" { + t.Errorf("unexpected difference:\n%v", diff) + } + }) +} + +func TestGetPortProtocol(t *testing.T) { + t.Parallel() + toStringPtr := func(s string) *string { + return &s + } + cases := []struct { + name string + input *string + expected pbcatalog.Protocol + }{ + { + name: "nil gets UNSPECIFIED", + input: nil, + expected: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + { + name: "tcp gets TCP", + input: toStringPtr("tcp"), + expected: pbcatalog.Protocol_PROTOCOL_TCP, + }, + { + name: "http gets HTTP", + input: toStringPtr("http"), + expected: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + name: "http2 gets HTTP2", + input: toStringPtr("http2"), + expected: pbcatalog.Protocol_PROTOCOL_HTTP2, + }, + { + name: "grpc gets GRPC", + input: toStringPtr("grpc"), + expected: pbcatalog.Protocol_PROTOCOL_GRPC, + }, + { + name: "case sensitive", + input: toStringPtr("gRPC"), + expected: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + { + name: "unknown gets UNSPECIFIED", + input: toStringPtr("foo"), + expected: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + actual := GetPortProtocol(tt.input) + require.Equal(t, tt.expected, actual) + }) + } +} + +func TestHasBeenMeshInjected(t *testing.T) { + t.Parallel() + cases := []struct { + name 
string + pod corev1.Pod + expected bool + }{ + { + name: "Pod with injected annotation", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{}, + Annotations: map[string]string{ + constants.KeyMeshInjectStatus: constants.Injected, + }, + }, + }, + expected: true, + }, + { + name: "Pod without injected annotation", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{}, + Annotations: map[string]string{ + "consul.hashicorp.com/foo": "bar", + }, + }, + }, + expected: false, + }, + { + name: "Pod with injected annotation but wrong value", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{}, + Annotations: map[string]string{ + constants.KeyMeshInjectStatus: "hiya", + }, + }, + }, + expected: false, + }, + { + name: "Pod with nil annotations", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{}, + }, + }, + expected: false, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + actual := HasBeenMeshInjected(tt.pod) + require.Equal(t, tt.expected, actual) + }) + } +} + func Test_ConsulNamespaceIsNotFound(t *testing.T) { t.Parallel() @@ -359,3 +558,54 @@ func Test_ConsulNamespaceIsNotFound(t *testing.T) { }) } } + +// Test_ConsulNamespaceIsNotFound_ErrorMsg is an integration test that verifies the error message +// associated with a missing namespace while creating a resource doesn't drift. +func Test_ConsulNamespaceIsNotFound_ErrorMsg(t *testing.T) { + t.Parallel() + + // Create test consulServer server. 
+	testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) {
+		c.Experiments = []string{"resource-apis"}
+	})
+
+	id := &pbresource.ID{
+		Name: "foo",
+		Type: pbcatalog.WorkloadType,
+		Tenancy: &pbresource.Tenancy{
+			Partition: constants.DefaultConsulPartition,
+			Namespace: "i-dont-exist-but-its-ok-we-will-meet-again-someday",
+		},
+	}
+
+	workload := &pbcatalog.Workload{
+		Addresses: []*pbcatalog.WorkloadAddress{
+			{Host: "10.0.0.1", Ports: []string{"mesh"}},
+		},
+		Ports: map[string]*pbcatalog.WorkloadPort{
+			"mesh": {
+				Port:     constants.ProxyDefaultInboundPort,
+				Protocol: pbcatalog.Protocol_PROTOCOL_MESH,
+			},
+		},
+		NodeName: "banana",
+		Identity: "foo",
+	}
+
+	data := ToProtoAny(workload)
+
+	resource := &pbresource.Resource{
+		Id:   id,
+		Data: data,
+	}
+
+	_, err := testClient.ResourceClient.Write(context.Background(), &pbresource.WriteRequest{Resource: resource})
+	require.Error(t, err)
+
+	s, ok := status.FromError(err)
+	require.True(t, ok)
+	require.Equal(t, codes.InvalidArgument, s.Code())
+	require.Contains(t, s.Message(), "namespace not found")
+
+	require.True(t, ConsulNamespaceIsNotFound(err))
+}
diff --git a/control-plane/connect-inject/constants/annotations_and_labels.go b/control-plane/connect-inject/constants/annotations_and_labels.go
index a0a59ce91b..dca3c523a3 100644
--- a/control-plane/connect-inject/constants/annotations_and_labels.go
+++ b/control-plane/connect-inject/constants/annotations_and_labels.go
@@ -228,6 +228,46 @@ const (
 	ManagedByValue = "consul-k8s-endpoints-controller"
 )
 
+// ********************
+// V2 Exclusive Annotations & Labels
+// ********************
+
+const (
+	// AnnotationMeshInject is the key of the annotation that controls whether
+	// V2 mesh injection is explicitly enabled or disabled for a pod. It must
+	// be set to a truthy or falsy value, as parseable by strconv.ParseBool.
+ AnnotationMeshInject = "consul.hashicorp.com/mesh-inject" + + // KeyMeshInjectStatus is the key of the annotation that is added to + // a pod after an injection is done. + KeyMeshInjectStatus = "consul.hashicorp.com/mesh-inject-status" + + // ManagedByEndpointsValue is used in Consul metadata to identify the manager + // of resources. The 'v2' suffix is used to differentiate from the legacy + // endpoints controller of the same name. + ManagedByEndpointsValue = "consul-k8s-endpoints-controller-v2" + + // ManagedByPodValue is used in Consul metadata to identify the manager + // of resources. + ManagedByPodValue = "consul-k8s-pod-controller" + + // ManagedByServiceAccountValue is used in Consul metadata to identify the manager + // of resources. + ManagedByServiceAccountValue = "consul-k8s-service-account-controller" + + // AnnotationMeshDestinations is a list of destinations to register with the + // proxy. The service name should map to a Consul service name and the local + // port is the local port in the pod that the listener will bind to. It can + // be a named port. + AnnotationMeshDestinations = "consul.hashicorp.com/mesh-service-destinations" + + // AnnotationMeshInjectMountVolumes is the key of the annotation that controls whether + // the data volume that mesh inject uses to store data including the Consul ACL token + // is mounted to other containers in the pod. It is a comma-separated list of container names + // to mount the volume on. It will be mounted at the path `/consul/mesh-inject`. + AnnotationMeshInjectMountVolumes = "consul.hashicorp.com/mesh-inject-mount-volume" +) + // Annotations used by Prometheus. 
const ( AnnotationPrometheusScrape = "prometheus.io/scrape" diff --git a/control-plane/connect-inject/constants/constants.go b/control-plane/connect-inject/constants/constants.go index 83d2343012..57f276f949 100644 --- a/control-plane/connect-inject/constants/constants.go +++ b/control-plane/connect-inject/constants/constants.go @@ -83,6 +83,8 @@ const ( CACertFileEnvVar = "CONSUL_CACERT_FILE" CACertPEMEnvVar = "CONSUL_CACERT_PEM" TLSServerNameEnvVar = "CONSUL_TLS_SERVER_NAME" + + UnnamedWorkloadPortNamePrefix = "cslport-" ) // GetNormalizedConsulNamespace returns the default namespace if the passed namespace diff --git a/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller.go b/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller.go new file mode 100644 index 0000000000..4353590c92 --- /dev/null +++ b/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller.go @@ -0,0 +1,643 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package endpointsv2 + +import ( + "context" + "crypto/sha256" + "fmt" + "net" + "sort" + "strings" + + "github.com/go-logr/logr" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/go-multierror" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + 
"github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" +) + +const ( + kindReplicaSet = "ReplicaSet" +) + +type Controller struct { + client.Client + // ConsulServerConnMgr is the watcher for the Consul server addresses used to create Consul API v2 clients. + ConsulServerConnMgr consul.ServerConnectionManager + // K8sNamespaceConfig manages allow/deny Kubernetes namespaces. + common.K8sNamespaceConfig + // ConsulTenancyConfig manages settings related to Consul namespaces and partitions. + common.ConsulTenancyConfig + + // WriteCache keeps track of records already written to Consul in order to enable debouncing of writes. + // This is useful in particular for this controller which will see potentially many more reconciles due to + // endpoint changes (e.g. pod health) than changes to service data written to Consul. + // It is intentionally simple and best-effort, and does not guarantee against all redundant writes. + // It is not persistent across restarts of the controller process. + WriteCache WriteCache + + Log logr.Logger + + Scheme *runtime.Scheme + context.Context +} + +func (r *Controller) Logger(name types.NamespacedName) logr.Logger { + return r.Log.WithValues("request", name) +} + +func (r *Controller) SetupWithManager(mgr ctrl.Manager) error { + if r.WriteCache == nil { + return fmt.Errorf("WriteCache was not configured for Controller") + } + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Endpoints{}). + Complete(r) +} + +// Reconcile reads the state of an Endpoints object for a Kubernetes Service and reconciles Consul services which +// correspond to the Kubernetes Service. These events are driven by changes to the Pods backing the Kube service. 
+func (r *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + var endpoints corev1.Endpoints + var service corev1.Service + + // Ignore the request if the namespace of the endpoint is not allowed. + if inject.ShouldIgnore(req.Namespace, r.DenyK8sNamespacesSet, r.AllowK8sNamespacesSet) { + return ctrl.Result{}, nil + } + + // Create Consul resource service client for this reconcile. + resourceClient, err := consul.NewResourceServiceClient(r.ConsulServerConnMgr) + if err != nil { + r.Log.Error(err, "failed to create Consul resource client", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + + state, err := r.ConsulServerConnMgr.State() + if err != nil { + r.Log.Error(err, "failed to query Consul client state", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + if state.Token != "" { + ctx = metadata.AppendToOutgoingContext(ctx, "x-consul-token", state.Token) + } + + // If the Endpoints object has been deleted (and we get an IsNotFound error), + // we need to deregister that service from Consul. + err = r.Client.Get(ctx, req.NamespacedName, &endpoints) + if k8serrors.IsNotFound(err) { + err = r.deregisterService(ctx, resourceClient, req) + return ctrl.Result{}, err + } else if err != nil { + r.Log.Error(err, "failed to get Endpoints", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + r.Log.Info("retrieved Endpoints", "name", req.Name, "ns", req.Namespace) + + // We expect this to succeed if the Endpoints fetch for the Service succeeded. 
+ err = r.Client.Get(r.Context, types.NamespacedName{Name: endpoints.Name, Namespace: endpoints.Namespace}, &service) + if err != nil { + r.Log.Error(err, "failed to get Service", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + r.Log.Info("retrieved Service", "name", req.Name, "ns", req.Namespace) + + consulSvc, err := r.getConsulService(ctx, &ClientPodFetcher{client: r.Client}, service, endpoints) + if err != nil { + r.Log.Error(err, "failed to build Consul service resource", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + + // If we don't have at least one mesh-injected pod selected by the service, don't register. + // Note that we only _delete_ services when they're deleted from K8s, not when endpoints or + // workload selectors are empty. This ensures that failover can occur normally when targeting + // the existing VIP (ClusterIP) assigned to the service. + if consulSvc.Workloads == nil { + return ctrl.Result{}, nil + } + + // Register the service in Consul. + id := getServiceID( + service.Name, // Consul and Kubernetes service name will always match + r.getConsulNamespace(service.Namespace), + r.getConsulPartition()) + meta := getServiceMeta(service) + k8sUid := string(service.UID) + if err = r.ensureService(ctx, &defaultResourceReadWriter{resourceClient}, k8sUid, id, meta, consulSvc); err != nil { + // We could be racing with the namespace controller. + // Requeue (which includes backoff) to try again. 
+ if inject.ConsulNamespaceIsNotFound(err) { + r.Log.Info("Consul namespace not found; re-queueing request", + "service", service.GetName(), "ns", req.Namespace, + "consul-ns", r.getConsulNamespace(req.Namespace), "err", err.Error()) + return ctrl.Result{Requeue: true}, nil + } + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +func (r *Controller) getConsulService(ctx context.Context, pf PodFetcher, service corev1.Service, endpoints corev1.Endpoints) (*pbcatalog.Service, error) { + prefixedPods, exactNamePods, err := r.getWorkloadDataFromEndpoints(ctx, pf, endpoints) + if err != nil { + return nil, err + } + + // Create Consul Service resource to be registered. + return &pbcatalog.Service{ + Workloads: getWorkloadSelector(prefixedPods, exactNamePods), + Ports: getServicePorts(service, prefixedPods, exactNamePods), + VirtualIps: r.getServiceVIPs(service), + }, nil +} + +type podSetData struct { + podCount int + samplePod *corev1.Pod +} + +// selectorPodData represents data for each set of pods represented by a WorkloadSelector value. +// The data may be for several pods (prefix) or a single pod (exact name). +// This is used for choosing the ideal Consul service TargetPort value when the K8s service target port is numeric. +type selectorPodData map[string]*podSetData + +// getWorkloadDataFromEndpoints accumulates data to supply the Consul service WorkloadSelector and TargetPort from +// Endpoints based on pod names and owners. +func (r *Controller) getWorkloadDataFromEndpoints(ctx context.Context, pf PodFetcher, endpoints corev1.Endpoints) (selectorPodData, selectorPodData, error) { + var errs error + + // Determine the workload selector by fetching as many pods as needed to accumulate prefixes + // and exact pod name matches. + // + // If the K8s service target port is numeric, we also use this information to determine the + // appropriate Consul target port value. 
+ prefixedPods := make(selectorPodData) + exactNamePods := make(selectorPodData) + ignoredPodPrefixes := make(map[string]any) + for address := range allAddresses(endpoints.Subsets) { + if address.TargetRef != nil && address.TargetRef.Kind == "Pod" { + podName := types.NamespacedName{Name: address.TargetRef.Name, Namespace: endpoints.Namespace} + + // Accumulate owner prefixes and exact pod names for Consul workload selector. + // If this pod is already covered by a known owner prefix, skip it. + // If not, fetch the owner. If the owner has a unique prefix, add it to known prefixes. + // If not, add the pod name to exact name matches. + maybePodOwnerPrefix := getOwnerPrefixFromPodName(podName.Name) + + // If prefix is ignored, skip pod. + if _, ok := ignoredPodPrefixes[maybePodOwnerPrefix]; ok { + continue + } + + if existingPodData, ok := prefixedPods[maybePodOwnerPrefix]; !ok { + // Fetch pod info from K8s. + pod, err := pf.GetPod(ctx, podName) + if err != nil { + r.Log.Error(err, "failed to get pod", "name", podName.Name, "ns", endpoints.Namespace) + errs = multierror.Append(errs, err) + continue + } + + // Store data corresponding to the new selector value, which may be an actual set or exact pod. + podData := podSetData{ + podCount: 1, + samplePod: pod, + } + + // Add pod to workload selector values as appropriate. + // Pods can appear more than once in Endpoints subsets, so we use a set for exact names as well. + if prefix := getOwnerPrefixFromPod(pod); prefix != "" { + if inject.HasBeenMeshInjected(*pod) { + // Add to the list of pods represented by this prefix. This list is used by + // `getEffectiveTargetPort` to determine the most-used target container port name if the + // k8s service target port is numeric. + prefixedPods[prefix] = &podData + } else { + // If the pod hasn't been mesh-injected, ignore it, as it won't be available as a workload. + // Remember its prefix to avoid fetching its siblings needlessly. 
+ ignoredPodPrefixes[prefix] = true + } + } else { + if inject.HasBeenMeshInjected(*pod) { + exactNamePods[podName.Name] = &podData + } + // If the pod hasn't been mesh-injected, ignore it, as it won't be available as a workload. + // No need to remember ignored exact pod names since we don't expect to see them twice. + } + } else { + // We've seen this prefix before. + // Keep track of how many times so that we can choose a container port name if needed later. + existingPodData.podCount += 1 + } + } + } + + return prefixedPods, exactNamePods, errs +} + +// allAddresses combines all Endpoints subset addresses to a single set. Service registration by this controller +// operates independent of health, and an address can appear in multiple subsets if it has a mix of ready and not-ready +// ports, so we combine them here to simplify iteration. +func allAddresses(subsets []corev1.EndpointSubset) map[corev1.EndpointAddress]any { + m := make(map[corev1.EndpointAddress]any) + for _, sub := range subsets { + for _, readyAddress := range sub.Addresses { + m[readyAddress] = true + } + for _, notReadyAddress := range sub.NotReadyAddresses { + m[notReadyAddress] = true + } + } + return m +} + +// getOwnerPrefixFromPodName extracts the owner name prefix from a pod name. +func getOwnerPrefixFromPodName(podName string) string { + podNameParts := strings.Split(podName, "-") + return strings.Join(podNameParts[:len(podNameParts)-1], "-") +} + +// getOwnerPrefixFromPod returns the common name prefix of the pod, if the pod is a member of a set with a unique name +// prefix. Currently, this only applies to ReplicaSets. +// +// We have to fetch the owner and check its type because pod names cannot be disambiguated from pod owner names due to +// the `-` delimiter and unique ID parts also being valid name components. +// +// If the pod owner does not have a unique name, the empty string is returned. 
+func getOwnerPrefixFromPod(pod *corev1.Pod) string { + for _, ref := range pod.OwnerReferences { + if ref.Kind == "ReplicaSet" { + return ref.Name + } + } + return "" +} + +// ensureService upserts a Consul service resource if an identical write has not already been made to Consul since this +// controller was started. If the check for a previous write fails, the resource is written anyway. +func (r *Controller) ensureService(ctx context.Context, rw resourceReadWriter, k8sUid string, id *pbresource.ID, meta map[string]string, consulSvc *pbcatalog.Service) error { + // Use Marshal w/ Deterministic option to ensure write hash generated from Data is consistent. + data := new(anypb.Any) + if err := anypb.MarshalFrom(data, consulSvc, proto.MarshalOptions{Deterministic: true}); err != nil { + return err + } + + // Use the locally-created Resource and ID (without Uid and Version) when writing so that it + // behaves as an upsert rather than CAS. + consulSvcResource := &pbresource.Resource{ + Id: id, + Data: data, + Metadata: meta, + } + + writeHash, err := getWriteHash(consulSvcResource) + if err != nil { + r.Log.Error(err, "failed to get write hash for service; assuming it is out of sync", + getLogFieldsForResource(id)...) + } + key := getWriteCacheKey(types.NamespacedName{Name: id.Name, Namespace: id.Tenancy.Namespace}) + generationFetchFn := func() string { + // Check for whether a matching service already exists in Consul. + // Gracefully fail on error. This allows us to make a best-effort write attempt in + // case of a persistent read error or permissions issue that does not impact writing. + resp, err := rw.Read(ctx, &pbresource.ReadRequest{Id: id}) + if s, ok := status.FromError(err); !ok || (s.Code() != codes.OK && s.Code() != codes.NotFound) { + r.Log.Error(err, "failed to read existing service resource from Consul; assuming it is out of sync", + append(getLogFieldsForResource(id), "code", s.Code(), "message", s.Message())...) 
+ return "" + } + return resp.GetResource().GetGeneration() + } + if r.WriteCache.hasMatch(key, writeHash, generationFetchFn, k8sUid) { + r.Log.V(1).Info("skipping service write due to matching write hash") + return nil + } + + r.Log.Info("writing service to Consul", getLogFieldsForResource(consulSvcResource.Id)...) + resp, err := rw.Write(ctx, &pbresource.WriteRequest{Resource: consulSvcResource}) + if err != nil { + r.Log.Error(err, fmt.Sprintf("failed to write service: %+v", consulSvc), + getLogFieldsForResource(consulSvcResource.Id)...) + return err + } + + generation := resp.GetResource().GetGeneration() + r.Log.Info("caching service write to Consul", "hash", writeHash, "generation", generation, + "k8sUid", k8sUid) + r.WriteCache.update(key, writeHash, generation, k8sUid) + + return nil +} + +// resourceReadWriter wraps pbresource.ResourceServiceClient for testing purposes. +// The default implementation is a passthrough used outside of tests. +type resourceReadWriter interface { + Read(context.Context, *pbresource.ReadRequest) (*pbresource.ReadResponse, error) + Write(context.Context, *pbresource.WriteRequest) (*pbresource.WriteResponse, error) +} + +type defaultResourceReadWriter struct { + client pbresource.ResourceServiceClient +} + +func (c *defaultResourceReadWriter) Read(ctx context.Context, req *pbresource.ReadRequest) (*pbresource.ReadResponse, error) { + return c.client.Read(ctx, req) +} + +func (c *defaultResourceReadWriter) Write(ctx context.Context, req *pbresource.WriteRequest) (*pbresource.WriteResponse, error) { + return c.client.Write(ctx, req) +} + +func getServiceID(name, namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: name, + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + } +} + +// getServicePorts converts Kubernetes Service ports data into Consul service ports. 
+func getServicePorts(service corev1.Service, prefixedPods selectorPodData, exactNamePods selectorPodData) []*pbcatalog.ServicePort { + ports := make([]*pbcatalog.ServicePort, 0, len(service.Spec.Ports)+1) + + for _, p := range service.Spec.Ports { + // Service mesh only supports TCP as the L4 Protocol (not to be confused w/ L7 AppProtocol). + // + // This check is necessary to deduplicate VirtualPort values when multiple declared ServicePort values exist + // for the same port, which is possible in K8s when e.g. multiplexing TCP and UDP traffic over a single port. + // + // If we otherwise see repeat port values in a K8s service, we pass along and allow Consul to fail validation. + if p.Protocol == corev1.ProtocolTCP { + // TODO(NET-5705): Error check reserved "mesh" target port + ports = append(ports, &pbcatalog.ServicePort{ + VirtualPort: uint32(p.Port), + TargetPort: getEffectiveTargetPort(p.TargetPort, prefixedPods, exactNamePods), + Protocol: inject.GetPortProtocol(p.AppProtocol), + }) + } + } + + // Sort for comparison stability during write deduplication. + sort.Slice(ports, func(i, j int) bool { + return ports[i].VirtualPort < ports[j].VirtualPort + }) + + // Append Consul service mesh port in addition to discovered ports. + ports = append(ports, &pbcatalog.ServicePort{ + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }) + + return ports +} + +func getEffectiveTargetPort(targetPort intstr.IntOrString, prefixedPods selectorPodData, exactNamePods selectorPodData) string { + // The Kubernetes service is targeting a port name; use it directly. + // The expected behavior of Kubernetes is that all included Endpoints conform and have a matching named port. + // This is the simplest path and preferred over services targeting by port number. + if targetPort.Type == intstr.String { + return targetPort.String() + } + + // The Kubernetes service is targeting a numeric port. 
This is more complicated for mapping to Consul: + // - Endpoints will contain _all_ selected pods, not just those with a matching declared port number. + // - Consul Workload ports always have a name, so we must determine the best name to match on. + // - There may be more than one option among the pods with named ports, including no name at all. + // + // Our best-effort approach is to find the most prevalent port name among selected pods that _do_ declare the target + // port explicitly in container ports. We'll assume that for each set of pods, the first pod is "representative" - + // i.e. we expect a ReplicaSet to be homogenous. In the vast majority of cases, this means we'll be looking for the + // largest selected ReplicaSet and using the first pod from it. + // + // The goal is to make this determination without fetching all pods belonging to the service, as that would be a + // very expensive operation to repeat every time endpoints change, and we don't expect the target port to change + // often if ever across pod/deployment lifecycles. + // + // TODO(NET-5706) in GA, we intend to change port selection to allow for Consul TargetPort to be numeric. If we + // retain the port selection model used here beyond GA, we should consider updating it to also consider pod health, + // s.t. when the selected port name changes between deployments of a ReplicaSet, we route traffic to ports + // belonging to the set most able to serve traffic, rather than simply the largest one. + targetPortInt := int32(targetPort.IntValue()) + var mostPrevalentContainerPort *corev1.ContainerPort + maxCount := 0 + effectiveNameForPort := inject.WorkloadPortName + for _, podData := range prefixedPods { + containerPort := getTargetContainerPort(targetPortInt, podData.samplePod) + + // Ignore pods without a declared port matching the service targetPort. + if containerPort == nil { + continue + } + + // If this is the most prevalent container port by pod set size, update result. 
+ if maxCount < podData.podCount { + mostPrevalentContainerPort = containerPort + maxCount = podData.podCount + } + } + + if mostPrevalentContainerPort != nil { + return effectiveNameForPort(mostPrevalentContainerPort) + } + + // If no pod sets have the expected target port, fall back to the most common name among exact-name pods. + // An assumption here is that exact name pods mixed with pod sets will be rare, and sets should be preferred. + if len(exactNamePods) > 0 { + nameCount := make(map[string]int) + for _, podData := range exactNamePods { + if containerPort := getTargetContainerPort(targetPortInt, podData.samplePod); containerPort != nil { + nameCount[effectiveNameForPort(containerPort)] += 1 + } + } + if len(nameCount) > 0 { + maxNameCount := 0 + mostPrevalentContainerPortName := "" + for name, count := range nameCount { + if maxNameCount < count { + mostPrevalentContainerPortName = name + maxNameCount = count + } + } + return mostPrevalentContainerPortName + } + } + + // If still no match for the target port, fall back to string-ifying the target port name, which + // is what the PodController will do when converting unnamed ContainerPorts to Workload ports. + return constants.UnnamedWorkloadPortNamePrefix + targetPort.String() +} + +// getTargetContainerPort returns the pod ContainerPort matching the given numeric port value, or nil if none is found. +func getTargetContainerPort(targetPort int32, pod *corev1.Pod) *corev1.ContainerPort { + for _, c := range pod.Spec.Containers { + if len(c.Ports) == 0 { + continue + } + for _, p := range c.Ports { + if p.ContainerPort == targetPort && p.Protocol == corev1.ProtocolTCP { + return &p + } + } + } + return nil +} + +// getServiceVIPs returns the VIPs to associate with the registered Consul service. This will contain the Kubernetes +// Service ClusterIP if it exists. 
+// +// Note that we always provide this data regardless of whether TProxy is enabled, deferring to individual proxy configs +// to decide whether it's used. +func (r *Controller) getServiceVIPs(service corev1.Service) []string { + if parsedIP := net.ParseIP(service.Spec.ClusterIP); parsedIP == nil { + r.Log.Info("skipping service registration virtual IP assignment due to invalid or unset ClusterIP", "name", service.Name, "ns", service.Namespace, "ip", service.Spec.ClusterIP) + return nil + } + + // Note: This slice needs to be sorted for stable comparison during write deduplication. + // If additional values are added in the future, the output order should be consistent. + return []string{service.Spec.ClusterIP} +} + +func getServiceMeta(service corev1.Service) map[string]string { + meta := map[string]string{ + constants.MetaKeyKubeNS: service.Namespace, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + } + return meta +} + +// getWorkloadSelector returns the WorkloadSelector for the given pod name prefixes and exact names. +// It returns nil if the provided name sets are empty. +func getWorkloadSelector(prefixedPods selectorPodData, exactNamePods selectorPodData) *pbcatalog.WorkloadSelector { + // If we don't have any values, return nil + if len(prefixedPods) == 0 && len(exactNamePods) == 0 { + return nil + } + + // Create the WorkloadSelector + workloads := &pbcatalog.WorkloadSelector{} + for v := range prefixedPods { + workloads.Prefixes = append(workloads.Prefixes, v) + } + for v := range exactNamePods { + workloads.Names = append(workloads.Names, v) + } + + // Sort for comparison stability during write deduplication + sort.Strings(workloads.Prefixes) + sort.Strings(workloads.Names) + + return workloads +} + +// deregisterService deletes the service resource corresponding to the given name and namespace from Consul. +// This operation is idempotent and can be executed for non-existent services. 
+func (r *Controller) deregisterService(ctx context.Context, resourceClient pbresource.ResourceServiceClient, req ctrl.Request) error {
+	// Regardless of whether we get an error on delete, remove the resource from the cache as we intend for it
+	// to be deleted and the record is no longer valid for preventing writes.
+	r.WriteCache.remove(getWriteCacheKey(req.NamespacedName))
+	_, err := resourceClient.Delete(ctx, &pbresource.DeleteRequest{
+		Id: getServiceID(req.Name, r.getConsulNamespace(req.Namespace), r.getConsulPartition()),
+	})
+	return err
+}
+
+// getConsulNamespace returns the Consul destination namespace for a provided Kubernetes namespace
+// depending on Consul Namespaces being enabled and the value of namespace mirroring.
+func (r *Controller) getConsulNamespace(kubeNamespace string) string {
+	ns := namespaces.ConsulNamespace(
+		kubeNamespace,
+		r.EnableConsulNamespaces,
+		r.ConsulDestinationNamespace,
+		r.EnableNSMirroring,
+		r.NSMirroringPrefix,
+	)
+
+	// TODO(NET-5652): remove this if and when the default namespace of resources is no longer required to be set explicitly.
+	if ns == "" {
+		ns = constants.DefaultConsulNS
+	}
+	return ns
+}
+
+// getConsulPartition returns the configured Consul partition, or the default partition when
+// partitions are disabled or no partition name is set.
+func (r *Controller) getConsulPartition() string {
+	if !r.EnableConsulPartitions || r.ConsulPartition == "" {
+		return constants.DefaultConsulPartition
+	}
+	return r.ConsulPartition
+}
+
+// getWriteCacheKey gets a key to track synchronization of a K8s service to deduplicate writes to Consul.
+// See also WriteCache.hasMatch.
+func getWriteCacheKey(serviceName types.NamespacedName) string {
+	return serviceName.String()
+}
+
+// getWriteHash gets a hash of the given resource to deduplicate writes to Consul.
+//
+// This hash is not intended to be cryptographically secure, only deterministic and collision-resistant
+// for tens of thousands of values.
+//
+// If an error occurs marshalling the resource for the hash, a nil hash value and the marshalling
+// error will be returned.
+func getWriteHash(r *pbresource.Resource) ([]byte, error) {
+	// We Marshal the entire resource (not just its own Data, which is already serialized)
+	// in order to take advantage of the deterministic marshal offered by proto and include
+	// fields like Meta, which are not part of the resource Data.
+	data, err := proto.MarshalOptions{Deterministic: true}.Marshal(r)
+	if err != nil {
+		return nil, err
+	}
+	h := sha256.Sum256(data)
+	return h[:], nil
+}
+
+// getLogFieldsForResource returns structured log key/value pairs identifying the given resource ID.
+// NOTE(review): assumes id and id.Tenancy are non-nil — confirm callers always populate Tenancy.
+func getLogFieldsForResource(id *pbresource.ID) []any {
+	return []any{
+		"name", id.Name,
+		"ns", id.Tenancy.Namespace,
+		"partition", id.Tenancy.Partition,
+	}
+}
+
+// PodFetcher fetches pods by NamespacedName. This interface primarily exists for testing.
+type PodFetcher interface {
+	GetPod(context.Context, types.NamespacedName) (*corev1.Pod, error)
+}
+
+// ClientPodFetcher wraps a Kubernetes client to implement PodFetcher. This is the only implementation outside of tests.
+type ClientPodFetcher struct {
+	client client.Client
+}
+
+// GetPod fetches the pod with the given name via the wrapped Kubernetes client.
+func (c *ClientPodFetcher) GetPod(ctx context.Context, name types.NamespacedName) (*corev1.Pod, error) {
+	var pod corev1.Pod
+	err := c.client.Get(ctx, name, &pod)
+	if err != nil {
+		return nil, err
+	}
+	return &pod, nil
+}
diff --git a/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller_ent_test.go b/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller_ent_test.go
new file mode 100644
index 0000000000..636a1ab923
--- /dev/null
+++ b/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller_ent_test.go
@@ -0,0 +1,30 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build enterprise
+
+package endpointsv2
+
+import (
+	"testing"
+)
+
+// TODO: Cover ConsulDestinationNamespace and EnableNSMirroring, each with and without NSMirroringPrefix.
+
+// TODO(zalimeni)
+// Should test new Service registration in a non-default NS and Partition with namespaces set to mirroring.
+func TestReconcile_CreateService_WithNamespaces(t *testing.T) {
+
+}
+
+// TODO(zalimeni)
+// Should test updating Service registration in a non-default NS and Partition with namespaces set to mirroring.
+func TestReconcile_UpdateService_WithNamespaces(t *testing.T) {
+
+}
+
+// TODO(zalimeni)
+// Should test removing Service registration in a non-default NS and Partition with namespaces set to mirroring.
+func TestReconcile_DeleteService_WithNamespaces(t *testing.T) {
+
+}
diff --git a/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller_test.go b/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller_test.go
new file mode 100644
index 0000000000..93d41f0f11
--- /dev/null
+++ b/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller_test.go
@@ -0,0 +1,2361 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package endpointsv2 + +import ( + "context" + "fmt" + "testing" + + mapset "github.com/deckarep/golang-set" + logrtest "github.com/go-logr/logr/testr" + "github.com/google/go-cmp/cmp" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/go-uuid" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +const ( + kindDaemonSet = "DaemonSet" +) + +var ( + appProtocolHttp = "http" + appProtocolHttp2 = "http2" + appProtocolGrpc = "grpc" +) + +type reconcileCase struct { + name string + svcName string + k8sObjects func() []runtime.Object + existingResource *pbresource.Resource + expectedResource *pbresource.Resource + targetConsulNs string + targetConsulPartition string + expErr string + caseFn func(*testing.T, *reconcileCase, *Controller, pbresource.ResourceServiceClient) +} + +// TODO(NET-5716): Allow/deny namespaces for reconcile tests + +func TestReconcile_CreateService(t *testing.T) { + t.Parallel() + cases := []reconcileCase{ + { + name: "Empty endpoints do not get registered", + svcName: "service-created", + k8sObjects: func() []runtime.Object { + endpoints := 
&corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{}, + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromString("my-http-port"), + AppProtocol: &appProtocolHttp, + }, + { + Name: "api", + Port: 9090, + Protocol: "TCP", + TargetPort: intstr.FromString("my-grpc-port"), + AppProtocol: &appProtocolGrpc, + }, + { + Name: "other", + Port: 10001, + Protocol: "TCP", + TargetPort: intstr.FromString("10001"), + // no app protocol specified + }, + }, + }, + } + return []runtime.Object{endpoints, service} + }, + }, + { + name: "Endpoints without injected pods do not get registered", + svcName: "service-created", + k8sObjects: func() []runtime.Object { + pod1 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-abcde") + pod2 := createServicePod(kindDaemonSet, "service-created-ds", "12345") + removeMeshInjectStatus(t, pod1) + removeMeshInjectStatus(t, pod2) + endpoints := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods(pod1, pod2), + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromString("my-http-port"), + AppProtocol: &appProtocolHttp, + }, + { + Name: "api", + Port: 9090, + Protocol: "TCP", + TargetPort: intstr.FromString("my-grpc-port"), + AppProtocol: &appProtocolGrpc, + }, + { + Name: "other", + Port: 10001, + 
Protocol: "TCP", + TargetPort: intstr.FromString("10001"), + // no app protocol specified + }, + }, + }, + } + return []runtime.Object{pod1, pod2, endpoints, service} + }, + }, + { + name: "Basic endpoints", + svcName: "service-created", + k8sObjects: func() []runtime.Object { + pod1 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-abcde") + pod2 := createServicePod(kindDaemonSet, "service-created-ds", "12345") + endpoints := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods(pod1, pod2), + Ports: []corev1.EndpointPort{ + { + Name: "public", + Port: 2345, + Protocol: "TCP", + AppProtocol: &appProtocolHttp, + }, + { + Name: "api", + Port: 6789, + Protocol: "TCP", + AppProtocol: &appProtocolGrpc, + }, + { + Name: "other", + Port: 10001, + Protocol: "TCP", + }, + }, + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromString("my-http-port"), + AppProtocol: &appProtocolHttp, + }, + { + Name: "api", + Port: 9090, + Protocol: "TCP", + TargetPort: intstr.FromString("my-grpc-port"), + AppProtocol: &appProtocolGrpc, + }, + { + Name: "other", + Port: 10001, + Protocol: "TCP", + TargetPort: intstr.FromString("cslport-10001"), + // no app protocol specified + }, + }, + }, + } + return []runtime.Object{pod1, pod2, endpoints, service} + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-created", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + 
TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + VirtualPort: 9090, + TargetPort: "my-grpc-port", + Protocol: pbcatalog.Protocol_PROTOCOL_GRPC, + }, + { + VirtualPort: 10001, + TargetPort: "cslport-10001", + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + Names: []string{"service-created-ds-12345"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + }, + { + name: "Unhealthy endpoints should be registered", + svcName: "service-created", + k8sObjects: func() []runtime.Object { + pod1 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-abcde") + pod2 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-fghij") + endpoints := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + // Split addresses between ready and not-ready + Addresses: addressesForPods(pod1), + NotReadyAddresses: addressesForPods(pod2), + Ports: []corev1.EndpointPort{ + { + Name: "public", + Port: 2345, + Protocol: "TCP", + AppProtocol: &appProtocolHttp, + }, + }, + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromString("my-http-port"), + AppProtocol: &appProtocolHttp, + }, + }, + }, + } + return []runtime.Object{pod1, pod2, endpoints, service} + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-created", + Type: pbcatalog.ServiceType, + 
Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + // Both replicasets (ready and not ready) should be present + Prefixes: []string{ + "service-created-rs-abcde", + "service-created-rs-fghij", + }, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + }, + { + name: "Pods with only some service ports should be registered", + svcName: "service-created", + k8sObjects: func() []runtime.Object { + pod1 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-abcde") + pod2 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-fghij") + endpoints := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + // Two separate endpoint subsets w/ each of 2 ports served by a different replicaset + { + Addresses: addressesForPods(pod1), + Ports: []corev1.EndpointPort{ + { + Name: "public", + Port: 2345, + Protocol: "TCP", + AppProtocol: &appProtocolHttp, + }, + }, + }, + { + Addresses: addressesForPods(pod2), + Ports: []corev1.EndpointPort{ + { + Name: "api", + Port: 6789, + Protocol: "TCP", + AppProtocol: &appProtocolGrpc, + }, + }, + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: 
intstr.FromString("my-http-port"), + AppProtocol: &appProtocolHttp, + }, + { + Name: "api", + Port: 9090, + Protocol: "TCP", + TargetPort: intstr.FromString("my-grpc-port"), + AppProtocol: &appProtocolGrpc, + }, + }, + }, + } + return []runtime.Object{pod1, pod2, endpoints, service} + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-created", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + VirtualPort: 9090, + TargetPort: "my-grpc-port", + Protocol: pbcatalog.Protocol_PROTOCOL_GRPC, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + // Both replicasets should be present even though neither serves both ports + Prefixes: []string{ + "service-created-rs-abcde", + "service-created-rs-fghij", + }, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + }, + { + name: "Numeric service target port: Named container port gets the pod port name", + svcName: "service-created", + k8sObjects: func() []runtime.Object { + pod1 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-abcde", + // Named port with container port value matching service target port + containerWithPort("named-port", 2345), + // Unnamed port with container port value matching service target port + containerWithPort("", 6789)) + endpoints := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods(pod1), + Ports: 
[]corev1.EndpointPort{ + { + Name: "public", + Port: 2345, + Protocol: "TCP", + AppProtocol: &appProtocolHttp, + }, + { + Name: "api", + Port: 6789, + Protocol: "TCP", + AppProtocol: &appProtocolGrpc, + }, + }, + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromInt(2345), // Numeric target port + AppProtocol: &appProtocolHttp, + }, + { + Name: "api", + Port: 9090, + Protocol: "TCP", + TargetPort: intstr.FromInt(6789), // Numeric target port + AppProtocol: &appProtocolGrpc, + }, + { + Name: "unmatched-port", + Port: 10010, + Protocol: "TCP", + TargetPort: intstr.FromInt(10010), // Numeric target port + AppProtocol: &appProtocolHttp, + }, + }, + }, + } + return []runtime.Object{pod1, endpoints, service} + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-created", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "named-port", // Matches container port name, not service target number + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + VirtualPort: 9090, + TargetPort: "cslport-6789", // Matches service target number + Protocol: pbcatalog.Protocol_PROTOCOL_GRPC, + }, + { + VirtualPort: 10010, + TargetPort: "cslport-10010", // Matches service target number (unmatched by container ports) + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + 
Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + }, + { + name: "Numeric service target port: Container port mix gets the name from largest matching pod set", + svcName: "service-created", + k8sObjects: func() []runtime.Object { + // Unnamed port matching service target port. + // Also has second named port, and is not the most prevalent set for that port. + pod1 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-abcde", + containerWithPort("", 2345), + containerWithPort("api-port", 6789)) + + // Named port with different name from most prevalent pods. + // Also has second unnamed port, and _is_ the most prevalent set for that port. + pod2a := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-fghij", + containerWithPort("another-port-name", 2345), + containerWithPort("", 6789)) + pod2b := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-fghij", + containerWithPort("another-port-name", 2345), + containerWithPort("", 6789)) + + // Named port with container port value matching service target port. + // The most common "set" of pods, so should become the port name for service target port. + pod3a := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-klmno", + containerWithPort("named-port", 2345)) + pod3b := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-klmno", + containerWithPort("named-port", 2345)) + pod3c := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-klmno", + containerWithPort("named-port", 2345)) + + // Named port that does not match service target port. + // More common "set" of pods selected by the service, but does not have a target port (value) match. 
+ pod4a := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-pqrst", + containerWithPort("non-matching-named-port", 5432)) + pod4b := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-pqrst", + containerWithPort("non-matching-named-port", 5432)) + pod4c := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-pqrst", + containerWithPort("non-matching-named-port", 5432)) + pod4d := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-pqrst", + containerWithPort("non-matching-named-port", 5432)) + + // Named port from non-injected pods. + // More common "set" of pods selected by the service, but should be filtered out. + pod5a := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-uvwxy", + containerWithPort("ignored-named-port", 2345)) + pod5b := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-uvwxy", + containerWithPort("ignored-named-port", 2345)) + pod5c := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-uvwxy", + containerWithPort("ignored-named-port", 2345)) + pod5d := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-uvwxy", + containerWithPort("ignored-named-port", 2345)) + for _, p := range []*corev1.Pod{pod5a, pod5b, pod5c, pod5d} { + removeMeshInjectStatus(t, p) + } + + // Named port with container port value matching service target port. + // Single pod from non-ReplicaSet owner. Should not take precedence over set pods. 
+ pod6a := createServicePod(kindDaemonSet, "service-created-ds", "12345", + containerWithPort("another-port-name", 2345)) + + endpoints := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods( + pod1, + pod2a, pod2b, + pod3a, pod3b, pod3c, + pod4a, pod4b, pod4c, pod4d, + pod5a, pod5b, pod5c, pod5d, + pod6a), + Ports: []corev1.EndpointPort{ + { + Name: "public", + Port: 2345, + Protocol: "TCP", + AppProtocol: &appProtocolHttp, + }, + }, + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromInt(2345), // Numeric target port + AppProtocol: &appProtocolHttp, + }, + { + Name: "api", + Port: 9090, + Protocol: "TCP", + TargetPort: intstr.FromInt(6789), // Numeric target port + AppProtocol: &appProtocolGrpc, + }, + }, + }, + } + return []runtime.Object{ + pod1, + pod2a, pod2b, + pod3a, pod3b, pod3c, + pod4a, pod4b, pod4c, pod4d, + pod5a, pod5b, pod5c, pod5d, + pod6a, + endpoints, service} + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-created", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "named-port", // Matches container port name, not service target number + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + VirtualPort: 9090, + TargetPort: "cslport-6789", // Matches service target number due to unnamed being most common + Protocol: pbcatalog.Protocol_PROTOCOL_GRPC, + }, + { + TargetPort: "mesh", + Protocol: 
pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{ + "service-created-rs-abcde", + "service-created-rs-fghij", + "service-created-rs-klmno", + "service-created-rs-pqrst", + }, + Names: []string{ + "service-created-ds-12345", + }, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + }, + { + name: "Numeric service target port: Most used container port name from exact name pods used when no pod sets present", + svcName: "service-created", + k8sObjects: func() []runtime.Object { + // Named port with different name from most prevalent pods. + pod1a := createServicePod(kindDaemonSet, "service-created-ds1", "12345", + containerWithPort("another-port-name", 2345)) + + // Named port with container port value matching service target port. + // The most common container port name, so should become the port name for service target port. 
+ pod2a := createServicePod(kindDaemonSet, "service-created-ds2", "12345", + containerWithPort("named-port", 2345)) + pod2b := createServicePod(kindDaemonSet, "service-created-ds2", "23456", + containerWithPort("named-port", 2345)) + + endpoints := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods( + pod1a, + pod2a, pod2b), + Ports: []corev1.EndpointPort{ + { + Name: "public", + Port: 2345, + Protocol: "TCP", + AppProtocol: &appProtocolHttp, + }, + }, + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromInt(2345), // Numeric target port + AppProtocol: &appProtocolHttp, + }, + }, + }, + } + return []runtime.Object{ + pod1a, + pod2a, pod2b, + endpoints, service} + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-created", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "named-port", // Matches container port name, not service target number + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{ + "service-created-ds1-12345", + "service-created-ds2-12345", + "service-created-ds2-23456", + }, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + }, + 
{ + name: "Only L4 TCP ports get a Consul Service port when L4 protocols are multiplexed", + svcName: "service-created", + k8sObjects: func() []runtime.Object { + pod1 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-abcde") + endpoints := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods(pod1), + Ports: []corev1.EndpointPort{ + { + Name: "public-tcp", + Port: 2345, + Protocol: "TCP", + }, + { + Name: "public-udp", + Port: 2345, + Protocol: "UDP", + }, + }, + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + // Two L4 protocols on one exposed port + { + Name: "public-tcp", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromString("my-svc-port"), + }, + { + Name: "public-udp", + Port: 8080, + Protocol: "UDP", + TargetPort: intstr.FromString("my-svc-port"), + }, + }, + }, + } + return []runtime.Object{pod1, endpoints, service} + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-created", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-svc-port", + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + }, + { + name: 
"Services without mesh-injected pods should not be registered", + svcName: "service-created", + k8sObjects: func() []runtime.Object { + pod1 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-abcde") + removeMeshInjectStatus(t, pod1) + endpoints := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods(pod1), + Ports: []corev1.EndpointPort{ + { + Name: "public", + Port: 2345, + Protocol: "TCP", + AppProtocol: &appProtocolHttp, + }, + }, + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromString("my-http-port"), + AppProtocol: &appProtocolHttp, + }, + }, + }, + } + return []runtime.Object{pod1, endpoints, service} + }, + // No expected resource + }, + { + name: "Services with mix of injected and non-injected pods registered with only injected selectors", + svcName: "service-created", + k8sObjects: func() []runtime.Object { + pod1 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-abcde") + pod2 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-fghij") + pod3 := createServicePod(kindDaemonSet, "service-created-ds", "12345") + pod4 := createServicePod(kindDaemonSet, "service-created-ds", "23456") + removeMeshInjectStatus(t, pod1) + removeMeshInjectStatus(t, pod3) + // Retain status of second pod + endpoints := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods(pod1, pod2, pod3, pod4), + Ports: []corev1.EndpointPort{ + { + Name: "public", + Port: 2345, + Protocol: "TCP", + AppProtocol: &appProtocolHttp, + }, + }, + }, + }, + } + service := 
&corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromString("my-http-port"), + AppProtocol: &appProtocolHttp, + }, + }, + }, + } + return []runtime.Object{pod1, pod2, pod3, pod4, endpoints, service} + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-created", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + // Selector only contains values for injected pods + Prefixes: []string{"service-created-rs-fghij"}, + Names: []string{"service-created-ds-23456"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + runReconcileCase(t, tc) + }) + } +} + +func TestReconcile_UpdateService(t *testing.T) { + t.Parallel() + cases := []reconcileCase{ + { + name: "Pods changed", + svcName: "service-updated", + k8sObjects: func() []runtime.Object { + pod1 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-abcde") + pod2 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-klmno") + pod3 := createServicePod(kindDaemonSet, "service-created-ds", "12345") + pod4 := createServicePod(kindDaemonSet, "service-created-ds", "34567") + endpoints := &corev1.Endpoints{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods(pod1, pod2, pod3, pod4), + Ports: []corev1.EndpointPort{ + { + Name: "my-http-port", + Port: 2345, + Protocol: "TCP", + AppProtocol: &appProtocolHttp, + }, + }, + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromString("my-http-port"), + AppProtocol: &appProtocolHttp, + }, + }, + }, + } + return []runtime.Object{pod1, pod2, pod3, pod4, endpoints, service} + }, + existingResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-created", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{ + "service-created-rs-abcde", // Retained + "service-created-rs-fghij", // Removed + }, + Names: []string{ + "service-created-ds-12345", // Retained + "service-created-ds-23456", // Removed + }, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-created", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + 
}, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + + Prefixes: []string{ + "service-created-rs-abcde", // Retained + "service-created-rs-klmno", // New + }, + Names: []string{ + "service-created-ds-12345", // Retained + "service-created-ds-34567", // New + }, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + }, + { + name: "Service ports changed", + svcName: "service-updated", + k8sObjects: func() []runtime.Object { + pod1 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-abcde") + pod2 := createServicePod(kindDaemonSet, "service-created-ds", "12345") + endpoints := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods(pod1, pod2), + Ports: []corev1.EndpointPort{ + { + Name: "my-http-port", + Port: 2345, + Protocol: "TCP", + AppProtocol: &appProtocolHttp, + }, + { + Name: "my-grpc-port", + Port: 6789, + Protocol: "TCP", + AppProtocol: &appProtocolHttp, + }, + }, + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromString("new-http-port"), + AppProtocol: &appProtocolHttp2, + }, + { + Name: "api", + Port: 9091, + Protocol: "TCP", + TargetPort: intstr.FromString("my-grpc-port"), + AppProtocol: &appProtocolGrpc, + }, + }, + }, + } + return []runtime.Object{pod1, pod2, 
endpoints, service} + }, + existingResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-updated", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + VirtualPort: 9090, + TargetPort: "my-grpc-port", + Protocol: pbcatalog.Protocol_PROTOCOL_GRPC, + }, + { + VirtualPort: 10001, + TargetPort: "unspec-port", //this might need to be changed to "my_unspecified_port" + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + Names: []string{"service-created-ds-12345"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-updated", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "new-http-port", // Updated + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP2, // Updated + }, + { + VirtualPort: 9091, // Updated + TargetPort: "my-grpc-port", + Protocol: pbcatalog.Protocol_PROTOCOL_GRPC, + }, + // Port 10001 removed + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + Names: []string{"service-created-ds-12345"}, + }, + 
VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + }, + { + name: "Redundant reconcile does not write to Consul unless resource was modified", + svcName: "service-updated", + k8sObjects: func() []runtime.Object { + pod1 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-abcde") + pod2 := createServicePodOwnedBy(kindReplicaSet, "service-created-rs-abcde") + endpoints := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods(pod1, pod2), + Ports: []corev1.EndpointPort{ + { + Name: "my-http-port", + Port: 2345, + Protocol: "TCP", + AppProtocol: &appProtocolHttp, + }, + { + Name: "my-grpc-port", + Port: 6789, + Protocol: "TCP", + AppProtocol: &appProtocolGrpc, + }, + { + Name: "other", + Port: 10001, + Protocol: "TCP", + }, + }, + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", + UID: types.UID(randomUid()), + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromString("my-http-port"), + AppProtocol: &appProtocolHttp, + }, + { + Name: "api", + Port: 9090, + Protocol: "TCP", + TargetPort: intstr.FromString("my-grpc-port"), + AppProtocol: &appProtocolGrpc, + }, + { + Name: "other", + Port: 10001, + Protocol: "TCP", + TargetPort: intstr.FromString("cslport-10001"), + // no app protocol specified + }, + }, + }, + } + return []runtime.Object{pod1, pod2, endpoints, service} + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-updated", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: 
constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + VirtualPort: 9090, + TargetPort: "my-grpc-port", + Protocol: pbcatalog.Protocol_PROTOCOL_GRPC, + }, + { + VirtualPort: 10001, + TargetPort: "cslport-10001", + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + caseFn: func(t *testing.T, tc *reconcileCase, ep *Controller, resourceClient pbresource.ResourceServiceClient) { + runReconcile := func() { + r, err := ep.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: tc.svcName, + Namespace: tc.targetConsulNs, + }}) + require.False(t, r.Requeue) + require.NoError(t, err) + } + + // Get resource before additional reconcile + beforeResource := getAndValidateResource(t, resourceClient, tc.expectedResource.Id) + + // Run several additional reconciles, expecting no writes to Consul + for i := 0; i < 5; i++ { + runReconcile() + require.Equal(t, beforeResource.GetGeneration(), + getAndValidateResource(t, resourceClient, tc.expectedResource.Id).GetGeneration(), + "wanted same version for before and after resources following repeat reconcile") + } + + // Modify resource external to controller + modified := proto.Clone(beforeResource).(*pbresource.Resource) + modified.Metadata = map[string]string{"foo": "bar"} + modified.Version = "" + modified.Generation = "" + _, err := resourceClient.Write(context.Background(), &pbresource.WriteRequest{ + Resource: modified, + }) + 
require.NoError(t, err) + + // Get resource after additional reconcile, now expecting a new write to occur + runReconcile() + + require.NotEqual(t, beforeResource.GetGeneration(), + getAndValidateResource(t, resourceClient, tc.expectedResource.Id).GetGeneration(), + "wanted different version for before and after resources following modification and reconcile") + + // Get resource before additional reconcile + beforeResource = getAndValidateResource(t, resourceClient, tc.expectedResource.Id) + + // Run several additional reconciles, expecting no writes to Consul + for i := 0; i < 5; i++ { + runReconcile() + require.Equal(t, beforeResource.GetGeneration(), + getAndValidateResource(t, resourceClient, tc.expectedResource.Id).GetGeneration(), + "wanted same version for before and after resources following repeat reconcile") + } + }, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + runReconcileCase(t, tc) + }) + } +} + +func TestEnsureService(t *testing.T) { + t.Parallel() + + type args struct { + k8sUid string + meta map[string]string + consulSvc *pbcatalog.Service + } + + uuid1 := randomUid() + uuid2 := randomUid() + meta1 := getServiceMeta(corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }}) + meta2 := getServiceMeta(corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }}) + meta2["some-other-key"] = "value" + + id := getServiceID( + "my-svc", + constants.DefaultConsulNS, + constants.DefaultConsulPartition) + + cases := []struct { + name string + beforeArgs args + afterArgs args + readFn func(context.Context, *pbresource.ReadRequest) (*pbresource.ReadResponse, error) + writeFn func(context.Context, *pbresource.WriteRequest) (*pbresource.WriteResponse, error) + expectWrite bool + expectAlwaysWrite bool + expectErr string + caseFn func(t *testing.T, ep *Controller) + }{ + { + name: "Identical args writes once", + beforeArgs: args{ + k8sUid: uuid1, + meta: meta1, + consulSvc: &pbcatalog.Service{ 
+ Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }, + }, + // Identical to before + afterArgs: args{ + k8sUid: uuid1, + meta: meta1, + consulSvc: &pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }, + }, + expectWrite: false, + }, + { + name: "Changed service payload updates resource", + beforeArgs: args{ + k8sUid: uuid1, + meta: meta1, + consulSvc: &pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }, + }, + afterArgs: args{ + k8sUid: uuid1, + meta: meta1, + consulSvc: &pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + // Different workload selector + Prefixes: []string{"service-created-rs-fghij"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }, + }, + expectWrite: true, + }, + { + name: "Changed service meta updates resource", + beforeArgs: args{ + k8sUid: uuid1, + meta: meta1, + 
consulSvc: &pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }, + }, + afterArgs: args{ + k8sUid: uuid1, + meta: meta2, // Updated meta + consulSvc: &pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }, + }, + expectWrite: true, + }, + { + name: "Changed k8s UID updates resource", + beforeArgs: args{ + k8sUid: uuid1, + consulSvc: &pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }, + }, + // Identical to before except K8s UID, indicating delete and rewrite of K8s service + afterArgs: args{ + k8sUid: uuid2, + consulSvc: &pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }, + }, + expectWrite: true, + }, + { + name: "Read not found fails open and writes update", + readFn: 
func(context.Context, *pbresource.ReadRequest) (*pbresource.ReadResponse, error) { + return nil, status.Error(codes.NotFound, "not found") + }, + expectWrite: true, + expectAlwaysWrite: true, + }, + { + name: "Read error fails open and writes update", + readFn: func(context.Context, *pbresource.ReadRequest) (*pbresource.ReadResponse, error) { + return nil, status.Error(codes.PermissionDenied, "not allowed") + }, + expectWrite: true, + expectAlwaysWrite: true, + }, + { + name: "Write error does not prevent future writes (cache not updated)", + writeFn: func(ctx context.Context, request *pbresource.WriteRequest) (*pbresource.WriteResponse, error) { + return nil, status.Error(codes.Internal, "oops") + }, + expectErr: "rpc error: code = Internal desc = oops", + caseFn: func(t *testing.T, ep *Controller) { + require.Empty(t, ep.WriteCache.(*writeCache).data) + }, + }, + } + + // Create test Consul server. + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + // Create the Endpoints controller. 
+ ep := &Controller{ + Client: fake.NewClientBuilder().WithRuntimeObjects().Build(), // No k8s fetches should be needed + WriteCache: NewWriteCache(logrtest.New(t)), + Log: logrtest.New(t), + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + } + + // Set up test resourceReadWriter + rw := struct{ testReadWriter }{} + defaultRw := defaultResourceReadWriter{testClient.ResourceClient} + rw.readFn = defaultRw.Read + rw.writeFn = defaultRw.Write + if tc.readFn != nil { + rw.readFn = tc.readFn + } + if tc.writeFn != nil { + rw.writeFn = tc.writeFn + } + + // Ensure caseFn runs if provided, regardless of whether error is expected + if tc.caseFn != nil { + defer tc.caseFn(t, ep) + } + + // Call first time + err := ep.ensureService(context.Background(), &rw, tc.beforeArgs.k8sUid, id, tc.beforeArgs.meta, tc.beforeArgs.consulSvc) + if tc.expectErr != "" { + require.Contains(t, err.Error(), tc.expectErr) + return + } + require.NoError(t, err) + + // Get written resource before additional calls + beforeResource := getAndValidateResource(t, testClient.ResourceClient, id) + + // Call a second time + err = ep.ensureService(context.Background(), &rw, tc.afterArgs.k8sUid, id, tc.afterArgs.meta, tc.afterArgs.consulSvc) + require.NoError(t, err) + + // Check for change on second call to ensureService + if tc.expectWrite { + require.NotEqual(t, beforeResource.GetGeneration(), getAndValidateResource(t, testClient.ResourceClient, id).GetGeneration(), + "wanted different version for before and after resources following modification and reconcile") + } else { + require.Equal(t, beforeResource.GetGeneration(), getAndValidateResource(t, testClient.ResourceClient, id).GetGeneration(), + "wanted same version for before and after resources following repeat reconcile") + } + + // Call several additional times + for i := 0; i < 5; i++ { + // Get written 
resource before each additional call + beforeResource = getAndValidateResource(t, testClient.ResourceClient, id) + + err := ep.ensureService(context.Background(), &rw, tc.afterArgs.k8sUid, id, tc.afterArgs.meta, tc.afterArgs.consulSvc) + require.NoError(t, err) + + if tc.expectAlwaysWrite { + require.NotEqual(t, beforeResource.GetGeneration(), getAndValidateResource(t, testClient.ResourceClient, id).GetGeneration(), + "wanted different version for before and after resources following modification and reconcile") + } else { + require.Equal(t, beforeResource.GetGeneration(), getAndValidateResource(t, testClient.ResourceClient, id).GetGeneration(), + "wanted same version for before and after resources following repeat reconcile") + } + } + }) + } +} + +type testReadWriter struct { + readFn func(context.Context, *pbresource.ReadRequest) (*pbresource.ReadResponse, error) + writeFn func(context.Context, *pbresource.WriteRequest) (*pbresource.WriteResponse, error) +} + +func (rw *testReadWriter) Read(ctx context.Context, req *pbresource.ReadRequest) (*pbresource.ReadResponse, error) { + return rw.readFn(ctx, req) +} + +func (rw *testReadWriter) Write(ctx context.Context, req *pbresource.WriteRequest) (*pbresource.WriteResponse, error) { + return rw.writeFn(ctx, req) +} + +func TestReconcile_DeleteService(t *testing.T) { + t.Parallel() + cases := []reconcileCase{ + { + name: "Basic Endpoints not found (service deleted) deregisters service", + svcName: "service-deleted", + existingResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-deleted", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + 
}, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + Names: []string{"service-created-ds-12345"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + caseFn: func(t *testing.T, _ *reconcileCase, ep *Controller, _ pbresource.ResourceServiceClient) { + // Ensure cache was also cleared + require.Empty(t, ep.WriteCache.(*writeCache).data) + }, + }, + { + name: "Empty endpoints does not cause deregistration of existing service", + svcName: "service-deleted", + k8sObjects: func() []runtime.Object { + endpoints := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-deleted", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{}, + }, + }, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-deleted", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.18.0.1", + Ports: []corev1.ServicePort{ + { + Name: "public", + Port: 8080, + Protocol: "TCP", + TargetPort: intstr.FromString("my-http-port"), + AppProtocol: &appProtocolHttp, + }, + }, + }, + } + return []runtime.Object{endpoints, service} + }, + existingResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-deleted", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + Names: []string{"service-created-ds-12345"}, + }, 
+ VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "service-deleted", + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: inject.ToProtoAny(&pbcatalog.Service{ + Ports: []*pbcatalog.ServicePort{ + { + VirtualPort: 8080, + TargetPort: "my-http-port", + Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, + }, + { + TargetPort: "mesh", + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"service-created-rs-abcde"}, + Names: []string{"service-created-ds-12345"}, + }, + VirtualIps: []string{"172.18.0.1"}, + }), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByEndpointsValue, + }, + }, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + runReconcileCase(t, tc) + }) + } +} + +func TestGetWorkloadSelectorFromEndpoints(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + type testCase struct { + name string + endpoints corev1.Endpoints + responses map[types.NamespacedName]*corev1.Pod + expected *pbcatalog.WorkloadSelector + mockFn func(*testing.T, *MockPodFetcher) + } + + rsPods := []*corev1.Pod{ + createServicePod(kindReplicaSet, "svc-rs-abcde", "12345"), + createServicePod(kindReplicaSet, "svc-rs-abcde", "23456"), + createServicePod(kindReplicaSet, "svc-rs-abcde", "34567"), + createServicePod(kindReplicaSet, "svc-rs-fghij", "12345"), + createServicePod(kindReplicaSet, "svc-rs-fghij", "23456"), + createServicePod(kindReplicaSet, "svc-rs-fghij", "34567"), + } + otherPods := []*corev1.Pod{ + createServicePod(kindDaemonSet, "svc-ds", "12345"), + 
createServicePod(kindDaemonSet, "svc-ds", "23456"), + createServicePod(kindDaemonSet, "svc-ds", "34567"), + createServicePod("StatefulSet", "svc-ss", "12345"), + createServicePod("StatefulSet", "svc-ss", "23456"), + createServicePod("StatefulSet", "svc-ss", "34567"), + } + ignoredPods := []*corev1.Pod{ + createServicePod(kindReplicaSet, "svc-rs-ignored-klmno", "12345"), + createServicePod(kindReplicaSet, "svc-rs-ignored-klmno", "23456"), + createServicePod(kindReplicaSet, "svc-rs-ignored-klmno", "34567"), + } + + podsByName := make(map[types.NamespacedName]*corev1.Pod) + for _, p := range rsPods { + podsByName[types.NamespacedName{Name: p.Name, Namespace: p.Namespace}] = p + } + for _, p := range otherPods { + podsByName[types.NamespacedName{Name: p.Name, Namespace: p.Namespace}] = p + } + for _, p := range ignoredPods { + removeMeshInjectStatus(t, p) + podsByName[types.NamespacedName{Name: p.Name, Namespace: p.Namespace}] = p + } + + cases := []testCase{ + { + name: "Pod is fetched once per ReplicaSet", + endpoints: corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods(rsPods...), + Ports: []corev1.EndpointPort{ + { + Name: "my-http-port", + AppProtocol: &appProtocolHttp, + Port: 2345, + }, + }, + }, + }, + }, + responses: podsByName, + expected: getWorkloadSelector( + // Selector should consist of prefixes only. + selectorPodData{ + "svc-rs-abcde": &podSetData{}, + "svc-rs-fghij": &podSetData{}, + }, + selectorPodData{}), + mockFn: func(t *testing.T, pf *MockPodFetcher) { + // Assert called once per set. 
+ require.Equal(t, 2, len(pf.calls)) + }, + }, + { + name: "Pod is fetched once per other pod owner type", + endpoints: corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods(otherPods...), + Ports: []corev1.EndpointPort{ + { + Name: "my-http-port", + AppProtocol: &appProtocolHttp, + Port: 2345, + }, + }, + }, + }, + }, + responses: podsByName, + expected: getWorkloadSelector( + // Selector should consist of exact name matches only. + selectorPodData{}, + selectorPodData{ + "svc-ds-12345": &podSetData{}, + "svc-ds-23456": &podSetData{}, + "svc-ds-34567": &podSetData{}, + "svc-ss-12345": &podSetData{}, + "svc-ss-23456": &podSetData{}, + "svc-ss-34567": &podSetData{}, + }), + mockFn: func(t *testing.T, pf *MockPodFetcher) { + // Assert called once per pod. + require.Equal(t, len(otherPods), len(pf.calls)) + }, + }, + { + name: "Pod is ignored if not mesh-injected", + endpoints: corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: addressesForPods(ignoredPods...), + Ports: []corev1.EndpointPort{ + { + Name: "my-http-port", + AppProtocol: &appProtocolHttp, + Port: 2345, + }, + }, + }, + }, + }, + responses: podsByName, + expected: nil, + mockFn: func(t *testing.T, pf *MockPodFetcher) { + // Assert called once for single set. + require.Equal(t, 1, len(pf.calls)) + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + // Create mock pod fetcher. + pf := MockPodFetcher{responses: tc.responses} + + // Create the Endpoints controller. 
+ ep := &Controller{ + WriteCache: NewWriteCache(logrtest.New(t)), + Log: logrtest.New(t), + } + + prefixedPods, exactNamePods, err := ep.getWorkloadDataFromEndpoints(ctx, &pf, tc.endpoints) + require.NoError(t, err) + + ws := getWorkloadSelector(prefixedPods, exactNamePods) + if diff := cmp.Diff(tc.expected, ws, test.CmpProtoIgnoreOrder()...); diff != "" { + t.Errorf("unexpected difference:\n%v", diff) + } + tc.mockFn(t, &pf) + }) + } +} + +type MockPodFetcher struct { + calls []types.NamespacedName + responses map[types.NamespacedName]*corev1.Pod +} + +func (m *MockPodFetcher) GetPod(_ context.Context, name types.NamespacedName) (*corev1.Pod, error) { + m.calls = append(m.calls, name) + if v, ok := m.responses[name]; !ok { + panic(fmt.Errorf("test is missing response for passed pod name: %v", name)) + } else { + return v, nil + } +} + +func runReconcileCase(t *testing.T, tc reconcileCase) { + t.Helper() + + // Create fake k8s client + var k8sObjects []runtime.Object + if tc.k8sObjects != nil { + k8sObjects = tc.k8sObjects() + } + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() + + // Create test Consul server. + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + // Create the Endpoints controller. + ep := &Controller{ + Client: fakeClient, + WriteCache: NewWriteCache(logrtest.New(t)), + Log: logrtest.New(t), + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + } + + // Default ns and partition if not specified in test. + if tc.targetConsulNs == "" { + tc.targetConsulNs = constants.DefaultConsulNS + } + if tc.targetConsulPartition == "" { + tc.targetConsulPartition = constants.DefaultConsulPartition + } + + // If existing resource specified, create it and ensure it exists. 
+ if tc.existingResource != nil { + writeReq := &pbresource.WriteRequest{Resource: tc.existingResource} + _, err := testClient.ResourceClient.Write(context.Background(), writeReq) + require.NoError(t, err) + test.ResourceHasPersisted(t, context.Background(), testClient.ResourceClient, tc.existingResource.Id) + } + + // Run actual reconcile and verify results. + resp, err := ep.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: tc.svcName, + Namespace: tc.targetConsulNs, + }, + }) + if tc.expErr != "" { + require.ErrorContains(t, err, tc.expErr) + } else { + require.NoError(t, err) + } + require.False(t, resp.Requeue) + + expectedServiceMatches(t, testClient.ResourceClient, tc.svcName, tc.targetConsulNs, tc.targetConsulPartition, tc.expectedResource) + + if tc.caseFn != nil { + tc.caseFn(t, &tc, ep, testClient.ResourceClient) + } +} + +func expectedServiceMatches(t *testing.T, client pbresource.ResourceServiceClient, name, namespace, partition string, expectedResource *pbresource.Resource) { + req := &pbresource.ReadRequest{Id: getServiceID(name, namespace, partition)} + + res, err := client.Read(context.Background(), req) + + if expectedResource == nil { + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, s.Code()) + return + } + + require.NoError(t, err) + require.NotNil(t, res) + require.NotNil(t, res.GetResource().GetData()) + + expectedService := &pbcatalog.Service{} + err = anypb.UnmarshalTo(expectedResource.Data, expectedService, proto.UnmarshalOptions{}) + require.NoError(t, err) + + actualService := &pbcatalog.Service{} + err = res.GetResource().GetData().UnmarshalTo(actualService) + require.NoError(t, err) + + if diff := cmp.Diff(expectedService, actualService, test.CmpProtoIgnoreOrder()...); diff != "" { + t.Errorf("unexpected difference:\n%v", diff) + } +} + +func createServicePodOwnedBy(ownerKind, ownerName string, containers ...corev1.Container) 
*corev1.Pod { + return createServicePod(ownerKind, ownerName, randomKubernetesId(), containers...) +} + +func createServicePod(ownerKind, ownerName, podId string, containers ...corev1.Container) *corev1.Pod { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", ownerName, podId), + Namespace: "default", + Labels: map[string]string{}, + Annotations: map[string]string{ + constants.AnnotationConsulK8sVersion: "1.3.0", + constants.KeyMeshInjectStatus: constants.Injected, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: ownerName, + Kind: ownerKind, + }, + }, + }, + Spec: corev1.PodSpec{ + Containers: containers, + }, + } + return pod +} + +func containerWithPort(name string, port int32) corev1.Container { + return corev1.Container{ + Ports: []corev1.ContainerPort{ + { + Name: name, + ContainerPort: port, + Protocol: "TCP", + }, + }, + } +} + +func addressesForPods(pods ...*corev1.Pod) []corev1.EndpointAddress { + var addresses []corev1.EndpointAddress + for i, p := range pods { + addresses = append(addresses, corev1.EndpointAddress{ + IP: fmt.Sprintf("1.2.3.%d", i), + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: p.Name, + Namespace: p.Namespace, + }, + }) + } + return addresses +} + +func randomKubernetesId() string { + u, err := uuid.GenerateUUID() + if err != nil { + panic(err) + } + return u[:5] +} + +func randomUid() string { + u, err := uuid.GenerateUUID() + if err != nil { + panic(err) + } + return u +} + +func removeMeshInjectStatus(t *testing.T, pod *corev1.Pod) { + delete(pod.Annotations, constants.KeyMeshInjectStatus) + require.False(t, inject.HasBeenMeshInjected(*pod)) +} + +func getAndValidateResource(t *testing.T, resourceClient pbresource.ResourceServiceClient, id *pbresource.ID) *pbresource.Resource { + resp, err := resourceClient.Read(metadata.NewOutgoingContext( + context.Background(), + // Read with strong consistency to avoid race conditions + 
metadata.New(map[string]string{"x-consul-consistency-mode": "consistent"}), + ), &pbresource.ReadRequest{ + Id: id, + }) + require.NoError(t, err) + r := resp.GetResource() + require.NotNil(t, r) + require.NotEmpty(t, r.GetGeneration()) + return r +} diff --git a/control-plane/connect-inject/controllers/endpointsv2/write_cache.go b/control-plane/connect-inject/controllers/endpointsv2/write_cache.go new file mode 100644 index 0000000000..0baf537ef7 --- /dev/null +++ b/control-plane/connect-inject/controllers/endpointsv2/write_cache.go @@ -0,0 +1,130 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package endpointsv2 + +import ( + "bytes" + "fmt" + "github.com/go-logr/logr" + "github.com/hashicorp/go-multierror" + "sync" +) + +// consulWriteRecord is a record of writing a resource to Consul for the sake of deduplicating writes. +// +// It is bounded in size and even a low-resource pod should be able to store 10Ks of them in-memory without worrying +// about eviction. On average, assuming a SHA256 hash, the total size of each record should be approximately 150 bytes. +type consulWriteRecord struct { + // inputHash is a deterministic hash of the payload written to Consul. + // It should be derived from the "source" data rather than the returned payload in order to be unaffected by added + // fields and defaulting behavior defined by Consul. + inputHash []byte + // generation is the generation of the written resource in Consul. This ensures that we write to Consul if a + // redundant reconcile occurs, but the actual Consul resource has been modified since the last write. + generation string + // k8sUid is the UID of the corresponding resource in K8s. This allows us to check for K8s service recreation in + // between successful reconciles even though deletion of a K8s resource does not expose the UID of the deleted + // resource (the reconcile request only contains the namespaced name). 
+ k8sUid string +} + +// WriteCache is a simple, unbounded, thread-safe in-memory cache for tracking writes of Consul resources. +// It can be used to deduplicate identical writes client-side to "debounce" writes during repeat reconciles +// that do not impact data already written to Consul. +type WriteCache interface { + hasMatch(key string, hash []byte, generationFetchFn func() string, k8sUid string) bool + update(key string, hash []byte, generation string, k8sUid string) + remove(key string) +} + +type writeCache struct { + data map[string]consulWriteRecord + dataMutex sync.RWMutex + + log logr.Logger +} + +func NewWriteCache(log logr.Logger) WriteCache { + return &writeCache{ + data: make(map[string]consulWriteRecord), + log: log.WithName("writeCache"), + } +} + +// update upserts a record containing the given hash and generation to the cache at the given key. +func (c *writeCache) update(key string, hash []byte, generation string, k8sUid string) { + c.dataMutex.Lock() + defer c.dataMutex.Unlock() + + var err error + if key == "" { + err = multierror.Append(err, fmt.Errorf("key was empty")) + } + if len(hash) == 0 { + err = multierror.Append(err, fmt.Errorf("hash was empty")) + } + if generation == "" { + err = multierror.Append(err, fmt.Errorf("generation was empty")) + } + if k8sUid == "" { + err = multierror.Append(err, fmt.Errorf("k8sUid was empty")) + } + if err != nil { + c.log.Error(err, "writeCache could not be updated due to empty value(s) - redundant writes may be repeated") + return + } + + c.data[key] = consulWriteRecord{ + inputHash: hash, + generation: generation, + k8sUid: k8sUid, + } +} + +// remove removes a record from the cache at the given key. +func (c *writeCache) remove(key string) { + c.dataMutex.Lock() + defer c.dataMutex.Unlock() + + delete(c.data, key) +} + +// hasMatch returns true iff. 
there is an existing write record for the given key in the cache, and that record matches +// the provided non-empty hash, generation, and Kubernetes UID. +// +// The generation is fetched rather than provided directly s.t. a call to Consul can be skipped if a record is not found +// or other available fields do not match. +// +// While not strictly necessary assuming the controller is the sole writer of the resource, the generation check ensures +// that the resource is kept in sync even if externally modified. +// +// When checking for a match, ensures the UID of the K8s service also matches s.t. we don't skip updates on recreation +// of a K8s service, as the intent of the user may have been to force a sync, and a future solution that stores write +// fingerprints in K8s annotations would also have this behavior. +func (c *writeCache) hasMatch(key string, hash []byte, generationFetchFn func() string, k8sUid string) bool { + var lastHash []byte + lastGeneration := "" + lastK8sUid := "" + if s, ok := c.get(key); ok { + lastHash = s.inputHash + lastGeneration = s.generation + lastK8sUid = s.k8sUid + } + + if len(lastHash) == 0 || lastGeneration == "" || lastK8sUid == "" { + return false + } + + return bytes.Equal(lastHash, hash) && + lastK8sUid == k8sUid && + lastGeneration == generationFetchFn() // Fetch generation only if other fields match +} + +func (c *writeCache) get(key string) (consulWriteRecord, bool) { + c.dataMutex.RLock() + defer c.dataMutex.RUnlock() + + v, ok := c.data[key] + return v, ok +} diff --git a/control-plane/connect-inject/controllers/endpointsv2/write_cache_test.go b/control-plane/connect-inject/controllers/endpointsv2/write_cache_test.go new file mode 100644 index 0000000000..2b22c5707a --- /dev/null +++ b/control-plane/connect-inject/controllers/endpointsv2/write_cache_test.go @@ -0,0 +1,240 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package endpointsv2 + +import ( + logrtest "github.com/go-logr/logr/testr" + "github.com/hashicorp/go-uuid" + "testing" +) + +func Test_writeCache(t *testing.T) { + testHash := randomBytes() + testGeneration := randomString() + testK8sUid := randomString() + + type args struct { + key string + hash []byte + generationFetchFn func() string + k8sUid string + } + cases := []struct { + name string + args args + setupFn func(args args, cache WriteCache) + want bool + }{ + { + name: "No data returns false", + args: args{ + "foo", + testHash, + func() string { + return testGeneration + }, + testK8sUid, + }, + want: false, + }, + { + name: "Non-matching key returns false", + args: args{ + "foo", + testHash, + func() string { + return testGeneration + }, + testK8sUid, + }, + setupFn: func(args args, cache WriteCache) { + cache.update("another-key", args.hash, args.generationFetchFn(), args.k8sUid) + }, + want: false, + }, + { + name: "Non-matching hash returns false", + args: args{ + "foo", + testHash, + func() string { + return testGeneration + }, + testK8sUid, + }, + setupFn: func(args args, cache WriteCache) { + cache.update(args.key, randomBytes(), args.generationFetchFn(), args.k8sUid) + }, + want: false, + }, + { + name: "Non-matching generation returns false", + args: args{ + "foo", + testHash, + func() string { + return testGeneration + }, + testK8sUid, + }, + setupFn: func(args args, cache WriteCache) { + cache.update(args.key, args.hash, randomString(), args.k8sUid) + }, + want: false, + }, + { + name: "Non-matching k8sUid returns false", + args: args{ + "foo", + testHash, + func() string { + return testGeneration + }, + testK8sUid, + }, + setupFn: func(args args, cache WriteCache) { + cache.update(args.key, args.hash, args.generationFetchFn(), randomString()) + }, + want: false, + }, + { + name: "Matching data returns true", + args: args{ + "foo", + testHash, + func() string { + return testGeneration + }, + testK8sUid, + }, 
+ setupFn: func(args args, cache WriteCache) { + cache.update(args.key, args.hash, args.generationFetchFn(), args.k8sUid) + }, + want: true, + }, + { + name: "Removed data returns false", + args: args{ + "foo", + testHash, + func() string { + return testGeneration + }, + testK8sUid, + }, + setupFn: func(args args, cache WriteCache) { + cache.update(args.key, args.hash, args.generationFetchFn(), args.k8sUid) + cache.update("another-key", randomBytes(), randomString(), randomString()) + cache.remove(args.key) + }, + want: false, + }, + { + name: "Replaced data returns false", + args: args{ + "foo", + testHash, + func() string { + return testGeneration + }, + testK8sUid, + }, + setupFn: func(args args, cache WriteCache) { + cache.update(args.key, args.hash, args.generationFetchFn(), args.k8sUid) + cache.update(args.key, randomBytes(), args.generationFetchFn(), args.k8sUid) + }, + want: false, + }, + { + name: "Invalid hash does not update cache", + args: args{ + "foo", + testHash, + func() string { + return testGeneration + }, + testK8sUid, + }, + setupFn: func(args args, cache WriteCache) { + cache.update(args.key, args.hash, args.generationFetchFn(), args.k8sUid) + cache.update(args.key, []byte{}, args.generationFetchFn(), args.k8sUid) + }, + want: true, + }, + { + name: "Invalid generation does not update cache", + args: args{ + "foo", + testHash, + func() string { + return testGeneration + }, + testK8sUid, + }, + setupFn: func(args args, cache WriteCache) { + cache.update(args.key, args.hash, args.generationFetchFn(), args.k8sUid) + cache.update(args.key, args.hash, "", args.k8sUid) + }, + want: true, + }, + { + name: "Invalid k8sUid does not update cache", + args: args{ + "foo", + testHash, + func() string { + return testGeneration + }, + testK8sUid, + }, + setupFn: func(args args, cache WriteCache) { + cache.update(args.key, args.hash, args.generationFetchFn(), args.k8sUid) + cache.update(args.key, args.hash, args.generationFetchFn(), "") + }, + want: true, + }, 
+ { + name: "Invalid key is ignored", + args: args{ + "", + testHash, + func() string { + return testGeneration + }, + testK8sUid, + }, + setupFn: func(args args, cache WriteCache) { + cache.update("", args.hash, args.generationFetchFn(), args.k8sUid) + }, + want: false, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + c := NewWriteCache(logrtest.New(t)) + if tc.setupFn != nil { + tc.setupFn(tc.args, c) + } + if got := c.hasMatch(tc.args.key, tc.args.hash, tc.args.generationFetchFn, tc.args.k8sUid); got != tc.want { + t.Errorf("hasMatch() = %v, want %v", got, tc.want) + } + }) + } +} + +func randomBytes() []byte { + b, err := uuid.GenerateRandomBytes(32) + if err != nil { + panic(err) + } + return b +} + +func randomString() string { + u, err := uuid.GenerateUUID() + if err != nil { + panic(err) + } + return u +} diff --git a/control-plane/connect-inject/controllers/pod/pod_controller.go b/control-plane/connect-inject/controllers/pod/pod_controller.go new file mode 100644 index 0000000000..febe1d6122 --- /dev/null +++ b/control-plane/connect-inject/controllers/pod/pod_controller.go @@ -0,0 +1,754 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pod + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "regexp" + "strings" + + "github.com/go-logr/logr" + "github.com/hashicorp/consul/api" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/go-multierror" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/metrics" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" +) + +const ( + DefaultTelemetryBindSocketDir = "/consul/mesh-inject" + consulNodeAddress = "127.0.0.1" + tokenMetaPodNameKey = "pod" +) + +// Controller watches Pod events and converts them to V2 Workloads and HealthStatus. +// The translation from Pod to Workload is 1:1 and the HealthStatus object is a representation +// of the Pod's Status field. Controller is also responsible for generating V2 Upstreams resources +// when not in transparent proxy mode. ProxyConfiguration is also optionally created. +type Controller struct { + client.Client + // ConsulClientConfig is the config for the Consul API client. + ConsulClientConfig *consul.Config + // ConsulServerConnMgr is the watcher for the Consul server addresses. + ConsulServerConnMgr consul.ServerConnectionManager + // K8sNamespaceConfig manages allow/deny Kubernetes namespaces. 
+ common.K8sNamespaceConfig + // ConsulTenancyConfig manages settings related to Consul namespaces and partitions. + common.ConsulTenancyConfig + + // TODO: EnableWANFederation + + // EnableTransparentProxy controls whether transparent proxy should be enabled + // for all proxy service registrations. + EnableTransparentProxy bool + // TProxyOverwriteProbes controls whether the pods controller should expose pod's HTTP probes + // via Envoy proxy. + TProxyOverwriteProbes bool + + // AuthMethod is the name of the Kubernetes Auth Method that + // was used to login with Consul. The pods controller + // will delete any tokens associated with this auth method + // whenever service instances are deregistered. + AuthMethod string + + // EnableTelemetryCollector controls whether the proxy service should be registered + // with config to enable telemetry forwarding. + EnableTelemetryCollector bool + + MetricsConfig metrics.Config + Log logr.Logger + + // ResourceClient is a gRPC client for the resource service. It is public for testing purposes + ResourceClient pbresource.ResourceServiceClient +} + +// TODO: logs, logs, logs + +// Reconcile reads the state of a Kubernetes Pod and reconciles Consul workloads that are 1:1 mapped. +func (r *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + var errs error + var pod corev1.Pod + + // Ignore the request if the namespace of the pod is not allowed. 
+ // Strictly speaking, this is not required because the mesh webhook also knows valid namespaces + // for injection, but it will somewhat reduce the amount of unnecessary deletions for non-injected + // pods + if inject.ShouldIgnore(req.Namespace, r.DenyK8sNamespacesSet, r.AllowK8sNamespacesSet) { + return ctrl.Result{}, nil + } + + rc, err := consul.NewResourceServiceClient(r.ConsulServerConnMgr) + if err != nil { + r.Log.Error(err, "failed to create resource client", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + r.ResourceClient = rc + + apiClient, err := consul.NewClientFromConnMgr(r.ConsulClientConfig, r.ConsulServerConnMgr) + if err != nil { + r.Log.Error(err, "failed to create Consul API client", "name", req.Name) + return ctrl.Result{}, err + } + + if r.ConsulClientConfig.APIClientConfig.Token != "" { + ctx = metadata.AppendToOutgoingContext(ctx, "x-consul-token", r.ConsulClientConfig.APIClientConfig.Token) + } + + err = r.Client.Get(ctx, req.NamespacedName, &pod) + + // If the pod object has been deleted (and we get an IsNotFound error), + // we need to remove the Workload from Consul. 
+ if k8serrors.IsNotFound(err) { + + // Consul should also clean up the orphaned HealthStatus + if err := r.deleteWorkload(ctx, req.NamespacedName); err != nil { + errs = multierror.Append(errs, err) + } + + // Delete destinations, if any exist + if err := r.deleteDestinations(ctx, req.NamespacedName); err != nil { + errs = multierror.Append(errs, err) + } + + if err := r.deleteProxyConfiguration(ctx, req.NamespacedName); err != nil { + errs = multierror.Append(errs, err) + } + + if r.AuthMethod != "" { + r.Log.Info("deleting ACL tokens for pod", "name", req.Name, "ns", req.Namespace) + err := r.deleteACLTokensForPod(apiClient, req.NamespacedName) + if err != nil { + r.Log.Error(err, "failed to delete ACL tokens for pod", "name", req.Name, "ns", req.Namespace) + errs = multierror.Append(errs, err) + } + } + + return ctrl.Result{}, errs + } else if err != nil { + r.Log.Error(err, "failed to get Pod", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + + r.Log.Info("retrieved", "name", pod.Name, "ns", pod.Namespace) + + if inject.HasBeenMeshInjected(pod) || inject.IsGateway(pod) { + + // It is possible the pod was scheduled but doesn't have an allocated IP yet, so safely requeue + if pod.Status.PodIP == "" { + r.Log.Info("pod does not have IP allocated; re-queueing request", "pod", req.Name, "ns", req.Namespace) + return ctrl.Result{Requeue: true}, nil + } + + if err := r.writeProxyConfiguration(ctx, pod); err != nil { + // We could be racing with the namespace controller. + // Requeue (which includes backoff) to try again. 
+ if inject.ConsulNamespaceIsNotFound(err) { + r.Log.Info("Consul namespace not found; re-queueing request", + "pod", req.Name, "ns", req.Namespace, "consul-ns", + r.getConsulNamespace(req.Namespace), "err", err.Error()) + return ctrl.Result{Requeue: true}, nil + } + errs = multierror.Append(errs, err) + } + + if err := r.writeWorkload(ctx, pod); err != nil { + // Technically this is not needed, but keeping in case this gets refactored in + // a different order + if inject.ConsulNamespaceIsNotFound(err) { + r.Log.Info("Consul namespace not found; re-queueing request", + "pod", req.Name, "ns", req.Namespace, "consul-ns", + r.getConsulNamespace(req.Namespace), "err", err.Error()) + return ctrl.Result{Requeue: true}, nil + } + errs = multierror.Append(errs, err) + } + + // Create explicit destinations (if any exist) + if err := r.writeDestinations(ctx, pod); err != nil { + // Technically this is not needed, but keeping in case this gets refactored in + // a different order + if inject.ConsulNamespaceIsNotFound(err) { + r.Log.Info("Consul namespace not found; re-queueing request", + "pod", req.Name, "ns", req.Namespace, "consul-ns", + r.getConsulNamespace(req.Namespace), "err", err.Error()) + return ctrl.Result{Requeue: true}, nil + } + errs = multierror.Append(errs, err) + } + + if err := r.writeHealthStatus(ctx, pod); err != nil { + // Technically this is not needed, but keeping in case this gets refactored in + // a different order + if inject.ConsulNamespaceIsNotFound(err) { + r.Log.Info("Consul namespace not found; re-queueing request", + "pod", req.Name, "ns", req.Namespace, "consul-ns", + r.getConsulNamespace(req.Namespace), "err", err.Error()) + return ctrl.Result{Requeue: true}, nil + } + errs = multierror.Append(errs, err) + } + } + + return ctrl.Result{}, errs +} + +func (r *Controller) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Pod{}). 
+ Complete(r) +} + +func (r *Controller) deleteWorkload(ctx context.Context, pod types.NamespacedName) error { + req := &pbresource.DeleteRequest{ + Id: getWorkloadID(pod.Name, r.getConsulNamespace(pod.Namespace), r.getPartition()), + } + + _, err := r.ResourceClient.Delete(ctx, req) + return err +} + +func (r *Controller) deleteProxyConfiguration(ctx context.Context, pod types.NamespacedName) error { + req := &pbresource.DeleteRequest{ + Id: getProxyConfigurationID(pod.Name, r.getConsulNamespace(pod.Namespace), r.getPartition()), + } + + _, err := r.ResourceClient.Delete(ctx, req) + return err +} + +// deleteACLTokensForPod finds the ACL tokens that belongs to the pod and delete them from Consul. +// It will only check for ACL tokens that have been created with the auth method this controller +// has been configured with and will only delete tokens for the provided pod Name. +func (r *Controller) deleteACLTokensForPod(apiClient *api.Client, pod types.NamespacedName) error { + // Skip if name is empty. + if pod.Name == "" { + return nil + } + + // Use the V1 logic for getting a compatible namespace + consulNamespace := namespaces.ConsulNamespace( + pod.Namespace, + r.EnableConsulNamespaces, + r.ConsulDestinationNamespace, r.EnableNSMirroring, r.NSMirroringPrefix, + ) + + // TODO: create an index for the workloadidentity in Consul, which will also require + // the identity to be attached to the token for templated-policies. + tokens, _, err := apiClient.ACL().TokenListFiltered( + api.ACLTokenFilterOptions{ + AuthMethod: r.AuthMethod, + }, + &api.QueryOptions{ + Namespace: consulNamespace, + }) + if err != nil { + return fmt.Errorf("failed to get a list of tokens from Consul: %s", err) + } + + // We iterate through each token in the auth method, which is terribly inefficient. + // See discussion above about optimizing the token list query. 
+ for _, token := range tokens { + tokenMeta, err := getTokenMetaFromDescription(token.Description) + // It is possible this is from another component, so continue searching + if errors.Is(err, NoMetadataErr) { + continue + } + if err != nil { + return fmt.Errorf("failed to parse token metadata: %s", err) + } + + tokenPodName := strings.TrimPrefix(tokenMeta[tokenMetaPodNameKey], pod.Namespace+"/") + + // If the token belongs to the pod being deleted, delete the token. + if tokenPodName == pod.Name { + r.Log.Info("deleting ACL token", "name", pod.Name, "namespace", pod.Namespace, "ID", token.AccessorID) + if _, err := apiClient.ACL().TokenDelete(token.AccessorID, &api.WriteOptions{Namespace: consulNamespace}); err != nil { + return fmt.Errorf("failed to delete token from Consul: %s", err) + } + } + } + return nil +} + +var NoMetadataErr = fmt.Errorf("failed to extract token metadata from description") + +// getTokenMetaFromDescription parses JSON metadata from token's description. +func getTokenMetaFromDescription(description string) (map[string]string, error) { + re := regexp.MustCompile(`.*({.+})`) + + matches := re.FindStringSubmatch(description) + if len(matches) != 2 { + return nil, NoMetadataErr + } + tokenMetaJSON := matches[1] + + var tokenMeta map[string]string + err := json.Unmarshal([]byte(tokenMetaJSON), &tokenMeta) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal token metadata '%s': %s", tokenMetaJSON, err) + } + + return tokenMeta, nil +} + +func (r *Controller) writeWorkload(ctx context.Context, pod corev1.Pod) error { + + // TODO: we should add some validation on the required fields here + // e.g. what if token automount is disabled and there is not SA. The API call + // will fail with no indication to the user other than controller logs + ports, workloadPorts := getWorkloadPorts(pod) + + var node corev1.Node + // Ignore errors because we don't want failures to block running services. 
+ _ = r.Client.Get(context.Background(), types.NamespacedName{Name: pod.Spec.NodeName, Namespace: pod.Namespace}, &node) + locality := parseLocality(node) + + workload := &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: pod.Status.PodIP, Ports: ports}, + }, + Identity: pod.Spec.ServiceAccountName, + Locality: locality, + // Adding a node does not currently work because the node doesn't exist so its health status will always be + // unhealthy, causing any endpoints on that node to also be unhealthy. + // TODO: (v2/nitya) Bring this back when node controller is built. + // NodeName: inject.ConsulNodeNameFromK8sNode(pod.Spec.NodeName), + Ports: workloadPorts, + } + data := inject.ToProtoAny(workload) + + resourceID := getWorkloadID(pod.GetName(), r.getConsulNamespace(pod.Namespace), r.getPartition()) + r.Log.Info("registering workload with Consul", getLogFieldsForResource(resourceID)...) + req := &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: resourceID, + Metadata: metaFromPod(pod), + Data: data, + }, + } + _, err := r.ResourceClient.Write(ctx, req) + return err +} + +func (r *Controller) writeProxyConfiguration(ctx context.Context, pod corev1.Pod) error { + mode, err := r.getTproxyMode(ctx, pod) + if err != nil { + return fmt.Errorf("failed to get transparent proxy mode: %w", err) + } + + exposeConfig, err := r.getExposeConfig(pod) + if err != nil { + return fmt.Errorf("failed to get expose config: %w", err) + } + + bootstrapConfig, err := r.getBootstrapConfig(pod) + if err != nil { + return fmt.Errorf("failed to get bootstrap config: %w", err) + } + + if exposeConfig == nil && + bootstrapConfig == nil && + mode == pbmesh.ProxyMode_PROXY_MODE_DEFAULT { + // It's possible to remove interesting annotations and need to clear any existing config, + // but for now we treat pods as immutable configs owned by other managers. 
+ return nil + } + + pc := &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{pod.GetName()}, + }, + DynamicConfig: &pbmesh.DynamicConfig{ + Mode: mode, + ExposeConfig: exposeConfig, + }, + BootstrapConfig: bootstrapConfig, + } + data := inject.ToProtoAny(pc) + + req := &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: getProxyConfigurationID(pod.GetName(), r.getConsulNamespace(pod.Namespace), r.getPartition()), + Metadata: metaFromPod(pod), + Data: data, + }, + } + _, err = r.ResourceClient.Write(ctx, req) + return err +} + +func (r *Controller) getTproxyMode(ctx context.Context, pod corev1.Pod) (pbmesh.ProxyMode, error) { + // A user can enable/disable tproxy for an entire namespace. + var ns corev1.Namespace + err := r.Client.Get(ctx, types.NamespacedName{Name: pod.GetNamespace()}, &ns) + if err != nil { + return pbmesh.ProxyMode_PROXY_MODE_DEFAULT, fmt.Errorf("could not get namespace info for %s: %w", pod.GetNamespace(), err) + } + + tproxyEnabled, err := inject.TransparentProxyEnabled(ns, pod, r.EnableTransparentProxy) + if err != nil { + return pbmesh.ProxyMode_PROXY_MODE_DEFAULT, fmt.Errorf("could not determine if transparent proxy is enabled: %w", err) + } + + if tproxyEnabled { + return pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT, nil + } + return pbmesh.ProxyMode_PROXY_MODE_DEFAULT, nil +} + +func (r *Controller) getExposeConfig(pod corev1.Pod) (*pbmesh.ExposeConfig, error) { + // Expose k8s probes as Envoy listeners if needed. 
+ overwriteProbes, err := inject.ShouldOverwriteProbes(pod, r.TProxyOverwriteProbes) + if err != nil { + return nil, fmt.Errorf("could not determine if probes should be overwritten: %w", err) + } + + if !overwriteProbes { + return nil, nil + } + + var originalPod corev1.Pod + err = json.Unmarshal([]byte(pod.Annotations[constants.AnnotationOriginalPod]), &originalPod) + if err != nil { + return nil, fmt.Errorf("failed to get original pod spec: %w", err) + } + + exposeConfig := &pbmesh.ExposeConfig{} + for _, mutatedContainer := range pod.Spec.Containers { + for _, originalContainer := range originalPod.Spec.Containers { + if originalContainer.Name == mutatedContainer.Name { + paths, err := getContainerExposePaths(originalPod, originalContainer, mutatedContainer) + if err != nil { + return nil, fmt.Errorf("error getting container expose path for %s: %w", originalContainer.Name, err) + } + + exposeConfig.ExposePaths = append(exposeConfig.ExposePaths, paths...) + } + } + } + + if len(exposeConfig.ExposePaths) == 0 { + return nil, nil + } + return exposeConfig, nil +} + +func getContainerExposePaths(originalPod corev1.Pod, originalContainer, mutatedContainer corev1.Container) ([]*pbmesh.ExposePath, error) { + var paths []*pbmesh.ExposePath + if mutatedContainer.LivenessProbe != nil && mutatedContainer.LivenessProbe.HTTPGet != nil { + originalLivenessPort, err := inject.PortValueFromIntOrString(originalPod, originalContainer.LivenessProbe.HTTPGet.Port) + if err != nil { + return nil, err + } + + newPath := &pbmesh.ExposePath{ + ListenerPort: uint32(mutatedContainer.LivenessProbe.HTTPGet.Port.IntValue()), + LocalPathPort: originalLivenessPort, + Path: mutatedContainer.LivenessProbe.HTTPGet.Path, + } + paths = append(paths, newPath) + } + if mutatedContainer.ReadinessProbe != nil && mutatedContainer.ReadinessProbe.HTTPGet != nil { + originalReadinessPort, err := inject.PortValueFromIntOrString(originalPod, originalContainer.ReadinessProbe.HTTPGet.Port) + if err != nil { + 
return nil, err + } + + newPath := &pbmesh.ExposePath{ + ListenerPort: uint32(mutatedContainer.ReadinessProbe.HTTPGet.Port.IntValue()), + LocalPathPort: originalReadinessPort, + Path: mutatedContainer.ReadinessProbe.HTTPGet.Path, + } + paths = append(paths, newPath) + } + if mutatedContainer.StartupProbe != nil && mutatedContainer.StartupProbe.HTTPGet != nil { + originalStartupPort, err := inject.PortValueFromIntOrString(originalPod, originalContainer.StartupProbe.HTTPGet.Port) + if err != nil { + return nil, err + } + + newPath := &pbmesh.ExposePath{ + ListenerPort: uint32(mutatedContainer.StartupProbe.HTTPGet.Port.IntValue()), + LocalPathPort: originalStartupPort, + Path: mutatedContainer.StartupProbe.HTTPGet.Path, + } + paths = append(paths, newPath) + } + return paths, nil +} + +func (r *Controller) getBootstrapConfig(pod corev1.Pod) (*pbmesh.BootstrapConfig, error) { + bootstrap := &pbmesh.BootstrapConfig{} + + // If metrics are enabled, the BootstrapConfig should set envoy_prometheus_bind_addr to a listener on 0.0.0.0 on + // the PrometheusScrapePort. The backend for this listener will be determined by + // the consul-dataplane command line flags generated by the webhook. + // If there is a merged metrics server, the backend would be that server. + // If we are not running the merged metrics server, the backend should just be the Envoy metrics endpoint. 
+ enableMetrics, err := r.MetricsConfig.EnableMetrics(pod) + if err != nil { + return nil, fmt.Errorf("error determining if metrics are enabled: %w", err) + } + if enableMetrics { + prometheusScrapePort, err := r.MetricsConfig.PrometheusScrapePort(pod) + if err != nil { + return nil, err + } + prometheusScrapeListener := fmt.Sprintf("0.0.0.0:%s", prometheusScrapePort) + bootstrap.PrometheusBindAddr = prometheusScrapeListener + } + + if r.EnableTelemetryCollector { + bootstrap.TelemetryCollectorBindSocketDir = DefaultTelemetryBindSocketDir + } + + if proto.Equal(bootstrap, &pbmesh.BootstrapConfig{}) { + return nil, nil + } + return bootstrap, nil +} + +func (r *Controller) writeHealthStatus(ctx context.Context, pod corev1.Pod) error { + status := getHealthStatusFromPod(pod) + + hs := &pbcatalog.HealthStatus{ + Type: constants.ConsulKubernetesCheckType, + Status: status, + Description: constants.ConsulKubernetesCheckName, + Output: getHealthStatusReason(status, pod), + } + data := inject.ToProtoAny(hs) + + req := &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: getHealthStatusID(pod.GetName(), r.getConsulNamespace(pod.Namespace), r.getPartition()), + Owner: getWorkloadID(pod.GetName(), r.getConsulNamespace(pod.Namespace), r.getPartition()), + Metadata: metaFromPod(pod), + Data: data, + }, + } + _, err := r.ResourceClient.Write(ctx, req) + return err +} + +// TODO: delete ACL token for workload +// deleteACLTokensForServiceInstance finds the ACL tokens that belongs to the service instance and deletes it from Consul. +// It will only check for ACL tokens that have been created with the auth method this controller +// has been configured with and will only delete tokens for the provided podName. +// func (r *Controller) deleteACLTokensForWorkload(apiClient *api.Client, svc *api.AgentService, k8sNS, podName string) error { + +// writeDestinations will write explicit destinations if pod annotations exist. 
+func (r *Controller) writeDestinations(ctx context.Context, pod corev1.Pod) error { + uss, err := inject.ProcessPodDestinations(pod, r.EnableConsulPartitions, r.EnableConsulNamespaces) + if err != nil { + return fmt.Errorf("error processing destination annotations: %s", err.Error()) + } + if uss == nil { + return nil + } + + data := inject.ToProtoAny(uss) + req := &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: getDestinationsID(pod.GetName(), r.getConsulNamespace(pod.Namespace), r.getPartition()), + Metadata: metaFromPod(pod), + Data: data, + }, + } + _, err = r.ResourceClient.Write(ctx, req) + + return err +} + +func (r *Controller) deleteDestinations(ctx context.Context, pod types.NamespacedName) error { + req := &pbresource.DeleteRequest{ + Id: getDestinationsID(pod.Name, r.getConsulNamespace(pod.Namespace), r.getPartition()), + } + + _, err := r.ResourceClient.Delete(ctx, req) + return err +} + +// consulNamespace returns the Consul destination namespace for a provided Kubernetes namespace +// depending on Consul Namespaces being enabled and the value of namespace mirroring. +func (r *Controller) getConsulNamespace(kubeNamespace string) string { + ns := namespaces.ConsulNamespace( + kubeNamespace, + r.EnableConsulNamespaces, + r.ConsulDestinationNamespace, + r.EnableNSMirroring, + r.NSMirroringPrefix, + ) + + // TODO: remove this if and when the default namespace of resources change. 
+ if ns == "" { + ns = constants.DefaultConsulNS + } + return ns +} + +func (r *Controller) getPartition() string { + if !r.EnableConsulPartitions || r.ConsulPartition == "" { + return constants.DefaultConsulPartition + } + return r.ConsulPartition +} + +func getWorkloadPorts(pod corev1.Pod) ([]string, map[string]*pbcatalog.WorkloadPort) { + ports := make([]string, 0) + workloadPorts := map[string]*pbcatalog.WorkloadPort{} + + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + name := inject.WorkloadPortName(&port) + + // TODO: error check reserved "mesh" keyword and 20000 + + if port.Protocol != corev1.ProtocolTCP { + // TODO: also throw an error here + continue + } + + ports = append(ports, name) + workloadPorts[name] = &pbcatalog.WorkloadPort{ + Port: uint32(port.ContainerPort), + + // We leave the protocol unspecified so that it can be inherited from the Service appProtocol + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + } + } + } + + ports = append(ports, "mesh") + workloadPorts["mesh"] = &pbcatalog.WorkloadPort{ + Port: constants.ProxyDefaultInboundPort, + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + } + + return ports, workloadPorts +} + +func parseLocality(node corev1.Node) *pbcatalog.Locality { + region := node.Labels[corev1.LabelTopologyRegion] + zone := node.Labels[corev1.LabelTopologyZone] + + if region == "" { + return nil + } + + return &pbcatalog.Locality{ + Region: region, + Zone: zone, + } +} + +func metaFromPod(pod corev1.Pod) map[string]string { + // TODO: allow custom workload metadata + meta := map[string]string{ + constants.MetaKeyKubeNS: pod.GetNamespace(), + constants.MetaKeyManagedBy: constants.ManagedByPodValue, + } + + if gatewayKind := pod.Annotations[constants.AnnotationGatewayKind]; gatewayKind != "" { + meta[constants.MetaGatewayKind] = gatewayKind + } + + return meta +} + +// getHealthStatusFromPod checks the Pod for a "Ready" condition that is true. 
+// This is true when all the containers are ready, vs. "Running" on the PodPhase, +// which is true if any container is running. +func getHealthStatusFromPod(pod corev1.Pod) pbcatalog.Health { + if pod.Status.Conditions == nil { + return pbcatalog.Health_HEALTH_CRITICAL + } + + for _, condition := range pod.Status.Conditions { + if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue { + return pbcatalog.Health_HEALTH_PASSING + } + } + + return pbcatalog.Health_HEALTH_CRITICAL +} + +// getHealthStatusReason takes Consul's health check status (either passing or critical) +// and the pod to return a descriptive output for the HealthStatus Output. +func getHealthStatusReason(state pbcatalog.Health, pod corev1.Pod) string { + if state == pbcatalog.Health_HEALTH_PASSING { + return constants.KubernetesSuccessReasonMsg + } + + return fmt.Sprintf("Pod \"%s/%s\" is not ready", pod.GetNamespace(), pod.GetName()) +} + +func getWorkloadID(name, namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: name, + Type: pbcatalog.WorkloadType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + } +} + +func getProxyConfigurationID(name, namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: name, + Type: pbmesh.ProxyConfigurationType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + } +} + +func getHealthStatusID(name, namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: name, + Type: pbcatalog.HealthStatusType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + } +} + +func getDestinationsID(name, namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: name, + Type: pbmesh.DestinationsType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + } +} + +func getLogFieldsForResource(id *pbresource.ID) []any { + return []any{ + 
"name", id.Name, + "ns", id.Tenancy.Namespace, + "partition", id.Tenancy.Partition, + } +} diff --git a/control-plane/connect-inject/controllers/pod/pod_controller_ent_test.go b/control-plane/connect-inject/controllers/pod/pod_controller_ent_test.go new file mode 100644 index 0000000000..614526254e --- /dev/null +++ b/control-plane/connect-inject/controllers/pod/pod_controller_ent_test.go @@ -0,0 +1,765 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build enterprise + +package pod + +import ( + "context" + "testing" + + mapset "github.com/deckarep/golang-set" + logrtest "github.com/go-logr/logr/testr" + capi "github.com/hashicorp/consul/api" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/metrics" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +const ( + testPodName = "foo" + testPartition = "my-partition" +) + +type testCase struct { + name string + podName string // This needs to be aligned with the pod created in `k8sObjects` + podNamespace string // Defaults to metav1.NamespaceDefault if empty. 
+ partition string + + k8sObjects func() []runtime.Object // testing node is injected separately + + // Pod Controller Settings + acls bool + tproxy bool + overwriteProbes bool + metrics bool + telemetry bool + + namespaceMirroring bool + namespaceDestination string + namespacePrefix string + + // Initial Consul state. + existingConsulNamespace string // This namespace will be populated before the test is executed. + existingWorkload *pbcatalog.Workload + existingHealthStatus *pbcatalog.HealthStatus + existingProxyConfiguration *pbmesh.ProxyConfiguration + existingDestinations *pbmesh.Destinations + + // Expected Consul state. + expectedConsulNamespace string // This namespace will be used to query Consul for the results + expectedWorkload *pbcatalog.Workload + expectedHealthStatus *pbcatalog.HealthStatus + expectedProxyConfiguration *pbmesh.ProxyConfiguration + expectedDestinations *pbmesh.Destinations + + // Reconcile loop outputs + expErr string + expRequeue bool // The response from the reconcile function +} + +// TestReconcileCreatePodWithMirrorNamespaces creates a Pod object in a non-default NS and Partition +// with namespaces set to mirroring +func TestReconcileCreatePodWithMirrorNamespaces(t *testing.T) { + t.Parallel() + + testCases := []testCase{ + { + name: "kitchen sink new pod, ns and partition", + podName: testPodName, + partition: constants.DefaultConsulPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, metav1.NamespaceDefault, true, true) + addProbesAndOriginalPodAnnotation(pod) + + return []runtime.Object{pod} + }, + tproxy: true, + telemetry: true, + metrics: true, + overwriteProbes: true, + + namespaceMirroring: true, + + expectedConsulNamespace: constants.DefaultConsulNS, + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + expectedProxyConfiguration: createProxyConfiguration(testPodName, true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + }, + { + name: "kitchen sink new 
pod, non-default ns and partition", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, "bar", true, true) + addProbesAndOriginalPodAnnotation(pod) + + return []runtime.Object{pod} + }, + tproxy: true, + telemetry: true, + metrics: true, + overwriteProbes: true, + + namespaceMirroring: true, + + existingConsulNamespace: "bar", + + expectedConsulNamespace: "bar", + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + expectedProxyConfiguration: createProxyConfiguration(testPodName, true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + }, + { + name: "new pod with namespace prefix", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, "bar", true, true) + addProbesAndOriginalPodAnnotation(pod) + + return []runtime.Object{pod} + }, + + namespaceMirroring: true, + namespacePrefix: "foo-", + + existingConsulNamespace: "foo-bar", + + expectedConsulNamespace: "foo-bar", + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + }, + { + name: "namespace mirroring overrides destination namespace", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, "bar", true, true) + addProbesAndOriginalPodAnnotation(pod) + + return []runtime.Object{pod} + }, + + namespaceMirroring: true, + namespaceDestination: "supernova", + + existingConsulNamespace: "bar", + + expectedConsulNamespace: "bar", + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + }, + { + name: "new pod with explicit destinations, ns and partition", + podName: testPodName, + partition: constants.DefaultConsulPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, metav1.NamespaceDefault, true, true) + 
addProbesAndOriginalPodAnnotation(pod) + pod.Annotations[constants.AnnotationMeshDestinations] = "destination.port.mySVC.svc:24601" + return []runtime.Object{pod} + }, + tproxy: false, + telemetry: true, + metrics: true, + overwriteProbes: true, + + namespaceMirroring: true, + + expectedConsulNamespace: constants.DefaultConsulNS, + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + expectedProxyConfiguration: createProxyConfiguration(testPodName, true, pbmesh.ProxyMode_PROXY_MODE_DEFAULT), + expectedDestinations: createDestinations(), + }, + { + name: "namespace in Consul does not exist", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, "bar", true, true) + return []runtime.Object{pod} + }, + + namespaceMirroring: true, + + // The equivalent namespace in Consul does not exist, so requeue for backoff. + expRequeue: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runControllerTest(t, tc) + }) + } +} + +// TestReconcileUpdatePodWithMirrorNamespaces updates a Pod object in a non-default NS and Partition +// with namespaces set to mirroring. 
+func TestReconcileUpdatePodWithMirrorNamespaces(t *testing.T) { + t.Parallel() + + testCases := []testCase{ + { + name: "update pod health", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, "bar", true, false) // failing + return []runtime.Object{pod} + }, + + namespaceMirroring: true, + namespacePrefix: "foo-", + + existingConsulNamespace: "foo-bar", + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + + expectedConsulNamespace: "foo-bar", + expectedWorkload: createWorkload(), + expectedHealthStatus: createCriticalHealthStatus(testPodName, "bar"), + }, + { + name: "duplicated pod event", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, "bar", true, true) + addProbesAndOriginalPodAnnotation(pod) + + return []runtime.Object{pod} + }, + + namespaceMirroring: true, + + tproxy: true, + telemetry: true, + metrics: true, + overwriteProbes: true, + + existingConsulNamespace: "bar", + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + existingProxyConfiguration: createProxyConfiguration(testPodName, true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + + expectedConsulNamespace: "bar", + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + expectedProxyConfiguration: createProxyConfiguration(testPodName, true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runControllerTest(t, tc) + }) + } +} + +// TestReconcileDeletePodWithMirrorNamespaces deletes a Pod object in a non-default NS and Partition +// with namespaces set to mirroring. 
+func TestReconcileDeletePodWithMirrorNamespaces(t *testing.T) { + t.Parallel() + + testCases := []testCase{ + { + name: "delete kitchen sink pod", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + tproxy: true, + telemetry: true, + metrics: true, + overwriteProbes: true, + + namespaceMirroring: true, + + existingConsulNamespace: "bar", + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + existingProxyConfiguration: createProxyConfiguration(testPodName, true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + + expectedConsulNamespace: "bar", + }, + { + name: "delete pod w/ explicit destinations", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + telemetry: true, + metrics: true, + overwriteProbes: true, + + namespaceMirroring: true, + + existingConsulNamespace: "bar", + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + existingProxyConfiguration: createProxyConfiguration(testPodName, true, pbmesh.ProxyMode_PROXY_MODE_DEFAULT), + existingDestinations: createDestinations(), + + expectedConsulNamespace: "bar", + }, + { + name: "delete pod with namespace prefix", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + namespaceMirroring: true, + namespacePrefix: "foo-", + + existingConsulNamespace: "foo-bar", + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + + expectedConsulNamespace: "foo-bar", + }, + { + name: "resources are already gone in Consul", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + tproxy: true, + telemetry: true, + metrics: true, + overwriteProbes: true, + + namespaceMirroring: true, + + existingConsulNamespace: "bar", + + expectedConsulNamespace: "bar", + }, + { + name: "namespace is already missing in Consul", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + namespaceMirroring: true, + + 
expectedConsulNamespace: "bar", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runControllerTest(t, tc) + }) + } +} + +// TestReconcileCreatePodWithDestinationNamespace creates a Pod object in a non-default NS and Partition +// with namespaces set to a destination. +func TestReconcileCreatePodWithDestinationNamespace(t *testing.T) { + t.Parallel() + + testCases := []testCase{ + { + name: "kitchen sink new pod, ns and partition", + podName: testPodName, + partition: constants.DefaultConsulPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, metav1.NamespaceDefault, true, true) + addProbesAndOriginalPodAnnotation(pod) + + return []runtime.Object{pod} + }, + tproxy: true, + telemetry: true, + metrics: true, + overwriteProbes: true, + + namespaceDestination: constants.DefaultConsulNS, + + existingConsulNamespace: constants.DefaultConsulNS, + + expectedConsulNamespace: constants.DefaultConsulNS, + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + expectedProxyConfiguration: createProxyConfiguration(testPodName, true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + }, + { + name: "new pod with explicit destinations, ns and partition", + podName: testPodName, + partition: constants.DefaultConsulPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, metav1.NamespaceDefault, true, true) + addProbesAndOriginalPodAnnotation(pod) + pod.Annotations[constants.AnnotationMeshDestinations] = "destination.port.mySVC.svc:24601" + return []runtime.Object{pod} + }, + telemetry: true, + metrics: true, + overwriteProbes: true, + + namespaceDestination: constants.DefaultConsulNS, + + existingConsulNamespace: constants.DefaultConsulNS, + + expectedConsulNamespace: constants.DefaultConsulNS, + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + expectedProxyConfiguration: createProxyConfiguration(testPodName, true, 
pbmesh.ProxyMode_PROXY_MODE_DEFAULT), + expectedDestinations: createDestinations(), + }, + { + name: "kitchen sink new pod, non-default ns and partition", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, "bar", true, true) + addProbesAndOriginalPodAnnotation(pod) + + return []runtime.Object{pod} + }, + tproxy: true, + telemetry: true, + metrics: true, + overwriteProbes: true, + + namespaceDestination: "a-penguin-walks-into-a-bar", + + existingConsulNamespace: "a-penguin-walks-into-a-bar", + + expectedConsulNamespace: "a-penguin-walks-into-a-bar", + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + expectedProxyConfiguration: createProxyConfiguration(testPodName, true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + }, + { + name: "namespace in Consul does not exist", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, "bar", true, true) + return []runtime.Object{pod} + }, + + namespaceDestination: "a-penguin-walks-into-a-bar", + + // The equivalent namespace in Consul does not exist, so requeue for backoff. + expRequeue: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runControllerTest(t, tc) + }) + } +} + +// TestReconcileUpdatePodWithDestinationNamespace updates a Pod object in a non-default NS and Partition +// with namespaces set to a destination. 
+func TestReconcileUpdatePodWithDestinationNamespace(t *testing.T) { + t.Parallel() + + testCases := []testCase{ + { + name: "update pod health", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, "bar", true, false) // failing + return []runtime.Object{pod} + }, + + namespaceDestination: "a-penguin-walks-into-a-bar", + + existingConsulNamespace: "a-penguin-walks-into-a-bar", + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + + expectedConsulNamespace: "a-penguin-walks-into-a-bar", + expectedWorkload: createWorkload(), + expectedHealthStatus: createCriticalHealthStatus(testPodName, "bar"), + }, + { + name: "duplicated pod event", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + k8sObjects: func() []runtime.Object { + pod := createPod(testPodName, "bar", true, true) + addProbesAndOriginalPodAnnotation(pod) + + return []runtime.Object{pod} + }, + + namespaceDestination: "a-penguin-walks-into-a-bar", + + tproxy: true, + telemetry: true, + metrics: true, + overwriteProbes: true, + + existingConsulNamespace: "a-penguin-walks-into-a-bar", + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + existingProxyConfiguration: createProxyConfiguration(testPodName, true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + + expectedConsulNamespace: "a-penguin-walks-into-a-bar", + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + expectedProxyConfiguration: createProxyConfiguration(testPodName, true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runControllerTest(t, tc) + }) + } +} + +// TestReconcileDeletePodWithDestinationNamespace deletes a Pod object in a non-default NS and Partition +// with namespaces set to a destination. 
+func TestReconcileDeletePodWithDestinationNamespace(t *testing.T) { + t.Parallel() + + testCases := []testCase{ + { + name: "delete kitchen sink pod", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + tproxy: true, + telemetry: true, + metrics: true, + overwriteProbes: true, + + namespaceDestination: "a-penguin-walks-into-a-bar", + + existingConsulNamespace: "a-penguin-walks-into-a-bar", + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + existingProxyConfiguration: createProxyConfiguration(testPodName, true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + + expectedConsulNamespace: "a-penguin-walks-into-a-bar", + }, + { + name: "delete pod with explicit destinations", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + telemetry: true, + metrics: true, + overwriteProbes: true, + + namespaceDestination: "a-penguin-walks-into-a-bar", + + existingConsulNamespace: "a-penguin-walks-into-a-bar", + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + existingProxyConfiguration: createProxyConfiguration(testPodName, true, pbmesh.ProxyMode_PROXY_MODE_DEFAULT), + existingDestinations: createDestinations(), + + expectedConsulNamespace: "a-penguin-walks-into-a-bar", + }, + { + name: "resources are already gone in Consul", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + tproxy: true, + telemetry: true, + metrics: true, + overwriteProbes: true, + + namespaceDestination: "a-penguin-walks-into-a-bar", + + existingConsulNamespace: "a-penguin-walks-into-a-bar", + + expectedConsulNamespace: "a-penguin-walks-into-a-bar", + }, + { + name: "namespace is already missing in Consul", + podName: testPodName, + podNamespace: "bar", + partition: testPartition, + + namespaceDestination: "a-penguin-walks-into-a-bar", + + expectedConsulNamespace: "a-penguin-walks-into-a-bar", + }, + } + + for _, tc := range testCases { + 
t.Run(tc.name, func(t *testing.T) { + runControllerTest(t, tc) + }) + } +} + +func runControllerTest(t *testing.T, tc testCase) { + + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: metav1.NamespaceDefault, + }} + nsBar := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + }} + node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} + + k8sObjects := []runtime.Object{ + &ns, + &nsBar, + &node, + } + if tc.k8sObjects != nil { + k8sObjects = append(k8sObjects, tc.k8sObjects()...) + } + + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() + + // Create test consulServer server. + adminToken := "123e4567-e89b-12d3-a456-426614174000" + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + if tc.acls { + c.ACL.Enabled = tc.acls + c.ACL.Tokens.InitialManagement = adminToken + } + }) + + // Create the partition in Consul. + if tc.partition != "" { + testClient.Cfg.APIClientConfig.Partition = tc.partition + + partition := &capi.Partition{ + Name: tc.partition, + } + _, _, err := testClient.APIClient.Partitions().Create(context.Background(), partition, nil) + require.NoError(t, err) + } + + // Create the namespace in Consul if specified. + if tc.existingConsulNamespace != "" { + namespace := &capi.Namespace{ + Name: tc.existingConsulNamespace, + Partition: tc.partition, + } + + _, _, err := testClient.APIClient.Namespaces().Create(namespace, nil) + require.NoError(t, err) + } + + // Create the pod controller. 
+ pc := &Controller{ + Client: fakeClient, + Log: logrtest.New(t), + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + ConsulTenancyConfig: common.ConsulTenancyConfig{ + EnableConsulNamespaces: true, + NSMirroringPrefix: tc.namespacePrefix, + EnableNSMirroring: tc.namespaceMirroring, + ConsulDestinationNamespace: tc.namespaceDestination, + EnableConsulPartitions: true, + ConsulPartition: tc.partition, + }, + TProxyOverwriteProbes: tc.overwriteProbes, + EnableTransparentProxy: tc.tproxy, + EnableTelemetryCollector: tc.telemetry, + } + if tc.metrics { + pc.MetricsConfig = metrics.Config{ + DefaultEnableMetrics: true, + DefaultPrometheusScrapePort: "1234", + } + } + if tc.acls { + pc.AuthMethod = test.AuthMethod + } + + podNamespace := tc.podNamespace + if podNamespace == "" { + podNamespace = metav1.NamespaceDefault + } + + workloadID := getWorkloadID(tc.podName, tc.expectedConsulNamespace, tc.partition) + loadResource(t, context.Background(), testClient.ResourceClient, workloadID, tc.existingWorkload, nil) + loadResource(t, context.Background(), testClient.ResourceClient, getHealthStatusID(tc.podName, tc.expectedConsulNamespace, tc.partition), tc.existingHealthStatus, workloadID) + loadResource(t, context.Background(), testClient.ResourceClient, getProxyConfigurationID(tc.podName, tc.expectedConsulNamespace, tc.partition), tc.existingProxyConfiguration, nil) + loadResource(t, context.Background(), testClient.ResourceClient, getDestinationsID(tc.podName, tc.expectedConsulNamespace, tc.partition), tc.existingDestinations, nil) + + namespacedName := types.NamespacedName{ + Namespace: podNamespace, + Name: tc.podName, + } + + resp, err := pc.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: namespacedName, + }) + if tc.expErr != "" { + require.EqualError(t, err, tc.expErr) + } 
else { + require.NoError(t, err) + } + + require.Equal(t, tc.expRequeue, resp.Requeue) + + wID := getWorkloadID(tc.podName, tc.expectedConsulNamespace, tc.partition) + expectedWorkloadMatches(t, context.Background(), testClient.ResourceClient, wID, tc.expectedWorkload) + + hsID := getHealthStatusID(tc.podName, tc.expectedConsulNamespace, tc.partition) + expectedHealthStatusMatches(t, context.Background(), testClient.ResourceClient, hsID, tc.expectedHealthStatus) + + pcID := getProxyConfigurationID(tc.podName, tc.expectedConsulNamespace, tc.partition) + expectedProxyConfigurationMatches(t, context.Background(), testClient.ResourceClient, pcID, tc.expectedProxyConfiguration) + + uID := getDestinationsID(tc.podName, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedDestinationMatches(t, context.Background(), testClient.ResourceClient, uID, tc.expectedDestinations) +} diff --git a/control-plane/connect-inject/controllers/pod/pod_controller_test.go b/control-plane/connect-inject/controllers/pod/pod_controller_test.go new file mode 100644 index 0000000000..605d9dbce9 --- /dev/null +++ b/control-plane/connect-inject/controllers/pod/pod_controller_test.go @@ -0,0 +1,2142 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pod + +import ( + "context" + "encoding/json" + "fmt" + "testing" + "time" + + mapset "github.com/deckarep/golang-set" + logrtest "github.com/go-logr/logr/testr" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/consul/api" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/metrics" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +const ( + // TODO: (v2/nitya) Bring back consulLocalityNodeName once node controller is implemented and assertions for + // workloads need node names again. 
+ nodeName = "test-node" + localityNodeName = "test-node-w-locality" + consulNodeName = "test-node-virtual" +) + +func TestParseLocality(t *testing.T) { + t.Run("no labels", func(t *testing.T) { + n := corev1.Node{} + require.Nil(t, parseLocality(n)) + }) + + t.Run("zone only", func(t *testing.T) { + n := corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + corev1.LabelTopologyZone: "us-west-1a", + }, + }, + } + require.Nil(t, parseLocality(n)) + }) + + t.Run("everything", func(t *testing.T) { + n := corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + corev1.LabelTopologyRegion: "us-west-1", + corev1.LabelTopologyZone: "us-west-1a", + }, + }, + } + require.True(t, proto.Equal(&pbcatalog.Locality{Region: "us-west-1", Zone: "us-west-1a"}, parseLocality(n))) + }) +} + +func TestWorkloadWrite(t *testing.T) { + t.Parallel() + + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: metav1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, + }} + node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} + localityNode := corev1.Node{ObjectMeta: metav1.ObjectMeta{ + Name: localityNodeName, + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + corev1.LabelTopologyRegion: "us-east1", + corev1.LabelTopologyZone: "us-east1-b", + }, + }} + + type testCase struct { + name string + pod *corev1.Pod + podModifier func(pod *corev1.Pod) + expectedWorkload *pbcatalog.Workload + } + + run := func(t *testing.T, tc testCase) { + if tc.podModifier != nil { + tc.podModifier(tc.pod) + } + + k8sObjects := []runtime.Object{ + &ns, + &node, + &localityNode, + } + + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() + + // Create test consulServer server. + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + // Create the pod controller. 
+ pc := &Controller{ + Client: fakeClient, + Log: logrtest.New(t), + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + ResourceClient: testClient.ResourceClient, + } + + err := pc.writeWorkload(context.Background(), *tc.pod) + require.NoError(t, err) + + req := &pbresource.ReadRequest{ + Id: getWorkloadID(tc.pod.GetName(), metav1.NamespaceDefault, constants.DefaultConsulPartition), + } + actualRes, err := testClient.ResourceClient.Read(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, actualRes) + + requireEqualID(t, actualRes, tc.pod.GetName(), constants.DefaultConsulNS, constants.DefaultConsulPartition) + require.NotNil(t, actualRes.GetResource().GetData()) + + actualWorkload := &pbcatalog.Workload{} + err = actualRes.GetResource().GetData().UnmarshalTo(actualWorkload) + require.NoError(t, err) + + require.True(t, proto.Equal(actualWorkload, tc.expectedWorkload)) + } + + testCases := []testCase{ + { + name: "multi-port single-container", + pod: createPod("foo", "", true, true), + expectedWorkload: createWorkload(), + }, + { + name: "multi-port multi-container", + pod: createPod("foo", "", true, true), + podModifier: func(pod *corev1.Pod) { + container := corev1.Container{ + Name: "logger", + Ports: []corev1.ContainerPort{ + { + Name: "agent", + Protocol: corev1.ProtocolTCP, + ContainerPort: 6666, + }, + }, + } + pod.Spec.Containers = append(pod.Spec.Containers, container) + }, + expectedWorkload: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "10.0.0.1", Ports: []string{"public", "admin", "agent", "mesh"}}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "public": { + Port: 80, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "admin": { + Port: 8080, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "agent": { + 
Port: 6666, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "mesh": { + Port: constants.ProxyDefaultInboundPort, + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Identity: "foo", + }, + }, + { + name: "pod with locality", + pod: createPod("foo", "", true, true), + podModifier: func(pod *corev1.Pod) { + pod.Spec.NodeName = localityNodeName + }, + expectedWorkload: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "10.0.0.1", Ports: []string{"public", "admin", "mesh"}}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "public": { + Port: 80, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "admin": { + Port: 8080, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "mesh": { + Port: constants.ProxyDefaultInboundPort, + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Locality: &pbcatalog.Locality{ + Region: "us-east1", + Zone: "us-east1-b", + }, + Identity: "foo", + }, + }, + { + name: "pod with unnamed ports", + pod: createPod("foo", "", true, true), + podModifier: func(pod *corev1.Pod) { + pod.Spec.Containers[0].Ports[0].Name = "" + pod.Spec.Containers[0].Ports[1].Name = "" + }, + expectedWorkload: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "10.0.0.1", Ports: []string{"cslport-80", "cslport-8080", "mesh"}}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "cslport-80": { + Port: 80, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "cslport-8080": { + Port: 8080, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "mesh": { + Port: constants.ProxyDefaultInboundPort, + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Identity: "foo", + }, + }, + { + name: "pod with no ports", + pod: createPod("foo", "", true, true), + podModifier: func(pod *corev1.Pod) { + pod.Spec.Containers[0].Ports = nil + }, + expectedWorkload: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "10.0.0.1", Ports: []string{"mesh"}}, + }, + Ports: 
map[string]*pbcatalog.WorkloadPort{ + "mesh": { + Port: constants.ProxyDefaultInboundPort, + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Identity: "foo", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestWorkloadDelete(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + pod *corev1.Pod + existingWorkload *pbcatalog.Workload + } + + run := func(t *testing.T, tc testCase) { + fakeClient := fake.NewClientBuilder().WithRuntimeObjects().Build() + + // Create test consulServer server. + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + // Create the pod controller. + pc := &Controller{ + Client: fakeClient, + Log: logrtest.New(t), + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + ResourceClient: testClient.ResourceClient, + } + + workload, err := anypb.New(tc.existingWorkload) + require.NoError(t, err) + + workloadID := getWorkloadID(tc.pod.GetName(), metav1.NamespaceDefault, constants.DefaultConsulPartition) + writeReq := &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: workloadID, + Data: workload, + }, + } + + _, err = testClient.ResourceClient.Write(context.Background(), writeReq) + require.NoError(t, err) + test.ResourceHasPersisted(t, context.Background(), testClient.ResourceClient, workloadID) + + reconcileReq := types.NamespacedName{ + Namespace: metav1.NamespaceDefault, + Name: tc.pod.GetName(), + } + err = pc.deleteWorkload(context.Background(), reconcileReq) + require.NoError(t, err) + + readReq := &pbresource.ReadRequest{ + Id: getWorkloadID(tc.pod.GetName(), metav1.NamespaceDefault, constants.DefaultConsulPartition), + } + _, err = 
testClient.ResourceClient.Read(context.Background(), readReq) + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, s.Code()) + } + + testCases := []testCase{ + { + name: "basic pod delete", + pod: createPod("foo", "", true, true), + existingWorkload: createWorkload(), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestHealthStatusWrite(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + pod *corev1.Pod + podModifier func(pod *corev1.Pod) + expectedHealthStatus *pbcatalog.HealthStatus + } + + run := func(t *testing.T, tc testCase) { + if tc.podModifier != nil { + tc.podModifier(tc.pod) + } + + fakeClient := fake.NewClientBuilder().WithRuntimeObjects().Build() + + // Create test consulServer server. + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + // Create the pod controller. 
+ pc := &Controller{ + Client: fakeClient, + Log: logrtest.New(t), + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + ResourceClient: testClient.ResourceClient, + } + + // The owner of a resource is validated, so create a dummy workload for the HealthStatus + workloadData, err := anypb.New(createWorkload()) + require.NoError(t, err) + + workloadID := getWorkloadID(tc.pod.GetName(), metav1.NamespaceDefault, constants.DefaultConsulPartition) + writeReq := &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: workloadID, + Data: workloadData, + }, + } + _, err = testClient.ResourceClient.Write(context.Background(), writeReq) + require.NoError(t, err) + + // Test writing the pod to a HealthStatus + err = pc.writeHealthStatus(context.Background(), *tc.pod) + require.NoError(t, err) + + req := &pbresource.ReadRequest{ + Id: getHealthStatusID(tc.pod.GetName(), metav1.NamespaceDefault, constants.DefaultConsulPartition), + } + actualRes, err := testClient.ResourceClient.Read(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, actualRes) + + requireEqualID(t, actualRes, tc.pod.GetName(), constants.DefaultConsulNS, constants.DefaultConsulPartition) + require.NotNil(t, actualRes.GetResource().GetData()) + + actualHealthStatus := &pbcatalog.HealthStatus{} + err = actualRes.GetResource().GetData().UnmarshalTo(actualHealthStatus) + require.NoError(t, err) + + require.True(t, proto.Equal(actualHealthStatus, tc.expectedHealthStatus)) + } + + testCases := []testCase{ + { + name: "ready pod", + pod: createPod("foo", "", true, true), + expectedHealthStatus: createPassingHealthStatus(), + }, + { + name: "not ready pod", + pod: createPod("foo", "", true, false), + expectedHealthStatus: createCriticalHealthStatus("foo", "default"), + }, + { + name: "pod with no condition", + 
pod: createPod("foo", "", true, true), + podModifier: func(pod *corev1.Pod) { + pod.Status.Conditions = []corev1.PodCondition{} + }, + expectedHealthStatus: createCriticalHealthStatus("foo", "default"), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestProxyConfigurationWrite(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + pod *corev1.Pod + podModifier func(pod *corev1.Pod) + expectedProxyConfiguration *pbmesh.ProxyConfiguration + + tproxy bool + overwriteProbes bool + metrics bool + telemetry bool + } + + run := func(t *testing.T, tc testCase) { + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: metav1.NamespaceDefault, + }} + + nsTproxy := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: "tproxy-party", + Labels: map[string]string{ + constants.KeyTransparentProxy: "true", + }, + }} + + if tc.podModifier != nil { + tc.podModifier(tc.pod) + } + + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(&ns, &nsTproxy).Build() + + // Create test consulServer server. + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + // Create the pod controller. 
+ pc := &Controller{ + Client: fakeClient, + Log: logrtest.New(t), + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + EnableTransparentProxy: tc.tproxy, + TProxyOverwriteProbes: tc.overwriteProbes, + EnableTelemetryCollector: tc.telemetry, + ResourceClient: testClient.ResourceClient, + } + + if tc.metrics { + pc.MetricsConfig = metrics.Config{ + DefaultEnableMetrics: true, + DefaultPrometheusScrapePort: "5678", + } + } + + // Test writing the pod to a HealthStatus + err := pc.writeProxyConfiguration(context.Background(), *tc.pod) + require.NoError(t, err) + + req := &pbresource.ReadRequest{ + Id: getProxyConfigurationID(tc.pod.GetName(), metav1.NamespaceDefault, constants.DefaultConsulPartition), + } + actualRes, err := testClient.ResourceClient.Read(context.Background(), req) + + if tc.expectedProxyConfiguration == nil { + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, s.Code()) + return + } + + require.NoError(t, err) + require.NotNil(t, actualRes) + + requireEqualID(t, actualRes, tc.pod.GetName(), constants.DefaultConsulNS, constants.DefaultConsulPartition) + require.NotNil(t, actualRes.GetResource().GetData()) + + actualProxyConfiguration := &pbmesh.ProxyConfiguration{} + err = actualRes.GetResource().GetData().UnmarshalTo(actualProxyConfiguration) + require.NoError(t, err) + + diff := cmp.Diff(actualProxyConfiguration, tc.expectedProxyConfiguration, test.CmpProtoIgnoreOrder()...) 
+ require.Equal(t, "", diff) + } + + testCases := []testCase{ + { + name: "no tproxy, no telemetry, no metrics, no probe overwrite", + pod: createPod("foo", "", true, true), + expectedProxyConfiguration: nil, + }, + { + name: "kitchen sink - globally enabled", + pod: createPod("foo", "", true, true), + podModifier: func(pod *corev1.Pod) { + addProbesAndOriginalPodAnnotation(pod) + }, + tproxy: true, + overwriteProbes: true, + metrics: true, + telemetry: true, + expectedProxyConfiguration: &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{"foo"}, + }, + DynamicConfig: &pbmesh.DynamicConfig{ + Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT, + ExposeConfig: &pbmesh.ExposeConfig{ + ExposePaths: []*pbmesh.ExposePath{ + { + ListenerPort: 20400, + LocalPathPort: 2001, + Path: "/livez", + }, + { + ListenerPort: 20300, + LocalPathPort: 2000, + Path: "/readyz", + }, + { + ListenerPort: 20500, + LocalPathPort: 2002, + Path: "/startupz", + }, + }, + }, + TransparentProxy: &pbmesh.TransparentProxy{ + OutboundListenerPort: 15001, + }, + }, + BootstrapConfig: &pbmesh.BootstrapConfig{ + PrometheusBindAddr: "0.0.0.0:5678", + TelemetryCollectorBindSocketDir: DefaultTelemetryBindSocketDir, + }, + }, + }, + { + name: "tproxy, metrics, and probe overwrite enabled on pod", + pod: createPod("foo", "", true, true), + podModifier: func(pod *corev1.Pod) { + pod.Annotations[constants.KeyTransparentProxy] = "true" + pod.Annotations[constants.AnnotationTransparentProxyOverwriteProbes] = "true" + pod.Annotations[constants.AnnotationEnableMetrics] = "true" + pod.Annotations[constants.AnnotationPrometheusScrapePort] = "21234" + + addProbesAndOriginalPodAnnotation(pod) + }, + expectedProxyConfiguration: &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{"foo"}, + }, + DynamicConfig: &pbmesh.DynamicConfig{ + Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT, + ExposeConfig: &pbmesh.ExposeConfig{ + ExposePaths: 
[]*pbmesh.ExposePath{ + { + ListenerPort: 20400, + LocalPathPort: 2001, + Path: "/livez", + }, + { + ListenerPort: 20300, + LocalPathPort: 2000, + Path: "/readyz", + }, + { + ListenerPort: 20500, + LocalPathPort: 2002, + Path: "/startupz", + }, + }, + }, + TransparentProxy: &pbmesh.TransparentProxy{ + OutboundListenerPort: 15001, + }, + }, + BootstrapConfig: &pbmesh.BootstrapConfig{ + PrometheusBindAddr: "0.0.0.0:21234", + }, + }, + }, + { + name: "tproxy enabled on namespace", + pod: createPod("foo", "", true, true), + podModifier: func(pod *corev1.Pod) { + pod.Namespace = "tproxy-party" + }, + expectedProxyConfiguration: &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{"foo"}, + }, + DynamicConfig: &pbmesh.DynamicConfig{ + Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT, + TransparentProxy: &pbmesh.TransparentProxy{ + OutboundListenerPort: 15001, + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func requireEqualID(t *testing.T, res *pbresource.ReadResponse, name string, ns string, partition string) { + require.Equal(t, name, res.GetResource().GetId().GetName()) + require.Equal(t, ns, res.GetResource().GetId().GetTenancy().GetNamespace()) + require.Equal(t, partition, res.GetResource().GetId().GetTenancy().GetPartition()) +} + +func TestProxyConfigurationDelete(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + pod *corev1.Pod + existingProxyConfiguration *pbmesh.ProxyConfiguration + } + + run := func(t *testing.T, tc testCase) { + fakeClient := fake.NewClientBuilder().WithRuntimeObjects().Build() + + // Create test consulServer server. + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + // Create the pod controller. 
+ pc := &Controller{ + Client: fakeClient, + Log: logrtest.New(t), + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + ResourceClient: testClient.ResourceClient, + } + + // Create the existing ProxyConfiguration + pcData, err := anypb.New(tc.existingProxyConfiguration) + require.NoError(t, err) + + pcID := getProxyConfigurationID(tc.pod.GetName(), metav1.NamespaceDefault, constants.DefaultConsulPartition) + writeReq := &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: pcID, + Data: pcData, + }, + } + + _, err = testClient.ResourceClient.Write(context.Background(), writeReq) + require.NoError(t, err) + test.ResourceHasPersisted(t, context.Background(), testClient.ResourceClient, pcID) + + reconcileReq := types.NamespacedName{ + Namespace: metav1.NamespaceDefault, + Name: tc.pod.GetName(), + } + err = pc.deleteProxyConfiguration(context.Background(), reconcileReq) + require.NoError(t, err) + + readReq := &pbresource.ReadRequest{ + Id: getProxyConfigurationID(tc.pod.GetName(), metav1.NamespaceDefault, constants.DefaultConsulPartition), + } + _, err = testClient.ResourceClient.Read(context.Background(), readReq) + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, s.Code()) + } + + testCases := []testCase{ + { + name: "proxy configuration delete", + pod: createPod("foo", "", true, true), + existingProxyConfiguration: createProxyConfiguration("foo", true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +// TestDestinationsWrite does a subsampling of tests covered in TestProcessUpstreams to make sure things are hooked up +// correctly. 
For the sake of test speed, more exhaustive testing is performed in TestProcessUpstreams. +func TestDestinationsWrite(t *testing.T) { + t.Parallel() + + const podName = "pod1" + + cases := []struct { + name string + pod func() *corev1.Pod + expected *pbmesh.Destinations + expErr string + consulNamespacesEnabled bool + consulPartitionsEnabled bool + }{ + { + name: "labeled annotated destination with svc only", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "", true, true) + pod1.Annotations[constants.AnnotationMeshDestinations] = "destination.port.upstream1.svc:1234" + return pod1 + }, + expected: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(""), + Namespace: constants.GetNormalizedConsulNamespace(""), + }, + Name: "upstream1", + }, + DestinationPort: "destination", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: consulNodeAddress, + }, + }, + }, + }, + }, + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "labeled annotated destination with svc, ns, and peer", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "", true, true) + pod1.Annotations[constants.AnnotationMeshDestinations] = "destination.port.upstream1.svc.ns1.ns.peer1.peer:1234" + return pod1 + }, + expErr: "error processing destination annotations: destination currently does not support peers: destination.port.upstream1.svc.ns1.ns.peer1.peer:1234", + // TODO: uncomment this and remove expErr when peers is supported + // expected: &pbmesh.Destinations{ + // Workloads: &pbcatalog.WorkloadSelector{ + // Names: []string{podName}, + // }, + // Destinations: []*pbmesh.Destination{ + // { + // DestinationRef: &pbresource.Reference{ + // Type: 
pbcatalog.ServiceType, + // Tenancy: &pbresource.Tenancy{ + // Partition: constants.GetNormalizedConsulPartition(""), + // Namespace: "ns1", + // PeerName: "peer1", + // }, + // Name: "upstream1", + // }, + // DestinationPort: "destination", + // Datacenter: "", + // ListenAddr: &pbmesh.Destination_IpPort{ + // IpPort: &pbmesh.IPPortAddress{ + // Port: uint32(1234), + // Ip: consulNodeAddress, + // }, + // }, + // }, + // }, + // }, + consulNamespacesEnabled: true, + consulPartitionsEnabled: false, + }, + { + name: "labeled annotated destination with svc, ns, and partition", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "", true, true) + pod1.Annotations[constants.AnnotationMeshDestinations] = "destination.port.upstream1.svc.ns1.ns.part1.ap:1234" + return pod1 + }, + expected: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: "part1", + Namespace: "ns1", + }, + Name: "upstream1", + }, + DestinationPort: "destination", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: consulNodeAddress, + }, + }, + }, + }, + }, + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + { + name: "error labeled annotated destination error: invalid partition/dc/peer", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "", true, true) + pod1.Annotations[constants.AnnotationMeshDestinations] = "destination.port.upstream1.svc.ns1.ns.part1.err:1234" + return pod1 + }, + expErr: "error processing destination annotations: destination structured incorrectly: destination.port.upstream1.svc.ns1.ns.part1.err:1234", + consulNamespacesEnabled: true, + consulPartitionsEnabled: false, + }, + { + name: "unlabeled single destination", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "", 
true, true) + pod1.Annotations[constants.AnnotationMeshDestinations] = "destination.upstream:1234" + return pod1 + }, + expected: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(""), + Namespace: constants.GetNormalizedConsulNamespace(""), + }, + Name: "upstream", + }, + DestinationPort: "destination", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: consulNodeAddress, + }, + }, + }, + }, + }, + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "unlabeled single destination with namespace and partition", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "", true, true) + pod1.Annotations[constants.AnnotationMeshDestinations] = "destination.upstream.foo.bar:1234" + return pod1 + }, + expected: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: "bar", + Namespace: "foo", + }, + Name: "upstream", + }, + DestinationPort: "destination", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: consulNodeAddress, + }, + }, + }, + }, + }, + consulNamespacesEnabled: true, + consulPartitionsEnabled: true, + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + // Create test consulServer client. 
+ testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + pc := &Controller{ + Log: logrtest.New(t), + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + ConsulTenancyConfig: common.ConsulTenancyConfig{ + EnableConsulNamespaces: tt.consulNamespacesEnabled, + EnableConsulPartitions: tt.consulPartitionsEnabled, + }, + ResourceClient: testClient.ResourceClient, + } + + err := pc.writeDestinations(context.Background(), *tt.pod()) + + if tt.expErr != "" { + require.EqualError(t, err, tt.expErr) + } else { + require.NoError(t, err) + uID := getDestinationsID(tt.pod().Name, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedDestinationMatches(t, context.Background(), testClient.ResourceClient, uID, tt.expected) + } + }) + } +} + +func TestDestinationsDelete(t *testing.T) { + t.Parallel() + + const podName = "pod1" + + cases := []struct { + name string + pod func() *corev1.Pod + existingDestinations *pbmesh.Destinations + expErr string + configEntry func() api.ConfigEntry + consulUnavailable bool + }{ + { + name: "labeled annotated destination with svc only", + pod: func() *corev1.Pod { + pod1 := createPod(podName, "", true, true) + pod1.Annotations[constants.AnnotationMeshDestinations] = "destination.port.upstream1.svc:1234" + return pod1 + }, + existingDestinations: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(""), + Namespace: constants.GetNormalizedConsulNamespace(""), + }, + Name: "upstream1", + }, + DestinationPort: "destination", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: 
&pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: consulNodeAddress, + }, + }, + }, + }, + }, + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + // Create test consulServer server. + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + pc := &Controller{ + Log: logrtest.New(t), + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + ResourceClient: testClient.ResourceClient, + } + + // Load in the upstream for us to delete and check that it's there + loadResource(t, context.Background(), testClient.ResourceClient, getDestinationsID(tt.pod().Name, constants.DefaultConsulNS, constants.DefaultConsulPartition), tt.existingDestinations, nil) + uID := getDestinationsID(tt.pod().Name, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedDestinationMatches(t, context.Background(), testClient.ResourceClient, uID, tt.existingDestinations) + + // Delete the upstream + nn := types.NamespacedName{Name: tt.pod().Name} + err := pc.deleteDestinations(context.Background(), nn) + + // Verify the upstream has been deleted or that an expected error has been returned + if tt.expErr != "" { + require.EqualError(t, err, tt.expErr) + } else { + require.NoError(t, err) + uID := getDestinationsID(tt.pod().Name, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedDestinationMatches(t, context.Background(), testClient.ResourceClient, uID, nil) + } + }) + } +} + +func TestDeleteACLTokens(t *testing.T) { + t.Parallel() + + podName := "foo-123" + serviceName := "foo" + + // Create test consulServer server. 
+ masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.ACL.Enabled = true + c.ACL.Tokens.InitialManagement = masterToken + c.Experiments = []string{"resource-apis"} + }) + + test.SetupK8sAuthMethodV2(t, testClient.APIClient, serviceName, metav1.NamespaceDefault) + token, _, err := testClient.APIClient.ACL().Login(&api.ACLLoginParams{ + AuthMethod: test.AuthMethod, + BearerToken: test.ServiceAccountJWTToken, + Meta: map[string]string{ + "pod": fmt.Sprintf("%s/%s", metav1.NamespaceDefault, podName), + "component": "connect-injector", + }, + }, nil) + require.NoError(t, err) + + pc := &Controller{ + Log: logrtest.New(t), + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + ResourceClient: testClient.ResourceClient, + AuthMethod: test.AuthMethod, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + } + + // Delete the ACL Token + pod := types.NamespacedName{Name: podName, Namespace: metav1.NamespaceDefault} + err = pc.deleteACLTokensForPod(testClient.APIClient, pod) + require.NoError(t, err) + + // Verify the token has been deleted. + _, _, err = testClient.APIClient.ACL().TokenRead(token.AccessorID, nil) + require.Contains(t, err.Error(), "ACL not found") +} + +// TestReconcileCreatePod ensures that a new pod reconciliation fans out to create +// the appropriate Consul resources. Translation details from pod to Consul workload are +// tested at the relevant private functions. Any error states that are also tested here. 
+func TestReconcileCreatePod(t *testing.T) { + t.Parallel() + + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: metav1.NamespaceDefault, + }} + node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} + + type testCase struct { + name string + podName string // This needs to be aligned with the pod created in `k8sObjects` + namespace string // Defaults to metav1.NamespaceDefault if empty. Should be aligned with the ns in the pod + + k8sObjects func() []runtime.Object // testing node is injected separately + expectedWorkload *pbcatalog.Workload + expectedHealthStatus *pbcatalog.HealthStatus + expectedProxyConfiguration *pbmesh.ProxyConfiguration + expectedDestinations *pbmesh.Destinations + + tproxy bool + overwriteProbes bool + metrics bool + telemetry bool + + requeue bool + expErr string + } + + run := func(t *testing.T, tc testCase) { + k8sObjects := []runtime.Object{ + &ns, + &node, + } + if tc.k8sObjects != nil { + k8sObjects = append(k8sObjects, tc.k8sObjects()...) + } + + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() + + // Create test consulServer server. + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + // Create the pod controller. 
+ pc := &Controller{ + Client: fakeClient, + Log: logrtest.New(t), + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + TProxyOverwriteProbes: tc.overwriteProbes, + EnableTransparentProxy: tc.tproxy, + EnableTelemetryCollector: tc.telemetry, + } + if tc.metrics { + pc.MetricsConfig = metrics.Config{ + DefaultEnableMetrics: true, + DefaultPrometheusScrapePort: "1234", + } + } + + namespace := tc.namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: tc.podName, + } + + resp, err := pc.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: namespacedName, + }) + if tc.expErr != "" { + require.EqualError(t, err, tc.expErr) + } else { + require.NoError(t, err) + } + require.Equal(t, tc.requeue, resp.Requeue) + + wID := getWorkloadID(tc.podName, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedWorkloadMatches(t, context.Background(), testClient.ResourceClient, wID, tc.expectedWorkload) + + hsID := getHealthStatusID(tc.podName, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedHealthStatusMatches(t, context.Background(), testClient.ResourceClient, hsID, tc.expectedHealthStatus) + + pcID := getProxyConfigurationID(tc.podName, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedProxyConfigurationMatches(t, context.Background(), testClient.ResourceClient, pcID, tc.expectedProxyConfiguration) + + uID := getDestinationsID(tc.podName, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedDestinationMatches(t, context.Background(), testClient.ResourceClient, uID, tc.expectedDestinations) + } + + testCases := []testCase{ + { + name: "vanilla new mesh-injected pod", + podName: "foo", + k8sObjects: func() []runtime.Object { + pod := 
createPod("foo", "", true, true) + addProbesAndOriginalPodAnnotation(pod) + + return []runtime.Object{pod} + }, + tproxy: true, + telemetry: true, + metrics: true, + overwriteProbes: true, + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + expectedProxyConfiguration: createProxyConfiguration("foo", true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + }, + { + name: "vanilla new gateway pod (not mesh-injected)", + podName: "foo", + k8sObjects: func() []runtime.Object { + pod := createPod("foo", "", false, true) + pod.Annotations[constants.AnnotationGatewayKind] = "mesh-gateway" + pod.Annotations[constants.AnnotationMeshInject] = "false" + pod.Annotations[constants.AnnotationTransparentProxyOverwriteProbes] = "false" + + return []runtime.Object{pod} + }, + tproxy: true, + telemetry: true, + metrics: true, + overwriteProbes: true, + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + expectedProxyConfiguration: createProxyConfiguration("foo", false, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + }, + { + name: "pod in ignored namespace", + podName: "foo", + namespace: metav1.NamespaceSystem, + k8sObjects: func() []runtime.Object { + pod := createPod("foo", "", true, true) + pod.ObjectMeta.Namespace = metav1.NamespaceSystem + return []runtime.Object{pod} + }, + }, + { + name: "unhealthy new pod", + podName: "foo", + k8sObjects: func() []runtime.Object { + pod := createPod("foo", "", true, false) + return []runtime.Object{pod} + }, + expectedWorkload: createWorkload(), + expectedHealthStatus: createCriticalHealthStatus("foo", "default"), + }, + { + name: "return error - pod has no original pod annotation", + podName: "foo", + k8sObjects: func() []runtime.Object { + pod := createPod("foo", "", true, false) + return []runtime.Object{pod} + }, + tproxy: true, + overwriteProbes: true, + expectedWorkload: createWorkload(), + expectedHealthStatus: createCriticalHealthStatus("foo", "default"), + expErr: 
"1 error occurred:\n\t* failed to get expose config: failed to get original pod spec: unexpected end of JSON input\n\n", + }, + { + name: "pod has not been injected", + podName: "foo", + k8sObjects: func() []runtime.Object { + pod := createPod("foo", "", false, true) + return []runtime.Object{pod} + }, + }, + { + name: "pod with annotations", + podName: "foo", + k8sObjects: func() []runtime.Object { + pod := createPod("foo", "", true, true) + addProbesAndOriginalPodAnnotation(pod) + pod.Annotations[constants.AnnotationMeshDestinations] = "destination.port.mySVC.svc:24601" + return []runtime.Object{pod} + }, + tproxy: false, + telemetry: true, + metrics: true, + overwriteProbes: true, + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + expectedProxyConfiguration: createProxyConfiguration("foo", true, pbmesh.ProxyMode_PROXY_MODE_DEFAULT), + expectedDestinations: createDestinations(), + }, + { + name: "pod w/o IP", + podName: "foo", + k8sObjects: func() []runtime.Object { + pod := createPod("foo", "", true, true) + pod.Status.PodIP = "" + return []runtime.Object{pod} + }, + requeue: true, + }, + // TODO: make sure multi-error accumulates errors + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +// TestReconcileUpdatePod test updating a Pod object when there is already matching resources in Consul. +// Updates are unlikely because of the immutable behaviors of pods as members of deployment/statefulset, +// but theoretically it is possible to update annotations and labels in-place. Most likely this will be +// from a change in health status. 
+func TestReconcileUpdatePod(t *testing.T) { + t.Parallel() + + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: metav1.NamespaceDefault, + }} + node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} + + type testCase struct { + name string + podName string // This needs to be aligned with the pod created in `k8sObjects` + namespace string // Defaults to metav1.NamespaceDefault if empty. Should be aligned with the ns in the pod + + k8sObjects func() []runtime.Object // testing node is injected separately + + existingWorkload *pbcatalog.Workload + existingHealthStatus *pbcatalog.HealthStatus + existingProxyConfiguration *pbmesh.ProxyConfiguration + existingDestinations *pbmesh.Destinations + + expectedWorkload *pbcatalog.Workload + expectedHealthStatus *pbcatalog.HealthStatus + expectedProxyConfiguration *pbmesh.ProxyConfiguration + expectedDestinations *pbmesh.Destinations + + tproxy bool + overwriteProbes bool + metrics bool + telemetry bool + + expErr string + } + + run := func(t *testing.T, tc testCase) { + k8sObjects := []runtime.Object{ + &ns, + &node, + } + if tc.k8sObjects != nil { + k8sObjects = append(k8sObjects, tc.k8sObjects()...) + } + + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() + + // Create test consulServer server. + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + // Create the pod controller. 
+ pc := &Controller{ + Client: fakeClient, + Log: logrtest.New(t), + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + TProxyOverwriteProbes: tc.overwriteProbes, + EnableTransparentProxy: tc.tproxy, + EnableTelemetryCollector: tc.telemetry, + } + if tc.metrics { + pc.MetricsConfig = metrics.Config{ + DefaultEnableMetrics: true, + DefaultPrometheusScrapePort: "1234", + } + } + + namespace := tc.namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + + workloadID := getWorkloadID(tc.podName, constants.DefaultConsulNS, constants.DefaultConsulPartition) + loadResource(t, context.Background(), testClient.ResourceClient, workloadID, tc.existingWorkload, nil) + loadResource(t, context.Background(), testClient.ResourceClient, getHealthStatusID(tc.podName, constants.DefaultConsulNS, constants.DefaultConsulPartition), tc.existingHealthStatus, workloadID) + loadResource(t, context.Background(), testClient.ResourceClient, getProxyConfigurationID(tc.podName, constants.DefaultConsulNS, constants.DefaultConsulPartition), tc.existingProxyConfiguration, nil) + loadResource(t, context.Background(), testClient.ResourceClient, getDestinationsID(tc.podName, constants.DefaultConsulNS, constants.DefaultConsulPartition), tc.existingDestinations, nil) + + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: tc.podName, + } + + resp, err := pc.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: namespacedName, + }) + if tc.expErr != "" { + require.EqualError(t, err, tc.expErr) + } else { + require.NoError(t, err) + } + require.False(t, resp.Requeue) + + wID := getWorkloadID(tc.podName, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedWorkloadMatches(t, context.Background(), testClient.ResourceClient, wID, tc.expectedWorkload) + + hsID := 
getHealthStatusID(tc.podName, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedHealthStatusMatches(t, context.Background(), testClient.ResourceClient, hsID, tc.expectedHealthStatus) + + pcID := getProxyConfigurationID(tc.podName, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedProxyConfigurationMatches(t, context.Background(), testClient.ResourceClient, pcID, tc.expectedProxyConfiguration) + + uID := getDestinationsID(tc.podName, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedDestinationMatches(t, context.Background(), testClient.ResourceClient, uID, tc.expectedDestinations) + } + + testCases := []testCase{ + { + name: "pod update ports", + podName: "foo", + k8sObjects: func() []runtime.Object { + pod := createPod("foo", "", true, true) + return []runtime.Object{pod} + }, + existingHealthStatus: createPassingHealthStatus(), + existingWorkload: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "10.0.0.1", Ports: []string{"public", "mesh"}}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "public": { + Port: 80, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "mesh": { + Port: constants.ProxyDefaultInboundPort, + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + NodeName: consulNodeName, + Identity: "foo", + }, + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + }, + { + name: "pod healthy to unhealthy", + podName: "foo", + k8sObjects: func() []runtime.Object { + pod := createPod("foo", "", true, false) + return []runtime.Object{pod} + }, + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + expectedWorkload: createWorkload(), + expectedHealthStatus: createCriticalHealthStatus("foo", "default"), + }, + { + name: "add metrics, tproxy and probe overwrite to pod", + podName: "foo", + k8sObjects: func() []runtime.Object { + pod := createPod("foo", "", true, true) + 
pod.Annotations[constants.KeyTransparentProxy] = "true" + pod.Annotations[constants.AnnotationTransparentProxyOverwriteProbes] = "true" + pod.Annotations[constants.AnnotationEnableMetrics] = "true" + pod.Annotations[constants.AnnotationPrometheusScrapePort] = "21234" + addProbesAndOriginalPodAnnotation(pod) + + return []runtime.Object{pod} + }, + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + expectedProxyConfiguration: &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{"foo"}, + }, + DynamicConfig: &pbmesh.DynamicConfig{ + Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT, + ExposeConfig: &pbmesh.ExposeConfig{ + ExposePaths: []*pbmesh.ExposePath{ + { + ListenerPort: 20400, + LocalPathPort: 2001, + Path: "/livez", + }, + { + ListenerPort: 20300, + LocalPathPort: 2000, + Path: "/readyz", + }, + { + ListenerPort: 20500, + LocalPathPort: 2002, + Path: "/startupz", + }, + }, + }, + TransparentProxy: &pbmesh.TransparentProxy{ + OutboundListenerPort: 15001, + }, + }, + BootstrapConfig: &pbmesh.BootstrapConfig{ + PrometheusBindAddr: "0.0.0.0:21234", + }, + }, + }, + { + name: "pod update explicit destination", + podName: "foo", + k8sObjects: func() []runtime.Object { + pod := createPod("foo", "", true, true) + pod.Annotations[constants.AnnotationMeshDestinations] = "destination.port.mySVC.svc:24601" + return []runtime.Object{pod} + }, + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + existingDestinations: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{"foo"}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: "ap1", + Namespace: "ns1", + }, + Name: "mySVC3", + }, + DestinationPort: "destination2", + Datacenter: "", + 
ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(1234), + Ip: consulNodeAddress, + }, + }, + }, + }, + }, + expectedWorkload: createWorkload(), + expectedHealthStatus: createPassingHealthStatus(), + expectedDestinations: createDestinations(), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +// Tests deleting a Pod object, with and without matching Consul resources. +func TestReconcileDeletePod(t *testing.T) { + t.Parallel() + + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: metav1.NamespaceDefault, + Namespace: metav1.NamespaceDefault, + }} + node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} + + type testCase struct { + name string + podName string // This needs to be aligned with the pod created in `k8sObjects` + namespace string // Defaults to metav1.NamespaceDefault if empty. Should be aligned with the ns in the pod + + k8sObjects func() []runtime.Object // testing node is injected separately + + existingWorkload *pbcatalog.Workload + existingHealthStatus *pbcatalog.HealthStatus + existingProxyConfiguration *pbmesh.ProxyConfiguration + existingDestinations *pbmesh.Destinations + + expectedWorkload *pbcatalog.Workload + expectedHealthStatus *pbcatalog.HealthStatus + expectedProxyConfiguration *pbmesh.ProxyConfiguration + expectedDestinations *pbmesh.Destinations + + aclsEnabled bool + + expErr string + } + + run := func(t *testing.T, tc testCase) { + k8sObjects := []runtime.Object{ + &ns, + &node, + } + if tc.k8sObjects != nil { + k8sObjects = append(k8sObjects, tc.k8sObjects()...) + } + + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() + + // Create test consulServer server. 
+ masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" + + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + if tc.aclsEnabled { + c.ACL.Enabled = true + c.ACL.Tokens.InitialManagement = masterToken + } + c.Experiments = []string{"resource-apis"} + }) + + ctx := context.Background() + if tc.aclsEnabled { + ctx = metadata.AppendToOutgoingContext(context.Background(), "x-consul-token", masterToken) + } + + // Wait for the default partition to be created + require.Eventually(t, func() bool { + _, _, err := testClient.APIClient.Partitions().Read(ctx, constants.DefaultConsulPartition, nil) + return err == nil + }, 5*time.Second, 500*time.Millisecond) + + // Create the pod controller. + pc := &Controller{ + Client: fakeClient, + Log: logrtest.New(t), + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + } + if tc.aclsEnabled { + pc.AuthMethod = test.AuthMethod + } + + namespace := tc.namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + + workloadID := getWorkloadID(tc.podName, constants.DefaultConsulNS, constants.DefaultConsulPartition) + loadResource(t, ctx, testClient.ResourceClient, workloadID, tc.existingWorkload, nil) + loadResource(t, ctx, testClient.ResourceClient, getHealthStatusID(tc.podName, constants.DefaultConsulNS, constants.DefaultConsulPartition), tc.existingHealthStatus, workloadID) + loadResource(t, ctx, testClient.ResourceClient, getProxyConfigurationID(tc.podName, constants.DefaultConsulNS, constants.DefaultConsulPartition), tc.existingProxyConfiguration, nil) + loadResource(t, ctx, testClient.ResourceClient, getDestinationsID(tc.podName, constants.DefaultConsulNS, constants.DefaultConsulPartition), tc.existingDestinations, nil) + + var token *api.ACLToken + var err error + if tc.aclsEnabled { + 
test.SetupK8sAuthMethodV2(t, testClient.APIClient, tc.podName, metav1.NamespaceDefault) // podName is a standin for the service name + token, _, err = testClient.APIClient.ACL().Login(&api.ACLLoginParams{ + AuthMethod: test.AuthMethod, + BearerToken: test.ServiceAccountJWTToken, + Meta: map[string]string{ + "pod": fmt.Sprintf("%s/%s", metav1.NamespaceDefault, tc.podName), + "component": "connect-injector", + }, + }, nil) + require.NoError(t, err) + + // We create another junk token here just to make sure it doesn't interfere with cleaning up the + // previous "real" token that has metadata. + _, _, err = testClient.APIClient.ACL().Login(&api.ACLLoginParams{ + AuthMethod: test.AuthMethod, + BearerToken: test.ServiceAccountJWTToken, + }, nil) + require.NoError(t, err) + } + + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: tc.podName, + } + + resp, err := pc.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: namespacedName, + }) + if tc.expErr != "" { + require.EqualError(t, err, tc.expErr) + } else { + require.NoError(t, err) + } + require.False(t, resp.Requeue) + + wID := getWorkloadID(tc.podName, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedWorkloadMatches(t, ctx, testClient.ResourceClient, wID, tc.expectedWorkload) + + hsID := getHealthStatusID(tc.podName, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedHealthStatusMatches(t, ctx, testClient.ResourceClient, hsID, tc.expectedHealthStatus) + + pcID := getProxyConfigurationID(tc.podName, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedProxyConfigurationMatches(t, ctx, testClient.ResourceClient, pcID, tc.expectedProxyConfiguration) + + uID := getDestinationsID(tc.podName, metav1.NamespaceDefault, constants.DefaultConsulPartition) + expectedDestinationMatches(t, ctx, testClient.ResourceClient, uID, tc.expectedDestinations) + + if tc.aclsEnabled { + _, _, err = testClient.APIClient.ACL().TokenRead(token.AccessorID, 
nil) + require.Contains(t, err.Error(), "ACL not found") + } + + } + + testCases := []testCase{ + { + name: "vanilla delete pod", + podName: "foo", + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + existingProxyConfiguration: createProxyConfiguration("foo", true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + }, + { + name: "annotated delete pod", + podName: "foo", + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + existingProxyConfiguration: createProxyConfiguration("foo", true, pbmesh.ProxyMode_PROXY_MODE_DEFAULT), + existingDestinations: createDestinations(), + }, + { + name: "delete pod w/ acls", + podName: "foo", + existingWorkload: createWorkload(), + existingHealthStatus: createPassingHealthStatus(), + existingProxyConfiguration: createProxyConfiguration("foo", true, pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT), + aclsEnabled: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +// createPod creates a multi-port pod as a base for tests. If `namespace` is empty, +// the default Kube namespace will be used. 
+func createPod(name, namespace string, inject, ready bool) *corev1.Pod { + if namespace == "" { + namespace = metav1.NamespaceDefault + } + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{}, + Annotations: map[string]string{ + constants.AnnotationConsulK8sVersion: "1.3.0", + }, + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.1", + HostIP: consulNodeAddress, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + Ports: []corev1.ContainerPort{ + { + Name: "public", + Protocol: corev1.ProtocolTCP, + ContainerPort: 80, + }, + { + Name: "admin", + Protocol: corev1.ProtocolTCP, + ContainerPort: 8080, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/readyz", + Port: intstr.FromInt(2000), + }, + }, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/livez", + Port: intstr.FromInt(2001), + }, + }, + }, + StartupProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/startupz", + Port: intstr.FromInt(2002), + }, + }, + }, + }, + }, + NodeName: nodeName, + ServiceAccountName: name, + }, + } + if ready { + pod.Status.Conditions = []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + } + } else { + pod.Status.Conditions = []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + }, + } + } + + if inject { + pod.Labels[constants.KeyMeshInjectStatus] = constants.Injected + pod.Annotations[constants.KeyMeshInjectStatus] = constants.Injected + } + return pod +} + +// createWorkload creates a workload that matches the pod from createPod. 
+func createWorkload() *pbcatalog.Workload { + return &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "10.0.0.1", Ports: []string{"public", "admin", "mesh"}}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "public": { + Port: 80, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "admin": { + Port: 8080, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "mesh": { + Port: constants.ProxyDefaultInboundPort, + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + Identity: "foo", + } +} + +// createPassingHealthStatus creates a passing HealthStatus that matches the pod from createPod. +func createPassingHealthStatus() *pbcatalog.HealthStatus { + return &pbcatalog.HealthStatus{ + Type: constants.ConsulKubernetesCheckType, + Status: pbcatalog.Health_HEALTH_PASSING, + Output: constants.KubernetesSuccessReasonMsg, + Description: constants.ConsulKubernetesCheckName, + } +} + +// createCriticalHealthStatus creates a failing HealthStatus that matches the pod from createPod. +func createCriticalHealthStatus(name string, namespace string) *pbcatalog.HealthStatus { + return &pbcatalog.HealthStatus{ + Type: constants.ConsulKubernetesCheckType, + Status: pbcatalog.Health_HEALTH_CRITICAL, + Output: fmt.Sprintf("Pod \"%s/%s\" is not ready", namespace, name), + Description: constants.ConsulKubernetesCheckName, + } +} + +// createProxyConfiguration creates a proxyConfiguration that matches the pod from createPod, +// assuming that metrics, telemetry, and overwrite probes are enabled separately. 
+func createProxyConfiguration(podName string, overwriteProbes bool, mode pbmesh.ProxyMode) *pbmesh.ProxyConfiguration { + mesh := &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{podName}, + }, + DynamicConfig: &pbmesh.DynamicConfig{ + Mode: mode, + ExposeConfig: nil, + }, + BootstrapConfig: &pbmesh.BootstrapConfig{ + PrometheusBindAddr: "0.0.0.0:1234", + TelemetryCollectorBindSocketDir: DefaultTelemetryBindSocketDir, + }, + } + + if overwriteProbes { + mesh.DynamicConfig.ExposeConfig = &pbmesh.ExposeConfig{ + ExposePaths: []*pbmesh.ExposePath{ + { + ListenerPort: 20400, + LocalPathPort: 2001, + Path: "/livez", + }, + { + ListenerPort: 20300, + LocalPathPort: 2000, + Path: "/readyz", + }, + { + ListenerPort: 20500, + LocalPathPort: 2002, + Path: "/startupz", + }, + }, + } + } + + if mode == pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT { + mesh.DynamicConfig.TransparentProxy = &pbmesh.TransparentProxy{ + OutboundListenerPort: 15001, + } + } + + return mesh +} + +// createCriticalHealthStatus creates a failing HealthStatus that matches the pod from createPod. 
+func createDestinations() *pbmesh.Destinations { + return &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{"foo"}, + }, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: &pbresource.Tenancy{ + Partition: constants.GetNormalizedConsulPartition(""), + Namespace: constants.GetNormalizedConsulNamespace(""), + }, + Name: "mySVC", + }, + DestinationPort: "destination", + Datacenter: "", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Port: uint32(24601), + Ip: consulNodeAddress, + }, + }, + }, + }, + } +} + +func expectedWorkloadMatches(t *testing.T, ctx context.Context, client pbresource.ResourceServiceClient, id *pbresource.ID, expectedWorkload *pbcatalog.Workload) { + req := &pbresource.ReadRequest{Id: id} + + res, err := client.Read(ctx, req) + + if expectedWorkload == nil { + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, s.Code()) + return + } + + require.NoError(t, err) + require.NotNil(t, res) + + requireEqualResourceID(t, id, res.GetResource().GetId()) + + require.NotNil(t, res.GetResource().GetData()) + + actualWorkload := &pbcatalog.Workload{} + err = res.GetResource().GetData().UnmarshalTo(actualWorkload) + require.NoError(t, err) + + diff := cmp.Diff(expectedWorkload, actualWorkload, test.CmpProtoIgnoreOrder()...) + require.Equal(t, "", diff, "Workloads do not match") +} + +func expectedHealthStatusMatches(t *testing.T, ctx context.Context, client pbresource.ResourceServiceClient, id *pbresource.ID, expectedHealthStatus *pbcatalog.HealthStatus) { + req := &pbresource.ReadRequest{Id: id} + + res, err := client.Read(ctx, req) + + if expectedHealthStatus == nil { + // Because HealthStatus is asynchronously garbage-collected, we can retry to make sure it gets cleaned up. 
+ require.Eventually(t, func() bool { + _, err := client.Read(ctx, req) + s, ok := status.FromError(err) + return ok && codes.NotFound == s.Code() + }, 3*time.Second, 500*time.Millisecond) + return + } + + require.NoError(t, err) + require.NotNil(t, res) + + requireEqualResourceID(t, id, res.GetResource().GetId()) + + require.NotNil(t, res.GetResource().GetData()) + + actualHealthStatus := &pbcatalog.HealthStatus{} + err = res.GetResource().GetData().UnmarshalTo(actualHealthStatus) + require.NoError(t, err) + + diff := cmp.Diff(expectedHealthStatus, actualHealthStatus, test.CmpProtoIgnoreOrder()...) + require.Equal(t, "", diff, "HealthStatuses do not match") +} + +func expectedProxyConfigurationMatches(t *testing.T, ctx context.Context, client pbresource.ResourceServiceClient, id *pbresource.ID, expectedProxyConfiguration *pbmesh.ProxyConfiguration) { + req := &pbresource.ReadRequest{Id: id} + + res, err := client.Read(ctx, req) + + if expectedProxyConfiguration == nil { + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, s.Code()) + return + } + + require.NoError(t, err) + require.NotNil(t, res) + + requireEqualResourceID(t, id, res.GetResource().GetId()) + + require.NotNil(t, res.GetResource().GetData()) + + actualProxyConfiguration := &pbmesh.ProxyConfiguration{} + err = res.GetResource().GetData().UnmarshalTo(actualProxyConfiguration) + require.NoError(t, err) + + diff := cmp.Diff(expectedProxyConfiguration, actualProxyConfiguration, test.CmpProtoIgnoreOrder()...) 
+ require.Equal(t, "", diff, "ProxyConfigurations do not match") +} + +func expectedDestinationMatches(t *testing.T, ctx context.Context, client pbresource.ResourceServiceClient, id *pbresource.ID, expectedUpstreams *pbmesh.Destinations) { + req := &pbresource.ReadRequest{Id: id} + res, err := client.Read(ctx, req) + + if expectedUpstreams == nil { + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, s.Code()) + return + } + + require.NoError(t, err) + require.NotNil(t, res) + + requireEqualResourceID(t, id, res.GetResource().GetId()) + + require.NotNil(t, res.GetResource().GetData()) + + actualUpstreams := &pbmesh.Destinations{} + err = res.GetResource().GetData().UnmarshalTo(actualUpstreams) + require.NoError(t, err) + + require.True(t, proto.Equal(actualUpstreams, expectedUpstreams)) +} + +func loadResource(t *testing.T, ctx context.Context, client pbresource.ResourceServiceClient, id *pbresource.ID, proto proto.Message, owner *pbresource.ID) { + if id == nil || !proto.ProtoReflect().IsValid() { + return + } + + data, err := anypb.New(proto) + require.NoError(t, err) + + resource := &pbresource.Resource{ + Id: id, + Data: data, + Owner: owner, + } + + req := &pbresource.WriteRequest{Resource: resource} + _, err = client.Write(ctx, req) + require.NoError(t, err) + test.ResourceHasPersisted(t, ctx, client, id) +} + +func addProbesAndOriginalPodAnnotation(pod *corev1.Pod) { + podBytes, _ := json.Marshal(pod) + pod.Annotations[constants.AnnotationOriginalPod] = string(podBytes) + + // Fake the probe changes that would be added by the mesh webhook + pod.Spec.Containers[0].ReadinessProbe.HTTPGet.Port = intstr.FromInt(20300) + pod.Spec.Containers[0].LivenessProbe.HTTPGet.Port = intstr.FromInt(20400) + pod.Spec.Containers[0].StartupProbe.HTTPGet.Port = intstr.FromInt(20500) +} + +func requireEqualResourceID(t *testing.T, expected, actual *pbresource.ID) { + opts := []cmp.Option{ + 
protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + } + opts = append(opts, test.CmpProtoIgnoreOrder()...) + diff := cmp.Diff(expected, actual, opts...) + require.Equal(t, "", diff, "resource IDs do not match") +} diff --git a/control-plane/connect-inject/controllers/serviceaccount/serviceaccount_controller.go b/control-plane/connect-inject/controllers/serviceaccount/serviceaccount_controller.go new file mode 100644 index 0000000000..98e5c949c5 --- /dev/null +++ b/control-plane/connect-inject/controllers/serviceaccount/serviceaccount_controller.go @@ -0,0 +1,193 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package serviceaccount + +import ( + "context" + + "github.com/go-logr/logr" + pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "google.golang.org/grpc/metadata" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" +) + +const ( + defaultServiceAccountName = "default" +) + +type Controller struct { + client.Client + // ConsulServerConnMgr is the watcher for the Consul server addresses used to create Consul API v2 clients. + ConsulServerConnMgr consul.ServerConnectionManager + // K8sNamespaceConfig manages allow/deny Kubernetes namespaces. + common.K8sNamespaceConfig + // ConsulTenancyConfig manages settings related to Consul namespaces and partitions. 
+ common.ConsulTenancyConfig + + Log logr.Logger + + Scheme *runtime.Scheme + context.Context +} + +func (r *Controller) Logger(name types.NamespacedName) logr.Logger { + return r.Log.WithValues("request", name) +} + +func (r *Controller) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.ServiceAccount{}). + Complete(r) +} + +// Reconcile reads the state of a ServiceAccount object for a Kubernetes namespace and reconciles the corresponding +// Consul WorkloadIdentity. +func (r *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + var serviceAccount corev1.ServiceAccount + + // Ignore the request if the namespace of the service account is not allowed. + if inject.ShouldIgnore(req.Namespace, r.DenyK8sNamespacesSet, r.AllowK8sNamespacesSet) { + return ctrl.Result{}, nil + } + + // Create Consul resource service client for this reconcile. + resourceClient, err := consul.NewResourceServiceClient(r.ConsulServerConnMgr) + if err != nil { + r.Log.Error(err, "failed to create Consul resource client", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + + state, err := r.ConsulServerConnMgr.State() + if err != nil { + r.Log.Error(err, "failed to query Consul client state", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + if state.Token != "" { + ctx = metadata.AppendToOutgoingContext(ctx, "x-consul-token", state.Token) + } + + // We don't allow the default service account synced to prevent unintended TrafficPermissions + if req.Name == defaultServiceAccountName { + r.Log.Info("Not syncing default Kubernetes service account", "namespace", req.Namespace) + return ctrl.Result{}, nil + } + + // If the ServiceAccount object has been deleted (and we get an IsNotFound error), + // we need to deregister that WorkloadIdentity from Consul. 
+ err = r.Client.Get(ctx, req.NamespacedName, &serviceAccount) + if k8serrors.IsNotFound(err) { + err = r.deregisterWorkloadIdentity(ctx, resourceClient, req.Name, r.getConsulNamespace(req.Namespace), r.getConsulPartition()) + return ctrl.Result{}, err + } else if err != nil { + r.Log.Error(err, "failed to get ServiceAccount", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + r.Log.Info("retrieved ServiceAccount", "name", req.Name, "ns", req.Namespace) + + // Ensure the WorkloadIdentity exists. + workloadIdentityResource := r.getWorkloadIdentityResource( + serviceAccount.Name, // Consul and Kubernetes service account name will always match + r.getConsulNamespace(serviceAccount.Namespace), + r.getConsulPartition(), + map[string]string{ + constants.MetaKeyKubeNS: serviceAccount.Namespace, + constants.MetaKeyKubeServiceAccountName: serviceAccount.Name, + constants.MetaKeyManagedBy: constants.ManagedByServiceAccountValue, + }, + ) + + r.Log.Info("registering workload identity with Consul", getLogFieldsForResource(workloadIdentityResource.Id)...) + // We currently blindly write these records as changes to service accounts and resulting reconciles should be rare, + // and there's no data to conflict with in the payload. + if _, err := resourceClient.Write(ctx, &pbresource.WriteRequest{Resource: workloadIdentityResource}); err != nil { + // We could be racing with the namespace controller. + // Requeue (which includes backoff) to try again. + if inject.ConsulNamespaceIsNotFound(err) { + r.Log.Info("Consul namespace not found; re-queueing request", + "service-account", serviceAccount.Name, "ns", serviceAccount.Namespace, + "consul-ns", workloadIdentityResource.GetId().GetTenancy().GetNamespace(), "err", err.Error()) + return ctrl.Result{Requeue: true}, nil + } + + r.Log.Error(err, "failed to register workload identity", getLogFieldsForResource(workloadIdentityResource.Id)...) 
+ return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// deregisterWorkloadIdentity deletes the WorkloadIdentity resource corresponding to the given name and namespace from +// Consul. This operation is idempotent and can be executed for non-existent service accounts. +func (r *Controller) deregisterWorkloadIdentity(ctx context.Context, resourceClient pbresource.ResourceServiceClient, name, namespace, partition string) error { + _, err := resourceClient.Delete(ctx, &pbresource.DeleteRequest{ + Id: getWorkloadIdentityID(name, namespace, partition), + }) + return err +} + +// getWorkloadIdentityResource converts the given Consul WorkloadIdentity and metadata to a Consul resource API record. +func (r *Controller) getWorkloadIdentityResource(name, namespace, partition string, meta map[string]string) *pbresource.Resource { + return &pbresource.Resource{ + Id: getWorkloadIdentityID(name, namespace, partition), + // WorkloadIdentity is currently an empty message. + Data: inject.ToProtoAny(&pbauth.WorkloadIdentity{}), + Metadata: meta, + } +} + +func getWorkloadIdentityID(name, namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: name, + Type: pbauth.WorkloadIdentityType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + } +} + +// getConsulNamespace returns the Consul destination namespace for a provided Kubernetes namespace +// depending on Consul Namespaces being enabled and the value of namespace mirroring. +func (r *Controller) getConsulNamespace(kubeNamespace string) string { + ns := namespaces.ConsulNamespace( + kubeNamespace, + r.EnableConsulNamespaces, + r.ConsulDestinationNamespace, + r.EnableNSMirroring, + r.NSMirroringPrefix, + ) + + // TODO: remove this if and when the default namespace of resources is no longer required to be set explicitly. 
+ if ns == "" { + ns = constants.DefaultConsulNS + } + return ns +} + +func (r *Controller) getConsulPartition() string { + if !r.EnableConsulPartitions || r.ConsulPartition == "" { + return constants.DefaultConsulPartition + } + return r.ConsulPartition +} + +func getLogFieldsForResource(id *pbresource.ID) []any { + return []any{ + "name", id.Name, + "ns", id.Tenancy.Namespace, + "partition", id.Tenancy.Partition, + } +} diff --git a/control-plane/connect-inject/controllers/serviceaccount/serviceaccount_controller_ent_test.go b/control-plane/connect-inject/controllers/serviceaccount/serviceaccount_controller_ent_test.go new file mode 100644 index 0000000000..d90791d093 --- /dev/null +++ b/control-plane/connect-inject/controllers/serviceaccount/serviceaccount_controller_ent_test.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build enterprise + +package serviceaccount + +import ( + "testing" +) + +// TODO(NET-5719): ConsulDestinationNamespace and EnableNSMirroring +/- prefix + +// TODO(NET-5719) +// Tests new WorkloadIdentity registration in a non-default NS and Partition with namespaces set to mirroring +func TestReconcile_CreateWorkloadIdentity_WithNamespaces(t *testing.T) { + //TODO(NET-5719): Add test case to cover Consul namespace missing and check for backoff +} + +// TODO(NET-5719) +// Tests removing WorkloadIdentity registration in a non-default NS and Partition with namespaces set to mirroring +func TestReconcile_DeleteWorkloadIdentity_WithNamespaces(t *testing.T) { + //TODO(NET-5719): Add test case to cover Consul namespace missing and check for backoff +} diff --git a/control-plane/connect-inject/controllers/serviceaccount/serviceaccount_controller_test.go b/control-plane/connect-inject/controllers/serviceaccount/serviceaccount_controller_test.go new file mode 100644 index 0000000000..27bb909d2c --- /dev/null +++ b/control-plane/connect-inject/controllers/serviceaccount/serviceaccount_controller_test.go 
@@ -0,0 +1,307 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package serviceaccount + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/proto" + + mapset "github.com/deckarep/golang-set" + logrtest "github.com/go-logr/logr/testr" + pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + inject "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +type reconcileCase struct { + name string + svcAccountName string + k8sObjects func() []runtime.Object + existingResource *pbresource.Resource + expectedResource *pbresource.Resource + targetConsulNs string + targetConsulPartition string + expErr string +} + +// TODO(NET-5719): Allow/deny namespaces for reconcile tests + +// TestReconcile_CreateWorkloadIdentity ensures that a new ServiceAccount is reconciled +// to a Consul WorkloadIdentity. 
+func TestReconcile_CreateWorkloadIdentity(t *testing.T) { + t.Parallel() + cases := []reconcileCase{ + { + name: "Default ServiceAccount not synced", + svcAccountName: "default", + k8sObjects: func() []runtime.Object { + return []runtime.Object{createServiceAccount("default", "default")} + }, + }, + { + name: "Custom ServiceAccount", + svcAccountName: "my-svc-account", + k8sObjects: func() []runtime.Object { + return []runtime.Object{ + createServiceAccount("default", "default"), + createServiceAccount("my-svc-account", "default"), + } + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "my-svc-account", + Type: pbauth.WorkloadIdentityType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: getWorkloadIdentityData(), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByServiceAccountValue, + }, + }, + }, + { + name: "Already exists", + svcAccountName: "my-svc-account", + k8sObjects: func() []runtime.Object { + return []runtime.Object{ + createServiceAccount("default", "default"), + createServiceAccount("my-svc-account", "default"), + } + }, + existingResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "my-svc-account", + Type: pbauth.WorkloadIdentityType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: getWorkloadIdentityData(), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByServiceAccountValue, + }, + }, + expectedResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "my-svc-account", + Type: pbauth.WorkloadIdentityType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: getWorkloadIdentityData(), + 
Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByServiceAccountValue, + }, + }, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + runReconcileCase(t, tc) + }) + } +} + +// Tests deleting a WorkloadIdentity object, with and without matching Consul resources. +func TestReconcile_DeleteWorkloadIdentity(t *testing.T) { + t.Parallel() + cases := []reconcileCase{ + { + name: "Basic ServiceAccount not found (deleted)", + svcAccountName: "my-svc-account", + k8sObjects: func() []runtime.Object { + // Only default exists (always exists). + return []runtime.Object{createServiceAccount("default", "default")} + }, + existingResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "my-svc-account", + Type: pbauth.WorkloadIdentityType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: getWorkloadIdentityData(), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByServiceAccountValue, + }, + }, + }, + { + name: "Other ServiceAccount exists", + svcAccountName: "my-svc-account", + k8sObjects: func() []runtime.Object { + // Default and other ServiceAccount exist + return []runtime.Object{ + createServiceAccount("default", "default"), + createServiceAccount("other-svc-account", "default"), + } + }, + existingResource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: "my-svc-account", + Type: pbauth.WorkloadIdentityType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: getWorkloadIdentityData(), + Metadata: map[string]string{ + constants.MetaKeyKubeNS: constants.DefaultConsulNS, + constants.MetaKeyManagedBy: constants.ManagedByServiceAccountValue, + }, + }, + }, + { + name: "Already deleted", + svcAccountName: 
"my-svc-account", + k8sObjects: func() []runtime.Object { + // Only default exists (always exists). + return []runtime.Object{createServiceAccount("default", "default")} + }, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + runReconcileCase(t, tc) + }) + } +} + +func runReconcileCase(t *testing.T, tc reconcileCase) { + t.Helper() + + // Create fake k8s client + var k8sObjects []runtime.Object + if tc.k8sObjects != nil { + k8sObjects = tc.k8sObjects() + } + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() + + // Create test Consul server. + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + // Create the ServiceAccount controller. + sa := &Controller{ + Client: fakeClient, + Log: logrtest.New(t), + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + } + + // Default ns and partition if not specified in test. + if tc.targetConsulNs == "" { + tc.targetConsulNs = constants.DefaultConsulNS + } + if tc.targetConsulPartition == "" { + tc.targetConsulPartition = constants.DefaultConsulPartition + } + + // If existing resource specified, create it and ensure it exists. + if tc.existingResource != nil { + writeReq := &pbresource.WriteRequest{Resource: tc.existingResource} + _, err := testClient.ResourceClient.Write(context.Background(), writeReq) + require.NoError(t, err) + test.ResourceHasPersisted(t, context.Background(), testClient.ResourceClient, tc.existingResource.Id) + } + + // Run actual reconcile and verify results. 
+ resp, err := sa.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: tc.svcAccountName, + Namespace: tc.targetConsulNs, + }, + }) + if tc.expErr != "" { + require.ErrorContains(t, err, tc.expErr) + } else { + require.NoError(t, err) + } + require.False(t, resp.Requeue) + + expectedWorkloadIdentityMatches(t, testClient.ResourceClient, tc.svcAccountName, tc.targetConsulNs, tc.targetConsulPartition, tc.expectedResource) +} + +func expectedWorkloadIdentityMatches(t *testing.T, client pbresource.ResourceServiceClient, name, namespace, partition string, expectedResource *pbresource.Resource) { + req := &pbresource.ReadRequest{Id: getWorkloadIdentityID(name, namespace, partition)} + + res, err := client.Read(context.Background(), req) + + if expectedResource == nil { + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, s.Code()) + return + } + + require.NoError(t, err) + require.NotNil(t, res) + require.NotNil(t, res.GetResource().GetData()) + + // This equality check isn't technically necessary because WorkloadIdentity is an empty message, + // but this supports the addition of fields in the future. + expectedWorkloadIdentity := &pbauth.WorkloadIdentity{} + err = anypb.UnmarshalTo(expectedResource.Data, expectedWorkloadIdentity, proto.UnmarshalOptions{}) + require.NoError(t, err) + + actualWorkloadIdentity := &pbauth.WorkloadIdentity{} + err = res.GetResource().GetData().UnmarshalTo(actualWorkloadIdentity) + require.NoError(t, err) + + if diff := cmp.Diff(expectedWorkloadIdentity, actualWorkloadIdentity, test.CmpProtoIgnoreOrder()...); diff != "" { + t.Errorf("unexpected difference:\n%v", diff) + } +} + +// getWorkloadIdentityData returns a WorkloadIdentity resource payload. +// This function takes no arguments because WorkloadIdentity is currently an empty proto message. 
+func getWorkloadIdentityData() *anypb.Any { + return inject.ToProtoAny(&pbauth.WorkloadIdentity{}) +} + +func createServiceAccount(name, namespace string) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + // Other fields exist, but we ignore them in this controller. + } +} diff --git a/control-plane/connect-inject/namespace/namespace_controller.go b/control-plane/connect-inject/namespace/namespace_controller.go new file mode 100644 index 0000000000..86035bc69f --- /dev/null +++ b/control-plane/connect-inject/namespace/namespace_controller.go @@ -0,0 +1,131 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package namespace + +import ( + "context" + "fmt" + + mapset "github.com/deckarep/golang-set" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" +) + +type Controller struct { + client.Client + // ConsulClientConfig is the config for the Consul API client. + ConsulClientConfig *consul.Config + // ConsulServerConnMgr is the watcher for the Consul server addresses. + ConsulServerConnMgr consul.ServerConnectionManager + // AllowK8sNamespacesSet determines which kube namespaces are reconciled. + AllowK8sNamespacesSet mapset.Set + // DenyK8sNamespacesSet determines which kube namespaces are ignored. + DenyK8sNamespacesSet mapset.Set + + // Partition is not required. It should already be set in the API ClientConfig. + + // ConsulDestinationNamespace is the name of the Consul namespace to create + // all config entries in. 
If EnableNSMirroring is true this is ignored. + ConsulDestinationNamespace string + // EnableNSMirroring causes Consul namespaces to be created to match the + // k8s namespace of any config entry custom resource. Config entries will + // be created in the matching Consul namespace. + EnableNSMirroring bool + // NSMirroringPrefix is an optional prefix that can be added to the Consul + // namespaces created while mirroring. For example, if it is set to "k8s-", + // then the k8s `default` namespace will be mirrored in Consul's + // `k8s-default` namespace. + NSMirroringPrefix string + + // CrossNamespaceACLPolicy is the name of the ACL policy to attach to + // any created Consul namespaces to allow cross namespace service discovery. + // Only necessary if ACLs are enabled. + CrossNamespaceACLPolicy string + + Log logr.Logger +} + +// Reconcile reads a Kubernetes Namespace and reconciles the mapped namespace in Consul. +// TODO: Move the creation of a destination namespace to a dedicated, single-flight goroutine. +func (r *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + var namespace corev1.Namespace + + // Ignore the request if the namespace is not allowed. + if common.ShouldIgnore(req.Name, r.DenyK8sNamespacesSet, r.AllowK8sNamespacesSet) { + return ctrl.Result{}, nil + } + + apiClient, err := consul.NewClientFromConnMgr(r.ConsulClientConfig, r.ConsulServerConnMgr) + if err != nil { + r.Log.Error(err, "failed to create Consul API client", "name", req.Name) + return ctrl.Result{}, err + } + + err = r.Client.Get(ctx, req.NamespacedName, &namespace) + + // If the namespace object has been deleted (and we get an IsNotFound error), + // we need to remove the Namespace from Consul. + if k8serrors.IsNotFound(err) { + + // if we are using a destination namespace, NEVER delete it. 
+ if !r.EnableNSMirroring { + return ctrl.Result{}, nil + } + + if err := namespaces.EnsureDeleted(apiClient, r.getConsulNamespace(req.Name)); err != nil { + r.Log.Error(err, "error deleting namespace", + "namespace", r.getConsulNamespace(req.Name)) + return ctrl.Result{}, fmt.Errorf("error deleting namespace: %w", err) + } + + return ctrl.Result{}, nil + } else if err != nil { + r.Log.Error(err, "failed to get namespace", "name", req.Name) + return ctrl.Result{}, err + } + + r.Log.Info("retrieved", "namespace", namespace.GetName()) + + // TODO: eventually we will want to replace the V1 namespace APIs with the native V2 resource creation for tenancy + if _, err := namespaces.EnsureExists(apiClient, r.getConsulNamespace(namespace.GetName()), r.CrossNamespaceACLPolicy); err != nil { + r.Log.Error(err, "error checking or creating namespace", + "namespace", r.getConsulNamespace(namespace.GetName())) + return ctrl.Result{}, fmt.Errorf("error checking or creating namespace: %w", err) + } + + return ctrl.Result{}, nil +} + +// SetupWithManager registers this controller with the manager. +func (r *Controller) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Namespace{}). + Complete(r) +} + +// getConsulNamespace returns the Consul destination namespace for a provided Kubernetes namespace +// depending on Consul Namespaces being enabled and the value of namespace mirroring. +func (r *Controller) getConsulNamespace(kubeNamespace string) string { + ns := namespaces.ConsulNamespace( + kubeNamespace, + true, + r.ConsulDestinationNamespace, + r.EnableNSMirroring, + r.NSMirroringPrefix, + ) + + // TODO: remove this if and when the default namespace of resources change. 
+ if ns == "" { + ns = constants.DefaultConsulNS + } + return ns +} diff --git a/control-plane/connect-inject/namespace/namespace_controller_ent_test.go b/control-plane/connect-inject/namespace/namespace_controller_ent_test.go new file mode 100644 index 0000000000..1b63161976 --- /dev/null +++ b/control-plane/connect-inject/namespace/namespace_controller_ent_test.go @@ -0,0 +1,413 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build enterprise + +package namespace + +import ( + "context" + "testing" + + mapset "github.com/deckarep/golang-set" + logrtest "github.com/go-logr/logr/testr" + capi "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +const ( + testNamespaceName = "foo" + testCrossACLPolicy = "cross-namespace-policy" +) + +// TestReconcileCreateNamespace ensures that a new namespace is reconciled to a +// Consul namespace. The actual namespace in Consul depends on if the controller +// is configured with a destination namespace or mirroring enabled. 
+func TestReconcileCreateNamespace(t *testing.T) { + t.Parallel() + + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: testNamespaceName, + }} + nsDefault := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: metav1.NamespaceDefault, + }} + + type testCase struct { + name string + kubeNamespaceName string // this will default to "foo" + partition string + + consulDestinationNamespace string + enableNSMirroring bool + nsMirrorPrefix string + + expectedConsulNamespaceName string + expectedConsulNamespace *capi.Namespace + + acls bool + expErr string + } + + run := func(t *testing.T, tc testCase) { + k8sObjects := []runtime.Object{ + &ns, + &nsDefault, + } + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() + + // Create test consulServer server. + adminToken := "123e4567-e89b-12d3-a456-426614174000" + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + if tc.acls { + c.ACL.Enabled = tc.acls + c.ACL.Tokens.InitialManagement = adminToken + } + }) + + if tc.partition != "" { + testClient.Cfg.APIClientConfig.Partition = tc.partition + + partition := &capi.Partition{ + Name: tc.partition, + } + _, _, err := testClient.APIClient.Partitions().Create(context.Background(), partition, nil) + require.NoError(t, err) + } + + // Create the namespace controller. 
+ nc := &Controller{ + Client: fakeClient, + Log: logrtest.New(t), + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + EnableNSMirroring: tc.enableNSMirroring, + NSMirroringPrefix: tc.nsMirrorPrefix, + ConsulDestinationNamespace: tc.consulDestinationNamespace, + } + if tc.acls { + nc.CrossNamespaceACLPolicy = testCrossACLPolicy + + policy := &capi.ACLPolicy{Name: testCrossACLPolicy} + _, _, err := testClient.APIClient.ACL().PolicyCreate(policy, nil) + require.NoError(t, err) + } + + if tc.kubeNamespaceName == "" { + tc.kubeNamespaceName = testNamespaceName + } + + namespacedName := types.NamespacedName{ + Name: tc.kubeNamespaceName, + } + + resp, err := nc.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: namespacedName, + }) + if tc.expErr != "" { + require.EqualError(t, err, tc.expErr) + } else { + require.NoError(t, err) + } + require.False(t, resp.Requeue) + + expectedNamespaceMatches(t, testClient.APIClient, tc.expectedConsulNamespaceName, tc.partition, tc.expectedConsulNamespace) + } + + testCases := []testCase{ + { + // This also tests that we don't overwrite anything about the default Consul namespace, + // because the original description is maintained. 
name: "destination namespace default", + expectedConsulNamespaceName: constants.DefaultConsulNS, + expectedConsulNamespace: getNamespace(constants.DefaultConsulNS, "", false), + }, + { + name: "destination namespace, non-default", + consulDestinationNamespace: "bar", + expectedConsulNamespaceName: "bar", + expectedConsulNamespace: getNamespace("bar", "", false), + }, + { + name: "destination namespace, non-default with ACLs enabled", + consulDestinationNamespace: "bar", + acls: true, + expectedConsulNamespaceName: "bar", + expectedConsulNamespace: getNamespace("bar", constants.DefaultConsulPartition, true), // For some reason, the partition is returned by Consul in this case, even though it is the default + }, + { + name: "destination namespace, non-default, non-default partition", + partition: "baz", + consulDestinationNamespace: "bar", + expectedConsulNamespaceName: "bar", + expectedConsulNamespace: getNamespace("bar", "baz", false), + }, + { + name: "mirrored namespaces", + enableNSMirroring: true, + expectedConsulNamespaceName: testNamespaceName, + expectedConsulNamespace: getNamespace(testNamespaceName, "", false), + }, + { + name: "mirrored namespaces, non-default partition", + partition: "baz", + enableNSMirroring: true, + expectedConsulNamespaceName: testNamespaceName, + expectedConsulNamespace: getNamespace(testNamespaceName, "baz", false), + }, + { + name: "mirrored namespaces with acls", + acls: true, + enableNSMirroring: true, + expectedConsulNamespaceName: testNamespaceName, + expectedConsulNamespace: getNamespace(testNamespaceName, constants.DefaultConsulPartition, true), // For some reason, the partition is returned by Consul in this case, even though it is the default + }, + { + name: "mirrored namespaces with prefix", + nsMirrorPrefix: "k8s-", + enableNSMirroring: true, + expectedConsulNamespaceName: "k8s-foo", + expectedConsulNamespace: getNamespace("k8s-foo", "", false), + }, + { + name: "mirrored namespaces with prefix, non-default partition", + 
nsMirrorPrefix: "k8s-", + partition: "baz", + enableNSMirroring: true, + expectedConsulNamespaceName: "k8s-foo", + expectedConsulNamespace: getNamespace("k8s-foo", "baz", false), + }, + { + name: "mirrored namespaces with prefix and acls", + nsMirrorPrefix: "k8s-", + acls: true, + enableNSMirroring: true, + expectedConsulNamespaceName: "k8s-foo", + expectedConsulNamespace: getNamespace("k8s-foo", constants.DefaultConsulPartition, true), // For some reason, the partition is returned by Consul in this case, even though it is the default + }, + { + name: "mirrored namespaces overrides destination namespace", + enableNSMirroring: true, + consulDestinationNamespace: "baz", + expectedConsulNamespaceName: testNamespaceName, + expectedConsulNamespace: getNamespace(testNamespaceName, "", false), + }, + { + name: "ignore kube-system", + kubeNamespaceName: metav1.NamespaceSystem, + consulDestinationNamespace: "bar", + expectedConsulNamespaceName: "bar", // we make sure that this doesn't get created from the kube-system space by not providing the actual struct + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +// Tests deleting a Namespace object, with and without matching Consul resources. +func TestReconcileDeleteNamespace(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + kubeNamespaceName string // this will default to "foo" + partition string + + destinationNamespace string + enableNSMirroring bool + nsMirrorPrefix string + + existingConsulNamespace *capi.Namespace + + expectedConsulNamespace *capi.Namespace + } + + run := func(t *testing.T, tc testCase) { + fakeClient := fake.NewClientBuilder().WithRuntimeObjects().Build() + + // Create test Consul server. 
+ testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + if tc.partition != "" { + testClient.Cfg.APIClientConfig.Partition = tc.partition + + partition := &capi.Partition{ + Name: tc.partition, + } + _, _, err := testClient.APIClient.Partitions().Create(context.Background(), partition, nil) + require.NoError(t, err) + } + + if tc.existingConsulNamespace != nil { + _, _, err := testClient.APIClient.Namespaces().Create(tc.existingConsulNamespace, nil) + require.NoError(t, err) + } + + // Create the namespace controller. + nc := &Controller{ + Client: fakeClient, + Log: logrtest.New(t), + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + EnableNSMirroring: tc.enableNSMirroring, + NSMirroringPrefix: tc.nsMirrorPrefix, + ConsulDestinationNamespace: tc.destinationNamespace, + } + + if tc.kubeNamespaceName == "" { + tc.kubeNamespaceName = testNamespaceName + } + + namespacedName := types.NamespacedName{ + Name: tc.kubeNamespaceName, + } + + resp, err := nc.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: namespacedName, + }) + require.NoError(t, err) + require.False(t, resp.Requeue) + + if tc.existingConsulNamespace != nil { + expectedNamespaceMatches(t, testClient.APIClient, tc.existingConsulNamespace.Name, tc.partition, tc.expectedConsulNamespace) + } else { + expectedNamespaceMatches(t, testClient.APIClient, testNamespaceName, tc.partition, tc.expectedConsulNamespace) + } + } + + testCases := []testCase{ + { + name: "destination namespace with default is not cleaned up", + existingConsulNamespace: getNamespace(constants.DefaultConsulNS, "", false), + expectedConsulNamespace: getNamespace(constants.DefaultConsulNS, "", false), + }, + { + name: "destination namespace with non-default is not cleaned up", + destinationNamespace: "bar", + 
existingConsulNamespace: getNamespace("bar", "", false), + expectedConsulNamespace: getNamespace("bar", "", false), + }, + { + name: "destination namespace with non-default is not cleaned up, non-default partition", + destinationNamespace: "bar", + partition: "baz", + existingConsulNamespace: getNamespace("bar", "baz", false), + expectedConsulNamespace: getNamespace("bar", "baz", false), + }, + { + name: "mirrored namespaces", + enableNSMirroring: true, + existingConsulNamespace: getNamespace(testNamespaceName, "", false), + }, + { + name: "mirrored namespaces but it's the default namespace", + kubeNamespaceName: metav1.NamespaceDefault, + enableNSMirroring: true, + existingConsulNamespace: getNamespace(constants.DefaultConsulNS, "", false), + expectedConsulNamespace: getNamespace(constants.DefaultConsulNS, "", false), // Don't ever delete the Consul default NS + }, + { + name: "mirrored namespaces, non-default partition", + partition: "baz", + enableNSMirroring: true, + existingConsulNamespace: getNamespace(testNamespaceName, "baz", false), + }, + { + name: "mirrored namespaces with prefix", + nsMirrorPrefix: "k8s-", + enableNSMirroring: true, + existingConsulNamespace: getNamespace("k8s-foo", "", false), + }, + { + name: "mirrored namespaces with prefix, non-default partition", + partition: "baz", + nsMirrorPrefix: "k8s-", + enableNSMirroring: true, + existingConsulNamespace: getNamespace("k8s-foo", "baz", false), + }, + { + name: "mirrored namespaces overrides destination namespace", + enableNSMirroring: true, + destinationNamespace: "baz", + existingConsulNamespace: getNamespace(testNamespaceName, "", false), + }, + { + name: "mirrored namespace, but the namespace is already removed from Consul", + enableNSMirroring: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +// getNamespace returns a basic Consul V1 namespace for testing setup and comparison. +func getNamespace(name string, partition 
string, acls bool) *capi.Namespace { + ns := &capi.Namespace{ + Name: name, + Partition: partition, + } + + if name != constants.DefaultConsulNS { + ns.Description = "Auto-generated by consul-k8s" + ns.Meta = map[string]string{"external-source": "kubernetes"} + ns.ACLs = &capi.NamespaceACLConfig{} + } else { + ns.Description = "Builtin Default Namespace" + } + + if acls && name != constants.DefaultConsulNS { + // Create the ACLs config for the cross-Consul-namespace + // default policy that needs to be attached + ns.ACLs = &capi.NamespaceACLConfig{ + PolicyDefaults: []capi.ACLLink{ + {Name: testCrossACLPolicy}, + }, + } + } + + return ns +} + +func expectedNamespaceMatches(t *testing.T, client *capi.Client, name string, partition string, expectedNamespace *capi.Namespace) { + namespaceInfo, _, err := client.Namespaces().Read(name, &capi.QueryOptions{Partition: partition}) + + require.NoError(t, err) + + if expectedNamespace == nil { + require.True(t, namespaceInfo == nil || namespaceInfo.DeletedAt != nil) + return + } + + require.NotNil(t, namespaceInfo) + // Zero out the Raft Index, in this case it is irrelevant. + namespaceInfo.CreateIndex = 0 + namespaceInfo.ModifyIndex = 0 + if namespaceInfo.ACLs != nil && len(namespaceInfo.ACLs.PolicyDefaults) > 0 { + namespaceInfo.ACLs.PolicyDefaults[0].ID = "" // Zero out the ID for ACLs enabled to facilitate testing. 
+ } + require.Equal(t, *expectedNamespace, *namespaceInfo) +} diff --git a/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go b/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go index c30b672093..0df47d6085 100644 --- a/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go +++ b/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go @@ -97,10 +97,9 @@ func (w *MeshWebhook) consulDataplaneSidecar(namespace corev1.Namespace, pod cor } container := corev1.Container{ - Name: containerName, - Image: w.ImageConsulDataplane, - ImagePullPolicy: corev1.PullPolicy(w.GlobalImagePullPolicy), - Resources: resources, + Name: containerName, + Image: w.ImageConsulDataplane, + Resources: resources, // We need to set tmp dir to an ephemeral volume that we're mounting so that // consul-dataplane can write files to it. Otherwise, it wouldn't be able to // because we set file system to be read-only. diff --git a/control-plane/connect-inject/webhook/container_init.go b/control-plane/connect-inject/webhook/container_init.go index 6ba4ca35a5..c5a442335a 100644 --- a/control-plane/connect-inject/webhook/container_init.go +++ b/control-plane/connect-inject/webhook/container_init.go @@ -104,9 +104,8 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod, initContainerName = fmt.Sprintf("%s-%s", injectInitContainerName, mpi.serviceName) } container := corev1.Container{ - Name: initContainerName, - Image: w.ImageConsulK8S, - ImagePullPolicy: corev1.PullPolicy(w.GlobalImagePullPolicy), + Name: initContainerName, + Image: w.ImageConsulK8S, Env: []corev1.EnvVar{ { Name: "POD_NAME", diff --git a/control-plane/connect-inject/webhook/mesh_webhook.go b/control-plane/connect-inject/webhook/mesh_webhook.go index f568b3a907..cdacd895f4 100644 --- a/control-plane/connect-inject/webhook/mesh_webhook.go +++ b/control-plane/connect-inject/webhook/mesh_webhook.go @@ -75,9 +75,6 @@ type MeshWebhook struct { // This image is used for 
the consul-sidecar container. ImageConsulK8S string - // GlobalImagePullPolicy is the pull policy for all Consul images (consul, consul-dataplane, consul-k8s) - GlobalImagePullPolicy string - // Optional: set when you need extra options to be set when running envoy // See a list of args here: https://www.envoyproxy.io/docs/envoy/latest/operations/cli EnvoyExtraArgs string diff --git a/control-plane/connect-inject/webhookv2/consul_dataplane_sidecar.go b/control-plane/connect-inject/webhookv2/consul_dataplane_sidecar.go new file mode 100644 index 0000000000..d94dbeaaac --- /dev/null +++ b/control-plane/connect-inject/webhookv2/consul_dataplane_sidecar.go @@ -0,0 +1,529 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/google/shlex" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" +) + +const ( + consulDataplaneDNSBindHost = "127.0.0.1" + consulDataplaneDNSBindPort = 8600 +) + +func (w *MeshWebhook) consulDataplaneSidecar(namespace corev1.Namespace, pod corev1.Pod) (corev1.Container, error) { + resources, err := w.sidecarResources(pod) + if err != nil { + return corev1.Container{}, err + } + + // Extract the service account token's volume mount. 
+ var bearerTokenFile string + var saTokenVolumeMount corev1.VolumeMount + if w.AuthMethod != "" { + saTokenVolumeMount, bearerTokenFile, err = findServiceAccountVolumeMount(pod) + if err != nil { + return corev1.Container{}, err + } + } + + args, err := w.getContainerSidecarArgs(namespace, bearerTokenFile, pod) + if err != nil { + return corev1.Container{}, err + } + + containerName := sidecarContainer + + var probe *corev1.Probe + if useProxyHealthCheck(pod) { + // If using the proxy health check for a service, configure an HTTP handler + // that queries the '/ready' endpoint of the proxy. + probe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(constants.ProxyDefaultHealthPort), + Path: "/ready", + }, + }, + InitialDelaySeconds: 1, + } + } else { + probe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(constants.ProxyDefaultInboundPort), + }, + }, + InitialDelaySeconds: 1, + } + } + + container := corev1.Container{ + Name: containerName, + Image: w.ImageConsulDataplane, + Resources: resources, + // We need to set tmp dir to an ephemeral volume that we're mounting so that + // consul-dataplane can write files to it. Otherwise, it wouldn't be able to + // because we set file system to be read-only. + Env: []corev1.EnvVar{ + { + Name: "TMPDIR", + Value: "/consul/mesh-inject", + }, + { + Name: "NODE_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "spec.nodeName", + }, + }, + }, + // The pod name isn't known currently, so we must rely on the environment variable to fill it in rather than using args. 
+ { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}, + }, + }, + { + Name: "DP_PROXY_ID", + Value: "$(POD_NAME)", + }, + { + Name: "DP_CREDENTIAL_LOGIN_META", + Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)", + }, + // This entry exists to support newer versions of consul dataplane, where environment variable entries + // utilize this numbered notation to indicate individual KV pairs in a map. + { + Name: "DP_CREDENTIAL_LOGIN_META1", + Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)", + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/mesh-inject", + }, + }, + Args: args, + } + + container.ReadinessProbe = probe + + if w.AuthMethod != "" { + container.VolumeMounts = append(container.VolumeMounts, saTokenVolumeMount) + } + + if useProxyHealthCheck(pod) { + // Configure the Readiness Address for the proxy's health check to be the Pod IP. + container.Env = append(container.Env, corev1.EnvVar{ + Name: "DP_ENVOY_READY_BIND_ADDRESS", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIP"}, + }, + }) + // Configure the port on which the readiness probe will query the proxy for its health. + container.Ports = append(container.Ports, corev1.ContainerPort{ + Name: "proxy-health", + ContainerPort: int32(constants.ProxyDefaultHealthPort), + }) + } + + // Add any extra VolumeMounts. + if userVolMount, ok := pod.Annotations[constants.AnnotationConsulSidecarUserVolumeMount]; ok { + var volumeMounts []corev1.VolumeMount + err := json.Unmarshal([]byte(userVolMount), &volumeMounts) + if err != nil { + return corev1.Container{}, err + } + container.VolumeMounts = append(container.VolumeMounts, volumeMounts...) 
+ } + + // Container Ports + metricsPorts, err := w.getMetricsPorts(pod) + if err != nil { + return corev1.Container{}, err + } + if metricsPorts != nil { + container.Ports = append(container.Ports, metricsPorts...) + } + + tproxyEnabled, err := common.TransparentProxyEnabled(namespace, pod, w.EnableTransparentProxy) + if err != nil { + return corev1.Container{}, err + } + + // If not running in transparent proxy mode and in an OpenShift environment, + // skip setting the security context and let OpenShift set it for us. + // When transparent proxy is enabled, then consul-dataplane needs to run as our specific user + // so that traffic redirection will work. + if tproxyEnabled || !w.EnableOpenShift { + if pod.Spec.SecurityContext != nil { + // User container and consul-dataplane container cannot have the same UID. + if pod.Spec.SecurityContext.RunAsUser != nil && *pod.Spec.SecurityContext.RunAsUser == sidecarUserAndGroupID { + return corev1.Container{}, fmt.Errorf("pod's security context cannot have the same UID as consul-dataplane: %v", sidecarUserAndGroupID) + } + } + // Ensure that none of the user's containers have the same UID as consul-dataplane. At this point in injection the meshWebhook + // has only injected init containers so all containers defined in pod.Spec.Containers are from the user. + for _, c := range pod.Spec.Containers { + // User container and consul-dataplane container cannot have the same UID. 
+ if c.SecurityContext != nil && c.SecurityContext.RunAsUser != nil && *c.SecurityContext.RunAsUser == sidecarUserAndGroupID && c.Image != w.ImageConsulDataplane { + return corev1.Container{}, fmt.Errorf("container %q has runAsUser set to the same UID \"%d\" as consul-dataplane which is not allowed", c.Name, sidecarUserAndGroupID) + } + } + container.SecurityContext = &corev1.SecurityContext{ + RunAsUser: pointer.Int64(sidecarUserAndGroupID), + RunAsGroup: pointer.Int64(sidecarUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + AllowPrivilegeEscalation: pointer.Bool(false), + } + } + + return container, nil +} + +func (w *MeshWebhook) getContainerSidecarArgs(namespace corev1.Namespace, bearerTokenFile string, pod corev1.Pod) ([]string, error) { + envoyConcurrency := w.DefaultEnvoyProxyConcurrency + + // Check to see if the user has overriden concurrency via an annotation. + if envoyConcurrencyAnnotation, ok := pod.Annotations[constants.AnnotationEnvoyProxyConcurrency]; ok { + val, err := strconv.ParseUint(envoyConcurrencyAnnotation, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse annotation %q: %w", constants.AnnotationEnvoyProxyConcurrency, err) + } + envoyConcurrency = int(val) + } + + args := []string{ + "-addresses", w.ConsulAddress, + "-grpc-port=" + strconv.Itoa(w.ConsulConfig.GRPCPort), + "-log-level=" + w.LogLevel, + "-log-json=" + strconv.FormatBool(w.LogJSON), + "-envoy-concurrency=" + strconv.Itoa(envoyConcurrency), + } + + if w.SkipServerWatch { + args = append(args, "-server-watch-disabled=true") + } + + if w.AuthMethod != "" { + args = append(args, + "-credential-type=login", + "-login-auth-method="+w.AuthMethod, + "-login-bearer-token-path="+bearerTokenFile, + // We don't know the pod name at this time, so we must use environment variables to populate the login-meta instead. 
+ ) + if w.EnableNamespaces { + if w.EnableK8SNSMirroring { + args = append(args, "-login-namespace=default") + } else { + args = append(args, "-login-namespace="+w.consulNamespace(namespace.Name)) + } + } + if w.ConsulPartition != "" { + args = append(args, "-login-partition="+w.ConsulPartition) + } + } + if w.EnableNamespaces { + args = append(args, "-proxy-namespace="+w.consulNamespace(namespace.Name)) + } + if w.ConsulPartition != "" { + args = append(args, "-proxy-partition="+w.ConsulPartition) + } + if w.TLSEnabled { + if w.ConsulTLSServerName != "" { + args = append(args, "-tls-server-name="+w.ConsulTLSServerName) + } + if w.ConsulCACert != "" { + args = append(args, "-ca-certs="+constants.ConsulCAFile) + } + } else { + args = append(args, "-tls-disabled") + } + + // Configure the readiness port on the dataplane sidecar if proxy health checks are enabled. + if useProxyHealthCheck(pod) { + args = append(args, fmt.Sprintf("%s=%d", "-envoy-ready-bind-port", constants.ProxyDefaultHealthPort)) + } + + // The consul-dataplane HTTP listener always starts for graceful shutdown. 
To avoid port conflicts, the + // graceful port always needs to be set + gracefulPort, err := w.LifecycleConfig.GracefulPort(pod) + if err != nil { + return nil, fmt.Errorf("unable to determine proxy lifecycle graceful port: %w", err) + } + + args = append(args, fmt.Sprintf("-graceful-port=%d", gracefulPort)) + + enableProxyLifecycle, err := w.LifecycleConfig.EnableProxyLifecycle(pod) + if err != nil { + return nil, fmt.Errorf("unable to determine if proxy lifecycle management is enabled: %w", err) + } + if enableProxyLifecycle { + shutdownDrainListeners, err := w.LifecycleConfig.EnableShutdownDrainListeners(pod) + if err != nil { + return nil, fmt.Errorf("unable to determine if proxy lifecycle shutdown listener draining is enabled: %w", err) + } + if shutdownDrainListeners { + args = append(args, "-shutdown-drain-listeners") + } + + shutdownGracePeriodSeconds, err := w.LifecycleConfig.ShutdownGracePeriodSeconds(pod) + if err != nil { + return nil, fmt.Errorf("unable to determine proxy lifecycle shutdown grace period: %w", err) + } + args = append(args, fmt.Sprintf("-shutdown-grace-period-seconds=%d", shutdownGracePeriodSeconds)) + + gracefulShutdownPath := w.LifecycleConfig.GracefulShutdownPath(pod) + args = append(args, fmt.Sprintf("-graceful-shutdown-path=%s", gracefulShutdownPath)) + + startupGracePeriodSeconds, err := w.LifecycleConfig.StartupGracePeriodSeconds(pod) + if err != nil { + return nil, fmt.Errorf("unable to determine proxy lifecycle startup grace period: %w", err) + } + args = append(args, fmt.Sprintf("-startup-grace-period-seconds=%d", startupGracePeriodSeconds)) + + gracefulStartupPath := w.LifecycleConfig.GracefulStartupPath(pod) + args = append(args, fmt.Sprintf("-graceful-startup-path=%s", gracefulStartupPath)) + } + + // Set a default scrape path that can be overwritten by the annotation. 
+ prometheusScrapePath := w.MetricsConfig.PrometheusScrapePath(pod) + args = append(args, "-telemetry-prom-scrape-path="+prometheusScrapePath) + + metricsServer, err := w.MetricsConfig.ShouldRunMergedMetricsServer(pod) + if err != nil { + return nil, fmt.Errorf("unable to determine if merged metrics is enabled: %w", err) + } + if metricsServer { + mergedMetricsPort, err := w.MetricsConfig.MergedMetricsPort(pod) + if err != nil { + return nil, fmt.Errorf("unable to determine if merged metrics port: %w", err) + } + args = append(args, "-telemetry-prom-merge-port="+mergedMetricsPort) + + serviceMetricsPath := w.MetricsConfig.ServiceMetricsPath(pod) + serviceMetricsPort, err := w.MetricsConfig.ServiceMetricsPort(pod) + if err != nil { + return nil, fmt.Errorf("unable to determine if service metrics port: %w", err) + } + + if serviceMetricsPath != "" && serviceMetricsPort != "" { + args = append(args, "-telemetry-prom-service-metrics-url="+fmt.Sprintf("http://127.0.0.1:%s%s", serviceMetricsPort, serviceMetricsPath)) + } + + // Pull the TLS config from the relevant annotations. + var prometheusCAFile string + if raw, ok := pod.Annotations[constants.AnnotationPrometheusCAFile]; ok && raw != "" { + prometheusCAFile = raw + } + + var prometheusCAPath string + if raw, ok := pod.Annotations[constants.AnnotationPrometheusCAPath]; ok && raw != "" { + prometheusCAPath = raw + } + + var prometheusCertFile string + if raw, ok := pod.Annotations[constants.AnnotationPrometheusCertFile]; ok && raw != "" { + prometheusCertFile = raw + } + + var prometheusKeyFile string + if raw, ok := pod.Annotations[constants.AnnotationPrometheusKeyFile]; ok && raw != "" { + prometheusKeyFile = raw + } + + // Validate required Prometheus TLS config is present if set. 
+ if prometheusCAFile != "" || prometheusCAPath != "" || prometheusCertFile != "" || prometheusKeyFile != "" { + if prometheusCAFile == "" && prometheusCAPath == "" { + return nil, fmt.Errorf("must set one of %q or %q when providing prometheus TLS config", constants.AnnotationPrometheusCAFile, constants.AnnotationPrometheusCAPath) + } + if prometheusCertFile == "" { + return nil, fmt.Errorf("must set %q when providing prometheus TLS config", constants.AnnotationPrometheusCertFile) + } + if prometheusKeyFile == "" { + return nil, fmt.Errorf("must set %q when providing prometheus TLS config", constants.AnnotationPrometheusKeyFile) + } + // TLS config has been validated, add them to the consul-dataplane cmd args + args = append(args, "-telemetry-prom-ca-certs-file="+prometheusCAFile, + "-telemetry-prom-ca-certs-path="+prometheusCAPath, + "-telemetry-prom-cert-file="+prometheusCertFile, + "-telemetry-prom-key-file="+prometheusKeyFile) + } + } + + // If Consul DNS is enabled, we want to configure consul-dataplane to be the DNS proxy + // for Consul DNS in the pod. + dnsEnabled, err := consulDNSEnabled(namespace, pod, w.EnableConsulDNS, w.EnableTransparentProxy) + if err != nil { + return nil, err + } + if dnsEnabled { + args = append(args, "-consul-dns-bind-port="+strconv.Itoa(consulDataplaneDNSBindPort)) + } + + var envoyExtraArgs []string + extraArgs, annotationSet := pod.Annotations[constants.AnnotationEnvoyExtraArgs] + + if annotationSet || w.EnvoyExtraArgs != "" { + extraArgsToUse := w.EnvoyExtraArgs + + // Prefer args set by pod annotation over the flag to the consul-k8s binary (h.EnvoyExtraArgs). + if annotationSet { + extraArgsToUse = extraArgs + } + + // Split string into tokens. + // e.g. 
"--foo bar --boo baz" --> ["--foo", "bar", "--boo", "baz"] + tokens, err := shlex.Split(extraArgsToUse) + if err != nil { + return []string{}, err + } + for _, t := range tokens { + if strings.Contains(t, " ") { + t = strconv.Quote(t) + } + envoyExtraArgs = append(envoyExtraArgs, t) + } + } + if envoyExtraArgs != nil { + args = append(args, "--") + args = append(args, envoyExtraArgs...) + } + return args, nil +} + +func (w *MeshWebhook) sidecarResources(pod corev1.Pod) (corev1.ResourceRequirements, error) { + resources := corev1.ResourceRequirements{ + Limits: corev1.ResourceList{}, + Requests: corev1.ResourceList{}, + } + // zeroQuantity is used for comparison to see if a quantity was explicitly + // set. + var zeroQuantity resource.Quantity + + // NOTE: We only want to set the limit/request if the default or annotation + // was explicitly set. If it's not explicitly set, it will be the zero value + // which would show up in the pod spec as being explicitly set to zero if we + // set that key, e.g. "cpu" to zero. + // We want it to not show up in the pod spec at all if it's not explicitly + // set so that users aren't wondering why it's set to 0 when they didn't specify + // a request/limit. If they have explicitly set it to 0 then it will be set + // to 0 in the pod spec because we're doing a comparison to the zero-valued + // struct. + + // CPU Limit. + if anno, ok := pod.Annotations[constants.AnnotationSidecarProxyCPULimit]; ok { + cpuLimit, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", constants.AnnotationSidecarProxyCPULimit, anno, err) + } + resources.Limits[corev1.ResourceCPU] = cpuLimit + } else if w.DefaultProxyCPULimit != zeroQuantity { + resources.Limits[corev1.ResourceCPU] = w.DefaultProxyCPULimit + } + + // CPU Request. 
+ if anno, ok := pod.Annotations[constants.AnnotationSidecarProxyCPURequest]; ok { + cpuRequest, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", constants.AnnotationSidecarProxyCPURequest, anno, err) + } + resources.Requests[corev1.ResourceCPU] = cpuRequest + } else if w.DefaultProxyCPURequest != zeroQuantity { + resources.Requests[corev1.ResourceCPU] = w.DefaultProxyCPURequest + } + + // Memory Limit. + if anno, ok := pod.Annotations[constants.AnnotationSidecarProxyMemoryLimit]; ok { + memoryLimit, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", constants.AnnotationSidecarProxyMemoryLimit, anno, err) + } + resources.Limits[corev1.ResourceMemory] = memoryLimit + } else if w.DefaultProxyMemoryLimit != zeroQuantity { + resources.Limits[corev1.ResourceMemory] = w.DefaultProxyMemoryLimit + } + + // Memory Request. + if anno, ok := pod.Annotations[constants.AnnotationSidecarProxyMemoryRequest]; ok { + memoryRequest, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", constants.AnnotationSidecarProxyMemoryRequest, anno, err) + } + resources.Requests[corev1.ResourceMemory] = memoryRequest + } else if w.DefaultProxyMemoryRequest != zeroQuantity { + resources.Requests[corev1.ResourceMemory] = w.DefaultProxyMemoryRequest + } + + return resources, nil +} + +// useProxyHealthCheck returns true if the pod has the annotation 'consul.hashicorp.com/use-proxy-health-check' +// set to truthy values. 
+func useProxyHealthCheck(pod corev1.Pod) bool { + if v, ok := pod.Annotations[constants.AnnotationUseProxyHealthCheck]; ok { + useProxyHealthCheck, err := strconv.ParseBool(v) + if err != nil { + return false + } + return useProxyHealthCheck + } + return false +} + +// getMetricsPorts creates container ports for exposing services such as prometheus. +// Prometheus in particular needs a named port for use with the operator. +// https://github.com/hashicorp/consul-k8s/pull/1440 +func (w *MeshWebhook) getMetricsPorts(pod corev1.Pod) ([]corev1.ContainerPort, error) { + enableMetrics, err := w.MetricsConfig.EnableMetrics(pod) + if err != nil { + return nil, fmt.Errorf("error determining if metrics are enabled: %w", err) + } + if !enableMetrics { + return nil, nil + } + + prometheusScrapePort, err := w.MetricsConfig.PrometheusScrapePort(pod) + if err != nil { + return nil, fmt.Errorf("error parsing prometheus port from pod: %w", err) + } + if prometheusScrapePort == "" { + return nil, nil + } + + port, err := strconv.Atoi(prometheusScrapePort) + if err != nil { + return nil, fmt.Errorf("error parsing prometheus port from pod: %w", err) + } + + return []corev1.ContainerPort{ + { + Name: "prometheus", + ContainerPort: int32(port), + Protocol: corev1.ProtocolTCP, + }, + }, nil +} diff --git a/control-plane/connect-inject/webhookv2/consul_dataplane_sidecar_test.go b/control-plane/connect-inject/webhookv2/consul_dataplane_sidecar_test.go new file mode 100644 index 0000000000..3b5fb3c0c7 --- /dev/null +++ b/control-plane/connect-inject/webhookv2/consul_dataplane_sidecar_test.go @@ -0,0 +1,1291 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "fmt" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/lifecycle" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/metrics" + "github.com/hashicorp/consul-k8s/control-plane/consul" +) + +const nodeName = "test-node" + +func TestHandlerConsulDataplaneSidecar(t *testing.T) { + cases := map[string]struct { + webhookSetupFunc func(w *MeshWebhook) + additionalExpCmdArgs string + }{ + "default": { + webhookSetupFunc: nil, + additionalExpCmdArgs: " -tls-disabled -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "with custom gRPC port": { + webhookSetupFunc: func(w *MeshWebhook) { + w.ConsulConfig.GRPCPort = 8602 + }, + additionalExpCmdArgs: " -tls-disabled -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "with ACLs": { + webhookSetupFunc: func(w *MeshWebhook) { + w.AuthMethod = "test-auth-method" + }, + additionalExpCmdArgs: " -credential-type=login -login-auth-method=test-auth-method -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token " + + "-tls-disabled -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "with ACLs and namespace mirroring": { + webhookSetupFunc: func(w *MeshWebhook) { + w.AuthMethod = "test-auth-method" + w.EnableNamespaces = true + w.EnableK8SNSMirroring = true + }, + additionalExpCmdArgs: " -credential-type=login -login-auth-method=test-auth-method -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token " + + "-login-namespace=default -proxy-namespace=k8snamespace -tls-disabled -graceful-port=20600 
-telemetry-prom-scrape-path=/metrics", + }, + "with ACLs and single destination namespace": { + webhookSetupFunc: func(w *MeshWebhook) { + w.AuthMethod = "test-auth-method" + w.EnableNamespaces = true + w.ConsulDestinationNamespace = "test-ns" + }, + additionalExpCmdArgs: " -credential-type=login -login-auth-method=test-auth-method -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token " + + "-login-namespace=test-ns -proxy-namespace=test-ns -tls-disabled -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "with ACLs and partitions": { + webhookSetupFunc: func(w *MeshWebhook) { + w.AuthMethod = "test-auth-method" + w.ConsulPartition = "test-part" + }, + additionalExpCmdArgs: " -credential-type=login -login-auth-method=test-auth-method -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token " + + "-login-partition=test-part -proxy-partition=test-part -tls-disabled -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "with TLS and CA cert provided": { + webhookSetupFunc: func(w *MeshWebhook) { + w.TLSEnabled = true + w.ConsulTLSServerName = "server.dc1.consul" + w.ConsulCACert = "consul-ca-cert" + }, + additionalExpCmdArgs: " -tls-server-name=server.dc1.consul -ca-certs=/consul/mesh-inject/consul-ca.pem -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "with TLS and no CA cert provided": { + webhookSetupFunc: func(w *MeshWebhook) { + w.TLSEnabled = true + w.ConsulTLSServerName = "server.dc1.consul" + }, + additionalExpCmdArgs: " -tls-server-name=server.dc1.consul -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "with single destination namespace": { + webhookSetupFunc: func(w *MeshWebhook) { + w.EnableNamespaces = true + w.ConsulDestinationNamespace = "consul-namespace" + }, + additionalExpCmdArgs: " -proxy-namespace=consul-namespace -tls-disabled -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "with namespace mirroring": { + webhookSetupFunc: 
func(w *MeshWebhook) { + w.EnableNamespaces = true + w.EnableK8SNSMirroring = true + }, + additionalExpCmdArgs: " -proxy-namespace=k8snamespace -tls-disabled -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "with namespace mirroring prefix": { + webhookSetupFunc: func(w *MeshWebhook) { + w.EnableNamespaces = true + w.EnableK8SNSMirroring = true + w.K8SNSMirroringPrefix = "foo-" + }, + additionalExpCmdArgs: " -proxy-namespace=foo-k8snamespace -tls-disabled -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "with partitions": { + webhookSetupFunc: func(w *MeshWebhook) { + w.ConsulPartition = "partition-1" + }, + additionalExpCmdArgs: " -proxy-partition=partition-1 -tls-disabled -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "with different log level": { + webhookSetupFunc: func(w *MeshWebhook) { + w.LogLevel = "debug" + }, + additionalExpCmdArgs: " -tls-disabled -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "with different log level and log json": { + webhookSetupFunc: func(w *MeshWebhook) { + w.LogLevel = "debug" + w.LogJSON = true + }, + additionalExpCmdArgs: " -tls-disabled -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "skip server watch enabled": { + webhookSetupFunc: func(w *MeshWebhook) { + w.SkipServerWatch = true + }, + additionalExpCmdArgs: " -server-watch-disabled=true -tls-disabled -graceful-port=20600 -telemetry-prom-scrape-path=/metrics", + }, + "custom prometheus scrape path": { + webhookSetupFunc: func(w *MeshWebhook) { + w.MetricsConfig.DefaultPrometheusScrapePath = "/scrape-path" // Simulate what would be passed as a flag + }, + additionalExpCmdArgs: " -tls-disabled -graceful-port=20600 -telemetry-prom-scrape-path=/scrape-path", + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + w := &MeshWebhook{ + ConsulAddress: "1.1.1.1", + ConsulConfig: &consul.Config{GRPCPort: 8502}, + LogLevel: "info", + LogJSON: false, + } + if 
c.webhookSetupFunc != nil { + c.webhookSetupFunc(w) + } + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Annotations: map[string]string{ + constants.AnnotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + { + Name: "web-side", + }, + { + Name: "auth-method-secret", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "service-account-secret", + MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", + }, + }, + }, + }, + ServiceAccountName: "web", + NodeName: nodeName, + }, + } + + container, err := w.consulDataplaneSidecar(testNS, pod) + require.NoError(t, err) + expCmd := "-addresses 1.1.1.1 -grpc-port=" + strconv.Itoa(w.ConsulConfig.GRPCPort) + + " -log-level=" + w.LogLevel + " -log-json=" + strconv.FormatBool(w.LogJSON) + " -envoy-concurrency=0" + c.additionalExpCmdArgs + require.Equal(t, expCmd, strings.Join(container.Args, " ")) + + if w.AuthMethod != "" { + require.Equal(t, container.VolumeMounts, []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/mesh-inject", + }, + { + Name: "service-account-secret", + MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", + }, + }) + } else { + require.Equal(t, container.VolumeMounts, []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/mesh-inject", + }, + }) + } + + expectedProbe := &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(constants.ProxyDefaultInboundPort), + }, + }, + InitialDelaySeconds: 1, + } + require.Equal(t, expectedProbe, container.ReadinessProbe) + require.Nil(t, container.StartupProbe) + require.Len(t, container.Env, 7) + require.Equal(t, container.Env[0].Name, "TMPDIR") + require.Equal(t, container.Env[0].Value, "/consul/mesh-inject") + require.Equal(t, container.Env[2].Name, "POD_NAME") + require.Equal(t, container.Env[3].Name, "POD_NAMESPACE") + require.Equal(t, container.Env[4].Name, "DP_PROXY_ID") + 
require.Equal(t, container.Env[4].Value, "$(POD_NAME)") + require.Equal(t, container.Env[5].Name, "DP_CREDENTIAL_LOGIN_META") + require.Equal(t, container.Env[5].Value, "pod=$(POD_NAMESPACE)/$(POD_NAME)") + }) + } +} + +func TestHandlerConsulDataplaneSidecar_Concurrency(t *testing.T) { + cases := map[string]struct { + annotations map[string]string + expFlags string + expErr string + }{ + "default settings, no annotations": { + annotations: map[string]string{ + constants.AnnotationService: "foo", + }, + expFlags: "-envoy-concurrency=0", + }, + "default settings, annotation override": { + annotations: map[string]string{ + constants.AnnotationService: "foo", + constants.AnnotationEnvoyProxyConcurrency: "42", + }, + expFlags: "-envoy-concurrency=42", + }, + "default settings, invalid concurrency annotation negative number": { + annotations: map[string]string{ + constants.AnnotationService: "foo", + constants.AnnotationEnvoyProxyConcurrency: "-42", + }, + expErr: "unable to parse annotation \"consul.hashicorp.com/consul-envoy-proxy-concurrency\": strconv.ParseUint: parsing \"-42\": invalid syntax", + }, + "default settings, not-parseable concurrency annotation": { + annotations: map[string]string{ + constants.AnnotationService: "foo", + constants.AnnotationEnvoyProxyConcurrency: "not-int", + }, + expErr: "unable to parse annotation \"consul.hashicorp.com/consul-envoy-proxy-concurrency\": strconv.ParseUint: parsing \"not-int\": invalid syntax", + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + h := MeshWebhook{ + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, + } + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: c.annotations, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := h.consulDataplaneSidecar(testNS, pod) + if c.expErr != "" { + require.EqualError(t, err, c.expErr) + } else { + require.NoError(t, err) + require.Contains(t, 
strings.Join(container.Args, " "), c.expFlags) + } + }) + } +} + +// Test that we pass the dns proxy flag to dataplane correctly. +func TestHandlerConsulDataplaneSidecar_DNSProxy(t *testing.T) { + + // We only want the flag passed when DNS and tproxy are both enabled. DNS/tproxy can + // both be enabled/disabled with annotations/labels on the pod and namespace and then globally + // through the helm chart. To test this we use an outer loop with the possible DNS settings and then + // and inner loop with possible tproxy settings. + dnsCases := []struct { + GlobalConsulDNS bool + NamespaceDNS *bool + PodDNS *bool + ExpEnabled bool + }{ + { + GlobalConsulDNS: false, + ExpEnabled: false, + }, + { + GlobalConsulDNS: true, + ExpEnabled: true, + }, + { + GlobalConsulDNS: false, + NamespaceDNS: boolPtr(true), + ExpEnabled: true, + }, + { + GlobalConsulDNS: false, + PodDNS: boolPtr(true), + ExpEnabled: true, + }, + } + tproxyCases := []struct { + GlobalTProxy bool + NamespaceTProxy *bool + PodTProxy *bool + ExpEnabled bool + }{ + { + GlobalTProxy: false, + ExpEnabled: false, + }, + { + GlobalTProxy: true, + ExpEnabled: true, + }, + { + GlobalTProxy: false, + NamespaceTProxy: boolPtr(true), + ExpEnabled: true, + }, + { + GlobalTProxy: false, + PodTProxy: boolPtr(true), + ExpEnabled: true, + }, + } + + // Outer loop is permutations of dns being enabled. Inner loop is permutations of tproxy being enabled. + // Both must be enabled for dns to be enabled. + for i, dnsCase := range dnsCases { + for j, tproxyCase := range tproxyCases { + t.Run(fmt.Sprintf("dns=%d,tproxy=%d", i, j), func(t *testing.T) { + + // Test setup. 
+ h := MeshWebhook{ + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, + EnableTransparentProxy: tproxyCase.GlobalTProxy, + EnableConsulDNS: dnsCase.GlobalConsulDNS, + } + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + if dnsCase.PodDNS != nil { + pod.Annotations[constants.KeyConsulDNS] = strconv.FormatBool(*dnsCase.PodDNS) + } + if tproxyCase.PodTProxy != nil { + pod.Annotations[constants.KeyTransparentProxy] = strconv.FormatBool(*tproxyCase.PodTProxy) + } + + ns := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: k8sNamespace, + Labels: map[string]string{}, + }, + } + if dnsCase.NamespaceDNS != nil { + ns.Labels[constants.KeyConsulDNS] = strconv.FormatBool(*dnsCase.NamespaceDNS) + } + if tproxyCase.NamespaceTProxy != nil { + ns.Labels[constants.KeyTransparentProxy] = strconv.FormatBool(*tproxyCase.NamespaceTProxy) + } + + // Actual test here. + container, err := h.consulDataplaneSidecar(ns, pod) + require.NoError(t, err) + // Flag should only be passed if both tproxy and dns are enabled. 
+ if tproxyCase.ExpEnabled && dnsCase.ExpEnabled { + require.Contains(t, container.Args, "-consul-dns-bind-port=8600") + } else { + require.NotContains(t, container.Args, "-consul-dns-bind-port=8600") + } + }) + } + } +} + +func TestHandlerConsulDataplaneSidecar_ProxyHealthCheck(t *testing.T) { + h := MeshWebhook{ + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, + ConsulAddress: "1.1.1.1", + LogLevel: "info", + } + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationUseProxyHealthCheck: "true", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := h.consulDataplaneSidecar(testNS, pod) + expectedProbe := &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(21000), + Path: "/ready", + }, + }, + InitialDelaySeconds: 1, + } + require.NoError(t, err) + require.Contains(t, container.Args, "-envoy-ready-bind-port=21000") + require.Equal(t, expectedProbe, container.ReadinessProbe) + require.Contains(t, container.Env, corev1.EnvVar{ + Name: "DP_ENVOY_READY_BIND_ADDRESS", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIP"}, + }, + }) + require.Contains(t, container.Ports, corev1.ContainerPort{ + Name: "proxy-health", + ContainerPort: 21000, + }) +} + +func TestHandlerConsulDataplaneSidecar_withSecurityContext(t *testing.T) { + cases := map[string]struct { + tproxyEnabled bool + openShiftEnabled bool + expSecurityContext *corev1.SecurityContext + }{ + "tproxy disabled; openshift disabled": { + tproxyEnabled: false, + openShiftEnabled: false, + expSecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(sidecarUserAndGroupID), + RunAsGroup: pointer.Int64(sidecarUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + AllowPrivilegeEscalation: pointer.Bool(false), + }, + }, + 
"tproxy enabled; openshift disabled": { + tproxyEnabled: true, + openShiftEnabled: false, + expSecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(sidecarUserAndGroupID), + RunAsGroup: pointer.Int64(sidecarUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + AllowPrivilegeEscalation: pointer.Bool(false), + }, + }, + "tproxy disabled; openshift enabled": { + tproxyEnabled: false, + openShiftEnabled: true, + expSecurityContext: nil, + }, + "tproxy enabled; openshift enabled": { + tproxyEnabled: true, + openShiftEnabled: true, + expSecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(sidecarUserAndGroupID), + RunAsGroup: pointer.Int64(sidecarUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + AllowPrivilegeEscalation: pointer.Bool(false), + }, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + w := MeshWebhook{ + EnableTransparentProxy: c.tproxyEnabled, + EnableOpenShift: c.openShiftEnabled, + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, + } + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + ec, err := w.consulDataplaneSidecar(testNS, pod) + require.NoError(t, err) + require.Equal(t, c.expSecurityContext, ec.SecurityContext) + }) + } +} + +// Test that if the user specifies a pod security context with the same uid as `sidecarUserAndGroupID` that we return +// an error to the meshWebhook. 
+func TestHandlerConsulDataplaneSidecar_FailsWithDuplicatePodSecurityContextUID(t *testing.T) { + require := require.New(t) + w := MeshWebhook{ + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, + } + pod := corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + SecurityContext: &corev1.PodSecurityContext{ + RunAsUser: pointer.Int64(sidecarUserAndGroupID), + }, + }, + } + _, err := w.consulDataplaneSidecar(testNS, pod) + require.EqualError(err, fmt.Sprintf("pod's security context cannot have the same UID as consul-dataplane: %v", sidecarUserAndGroupID)) +} + +// Test that if the user specifies a container with security context with the same uid as `sidecarUserAndGroupID` that we +// return an error to the meshWebhook. If a container using the consul-dataplane image has the same uid, we don't return an error +// because in multiport pod there can be multiple consul-dataplane sidecars. +func TestHandlerConsulDataplaneSidecar_FailsWithDuplicateContainerSecurityContextUID(t *testing.T) { + cases := []struct { + name string + pod corev1.Pod + webhook MeshWebhook + expErr bool + expErrMessage string + }{ + { + name: "fails with non consul-dataplane image", + pod: corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + // Setting RunAsUser: 1 should succeed. + SecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(1), + }, + }, + { + Name: "app", + // Setting RunAsUser: 5995 should fail. 
+ SecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(sidecarUserAndGroupID), + }, + Image: "not-consul-dataplane", + }, + }, + }, + }, + webhook: MeshWebhook{}, + expErr: true, + expErrMessage: fmt.Sprintf("container \"app\" has runAsUser set to the same UID \"%d\" as consul-dataplane which is not allowed", sidecarUserAndGroupID), + }, + { + name: "doesn't fail with envoy image", + pod: corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + // Setting RunAsUser: 1 should succeed. + SecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(1), + }, + }, + { + Name: "sidecar", + // Setting RunAsUser: 5995 should succeed if the image matches h.ImageConsulDataplane. + SecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(sidecarUserAndGroupID), + }, + Image: "envoy", + }, + }, + }, + }, + webhook: MeshWebhook{ + ImageConsulDataplane: "envoy", + }, + expErr: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + tc.webhook.ConsulConfig = &consul.Config{HTTPPort: 8500, GRPCPort: 8502} + _, err := tc.webhook.consulDataplaneSidecar(testNS, tc.pod) + if tc.expErr { + require.EqualError(t, err, tc.expErrMessage) + } else { + require.NoError(t, err) + } + }) + } +} + +// Test that we can pass extra args to envoy via the extraEnvoyArgs flag +// or via pod annotations. When arguments are passed in both ways, the +// arguments set via pod annotations are used. 
+func TestHandlerConsulDataplaneSidecar_EnvoyExtraArgs(t *testing.T) { + cases := []struct { + name string + envoyExtraArgs string + pod *corev1.Pod + expectedExtraArgs string + }{ + { + name: "no extra options provided", + envoyExtraArgs: "", + pod: &corev1.Pod{}, + expectedExtraArgs: "", + }, + { + name: "via flag: extra log-level option", + envoyExtraArgs: "--log-level debug", + pod: &corev1.Pod{}, + expectedExtraArgs: "-- --log-level debug", + }, + { + name: "via flag: multiple arguments with quotes", + envoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + pod: &corev1.Pod{}, + expectedExtraArgs: "-- --log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + }, + { + name: "via annotation: multiple arguments with quotes", + envoyExtraArgs: "", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + }, + }, + }, + expectedExtraArgs: "-- --log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + }, + { + name: "via flag and annotation: should prefer setting via the annotation", + envoyExtraArgs: "this should be overwritten", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + }, + }, + }, + expectedExtraArgs: "-- --log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + h := MeshWebhook{ + ImageConsul: "hashicorp/consul:latest", + ImageConsulDataplane: "hashicorp/consul-k8s:latest", + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, + EnvoyExtraArgs: tc.envoyExtraArgs, + } + + c, err := h.consulDataplaneSidecar(testNS, *tc.pod) + require.NoError(t, err) + require.Contains(t, strings.Join(c.Args, " "), tc.expectedExtraArgs) + }) + } +} + +func 
TestHandlerConsulDataplaneSidecar_UserVolumeMounts(t *testing.T) { + cases := []struct { + name string + pod corev1.Pod + expectedContainerVolumeMounts []corev1.VolumeMount + expErr string + }{ + { + name: "able to set a sidecar container volume mount via annotation", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + constants.AnnotationConsulSidecarUserVolumeMount: "[{\"name\": \"tls-cert\", \"mountPath\": \"/custom/path\"}, {\"name\": \"tls-ca\", \"mountPath\": \"/custom/path2\"}]", + }, + }, + }, + expectedContainerVolumeMounts: []corev1.VolumeMount{ + { + Name: "consul-mesh-inject-data", + MountPath: "/consul/mesh-inject", + }, + { + Name: "tls-cert", + MountPath: "/custom/path", + }, + { + Name: "tls-ca", + MountPath: "/custom/path2", + }, + }, + }, + { + name: "invalid annotation results in error", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + constants.AnnotationConsulSidecarUserVolumeMount: "[abcdefg]", + }, + }, + }, + expErr: "invalid character 'a' looking ", + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + h := MeshWebhook{ + ImageConsul: "hashicorp/consul:latest", + ImageConsulDataplane: "hashicorp/consul-k8s:latest", + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, + } + c, err := h.consulDataplaneSidecar(testNS, tc.pod) + if tc.expErr == "" { + require.NoError(t, err) + require.Equal(t, tc.expectedContainerVolumeMounts, c.VolumeMounts) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expErr) + } + }) + } +} + +func TestHandlerConsulDataplaneSidecar_Resources(t *testing.T) { + mem1 := resource.MustParse("100Mi") + mem2 := resource.MustParse("200Mi") + cpu1 := resource.MustParse("100m") + cpu2 := 
resource.MustParse("200m") + zero := resource.MustParse("0") + + cases := map[string]struct { + webhook MeshWebhook + annotations map[string]string + expResources corev1.ResourceRequirements + expErr string + }{ + "no defaults, no annotations": { + webhook: MeshWebhook{}, + annotations: nil, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{}, + Requests: corev1.ResourceList{}, + }, + }, + "all defaults, no annotations": { + webhook: MeshWebhook{ + DefaultProxyCPURequest: cpu1, + DefaultProxyCPULimit: cpu2, + DefaultProxyMemoryRequest: mem1, + DefaultProxyMemoryLimit: mem2, + }, + annotations: nil, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "no defaults, all annotations": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + constants.AnnotationSidecarProxyCPURequest: "100m", + constants.AnnotationSidecarProxyMemoryRequest: "100Mi", + constants.AnnotationSidecarProxyCPULimit: "200m", + constants.AnnotationSidecarProxyMemoryLimit: "200Mi", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "annotations override defaults": { + webhook: MeshWebhook{ + DefaultProxyCPURequest: zero, + DefaultProxyCPULimit: zero, + DefaultProxyMemoryRequest: zero, + DefaultProxyMemoryLimit: zero, + }, + annotations: map[string]string{ + constants.AnnotationSidecarProxyCPURequest: "100m", + constants.AnnotationSidecarProxyMemoryRequest: "100Mi", + constants.AnnotationSidecarProxyCPULimit: "200m", + constants.AnnotationSidecarProxyMemoryLimit: "200Mi", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: 
cpu2, + corev1.ResourceMemory: mem2, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "defaults set to zero, no annotations": { + webhook: MeshWebhook{ + DefaultProxyCPURequest: zero, + DefaultProxyCPULimit: zero, + DefaultProxyMemoryRequest: zero, + DefaultProxyMemoryLimit: zero, + }, + annotations: nil, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + "annotations set to 0": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + constants.AnnotationSidecarProxyCPURequest: "0", + constants.AnnotationSidecarProxyMemoryRequest: "0", + constants.AnnotationSidecarProxyCPULimit: "0", + constants.AnnotationSidecarProxyMemoryLimit: "0", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + "invalid cpu request": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + constants.AnnotationSidecarProxyCPURequest: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-cpu-request:\"invalid\": quantities must match the regular expression", + }, + "invalid cpu limit": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + constants.AnnotationSidecarProxyCPULimit: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-cpu-limit:\"invalid\": quantities must match the regular expression", + }, + "invalid memory request": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + constants.AnnotationSidecarProxyMemoryRequest: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-memory-request:\"invalid\": quantities 
must match the regular expression", + }, + "invalid memory limit": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + constants.AnnotationSidecarProxyMemoryLimit: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-memory-limit:\"invalid\": quantities must match the regular expression", + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + c.webhook.ConsulConfig = &consul.Config{HTTPPort: 8500, GRPCPort: 8502} + require := require.New(tt) + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: c.annotations, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := c.webhook.consulDataplaneSidecar(testNS, pod) + if c.expErr != "" { + require.NotNil(err) + require.Contains(err.Error(), c.expErr) + } else { + require.NoError(err) + require.Equal(c.expResources, container.Resources) + } + }) + } +} + +func TestHandlerConsulDataplaneSidecar_Metrics(t *testing.T) { + cases := []struct { + name string + pod corev1.Pod + expCmdArgs string + expPorts []corev1.ContainerPort + expErr string + }{ + { + name: "default", + pod: corev1.Pod{}, + expCmdArgs: "", + }, + { + name: "turning on merged metrics", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "web", + constants.AnnotationEnableMetrics: "true", + constants.AnnotationEnableMetricsMerging: "true", + constants.AnnotationMergedMetricsPort: "20100", + constants.AnnotationPort: "1234", + constants.AnnotationPrometheusScrapePath: "/scrape-path", + }, + }, + }, + expCmdArgs: "-telemetry-prom-scrape-path=/scrape-path -telemetry-prom-merge-port=20100 -telemetry-prom-service-metrics-url=http://127.0.0.1:1234/metrics", + expPorts: []corev1.ContainerPort{ + { + Name: "prometheus", + ContainerPort: 20200, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + { + name: "metrics with prometheus port override", + pod: corev1.Pod{ 
+ ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "web", + constants.AnnotationEnableMetrics: "true", + constants.AnnotationEnableMetricsMerging: "true", + constants.AnnotationMergedMetricsPort: "20123", + constants.AnnotationPort: "1234", + constants.AnnotationPrometheusScrapePath: "/scrape-path", + constants.AnnotationPrometheusScrapePort: "6789", + }, + }, + }, + expCmdArgs: "-telemetry-prom-scrape-path=/scrape-path -telemetry-prom-merge-port=20123 -telemetry-prom-service-metrics-url=http://127.0.0.1:1234/metrics", + expPorts: []corev1.ContainerPort{ + { + Name: "prometheus", + ContainerPort: 6789, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + { + name: "merged metrics with TLS enabled", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "web", + constants.AnnotationEnableMetrics: "true", + constants.AnnotationEnableMetricsMerging: "true", + constants.AnnotationMergedMetricsPort: "20100", + constants.AnnotationPort: "1234", + constants.AnnotationPrometheusScrapePath: "/scrape-path", + constants.AnnotationPrometheusCAFile: "/certs/ca.crt", + constants.AnnotationPrometheusCAPath: "/certs/ca", + constants.AnnotationPrometheusCertFile: "/certs/server.crt", + constants.AnnotationPrometheusKeyFile: "/certs/key.pem", + }, + }, + }, + expCmdArgs: "-telemetry-prom-scrape-path=/scrape-path -telemetry-prom-merge-port=20100 -telemetry-prom-service-metrics-url=http://127.0.0.1:1234/metrics -telemetry-prom-ca-certs-file=/certs/ca.crt -telemetry-prom-ca-certs-path=/certs/ca -telemetry-prom-cert-file=/certs/server.crt -telemetry-prom-key-file=/certs/key.pem", + expPorts: []corev1.ContainerPort{ + { + Name: "prometheus", + ContainerPort: 20200, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + { + name: "merge metrics with TLS enabled, missing CA gives an error", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + 
constants.AnnotationService: "web", + constants.AnnotationEnableMetrics: "true", + constants.AnnotationEnableMetricsMerging: "true", + constants.AnnotationMergedMetricsPort: "20100", + constants.AnnotationPort: "1234", + constants.AnnotationPrometheusScrapePath: "/scrape-path", + constants.AnnotationPrometheusCertFile: "/certs/server.crt", + constants.AnnotationPrometheusKeyFile: "/certs/key.pem", + }, + }, + }, + expCmdArgs: "", + expErr: fmt.Sprintf("must set one of %q or %q when providing prometheus TLS config", constants.AnnotationPrometheusCAFile, constants.AnnotationPrometheusCAPath), + }, + { + name: "merge metrics with TLS enabled, missing cert gives an error", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "web", + constants.AnnotationEnableMetrics: "true", + constants.AnnotationEnableMetricsMerging: "true", + constants.AnnotationMergedMetricsPort: "20100", + constants.AnnotationPort: "1234", + constants.AnnotationPrometheusScrapePath: "/scrape-path", + constants.AnnotationPrometheusCAFile: "/certs/ca.crt", + constants.AnnotationPrometheusKeyFile: "/certs/key.pem", + }, + }, + }, + expCmdArgs: "", + expErr: fmt.Sprintf("must set %q when providing prometheus TLS config", constants.AnnotationPrometheusCertFile), + }, + { + name: "merge metrics with TLS enabled, missing key file gives an error", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "web", + constants.AnnotationEnableMetrics: "true", + constants.AnnotationEnableMetricsMerging: "true", + constants.AnnotationMergedMetricsPort: "20100", + constants.AnnotationPort: "1234", + constants.AnnotationPrometheusScrapePath: "/scrape-path", + constants.AnnotationPrometheusCAPath: "/certs/ca", + constants.AnnotationPrometheusCertFile: "/certs/server.crt", + }, + }, + }, + expCmdArgs: "", + expErr: fmt.Sprintf("must set %q when providing prometheus TLS config", 
constants.AnnotationPrometheusKeyFile), + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + h := MeshWebhook{ + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, + MetricsConfig: metrics.Config{ + // These are all the default values passed from the CLI + DefaultPrometheusScrapePort: "20200", + DefaultPrometheusScrapePath: "/metrics", + DefaultMergedMetricsPort: "20100", + }, + } + container, err := h.consulDataplaneSidecar(testNS, c.pod) + if c.expErr != "" { + require.NotNil(t, err) + require.Contains(t, err.Error(), c.expErr) + } else { + require.NoError(t, err) + require.Contains(t, strings.Join(container.Args, " "), c.expCmdArgs) + if c.expPorts != nil { + require.ElementsMatch(t, container.Ports, c.expPorts) + } + } + }) + } +} + +func TestHandlerConsulDataplaneSidecar_Lifecycle(t *testing.T) { + gracefulShutdownSeconds := 10 + gracefulStartupSeconds := 10 + gracefulPort := "20307" + gracefulShutdownPath := "/exit" + gracefulStartupPath := "/start" + + cases := []struct { + name string + webhook MeshWebhook + annotations map[string]string + expCmdArgs string + expErr string + }{ + { + name: "no defaults, no annotations", + webhook: MeshWebhook{}, + annotations: nil, + expCmdArgs: "", + }, + { + name: "all defaults, no annotations", + webhook: MeshWebhook{ + LifecycleConfig: lifecycle.Config{ + DefaultEnableProxyLifecycle: true, + DefaultEnableShutdownDrainListeners: true, + DefaultShutdownGracePeriodSeconds: gracefulShutdownSeconds, + DefaultStartupGracePeriodSeconds: gracefulStartupSeconds, + DefaultGracefulPort: gracefulPort, + DefaultGracefulShutdownPath: gracefulShutdownPath, + DefaultGracefulStartupPath: gracefulStartupPath, + }, + }, + annotations: nil, + expCmdArgs: "-graceful-port=20307 -shutdown-drain-listeners -shutdown-grace-period-seconds=10 -graceful-shutdown-path=/exit -startup-grace-period-seconds=10 -graceful-startup-path=/start", + }, + { + name: "no defaults, all annotations", + webhook: MeshWebhook{}, + 
annotations: map[string]string{ + constants.AnnotationEnableSidecarProxyLifecycle: "true", + constants.AnnotationEnableSidecarProxyLifecycleShutdownDrainListeners: "true", + constants.AnnotationSidecarProxyLifecycleShutdownGracePeriodSeconds: fmt.Sprint(gracefulShutdownSeconds), + constants.AnnotationSidecarProxyLifecycleStartupGracePeriodSeconds: fmt.Sprint(gracefulStartupSeconds), + constants.AnnotationSidecarProxyLifecycleGracefulPort: gracefulPort, + constants.AnnotationSidecarProxyLifecycleGracefulShutdownPath: gracefulShutdownPath, + constants.AnnotationSidecarProxyLifecycleGracefulStartupPath: gracefulStartupPath, + }, + expCmdArgs: "-graceful-port=20307 -shutdown-drain-listeners -shutdown-grace-period-seconds=10 -graceful-shutdown-path=/exit -startup-grace-period-seconds=10 -graceful-startup-path=/start", + }, + { + name: "annotations override defaults", + webhook: MeshWebhook{ + LifecycleConfig: lifecycle.Config{ + DefaultEnableProxyLifecycle: false, + DefaultEnableShutdownDrainListeners: true, + DefaultShutdownGracePeriodSeconds: gracefulShutdownSeconds, + DefaultStartupGracePeriodSeconds: gracefulStartupSeconds, + DefaultGracefulPort: gracefulPort, + DefaultGracefulShutdownPath: gracefulShutdownPath, + DefaultGracefulStartupPath: gracefulStartupPath, + }, + }, + annotations: map[string]string{ + constants.AnnotationEnableSidecarProxyLifecycle: "true", + constants.AnnotationEnableSidecarProxyLifecycleShutdownDrainListeners: "false", + constants.AnnotationSidecarProxyLifecycleShutdownGracePeriodSeconds: fmt.Sprint(gracefulShutdownSeconds + 5), + constants.AnnotationSidecarProxyLifecycleStartupGracePeriodSeconds: fmt.Sprint(gracefulStartupSeconds + 5), + constants.AnnotationSidecarProxyLifecycleGracefulPort: "20317", + constants.AnnotationSidecarProxyLifecycleGracefulShutdownPath: "/foo", + constants.AnnotationSidecarProxyLifecycleGracefulStartupPath: "/bar", + }, + expCmdArgs: "-graceful-port=20317 -shutdown-grace-period-seconds=15 
-graceful-shutdown-path=/foo -startup-grace-period-seconds=15 -graceful-startup-path=/bar", + }, + { + name: "lifecycle disabled, no annotations", + webhook: MeshWebhook{ + LifecycleConfig: lifecycle.Config{ + DefaultEnableProxyLifecycle: false, + DefaultEnableShutdownDrainListeners: true, + DefaultShutdownGracePeriodSeconds: gracefulShutdownSeconds, + DefaultStartupGracePeriodSeconds: gracefulStartupSeconds, + DefaultGracefulPort: gracefulPort, + DefaultGracefulShutdownPath: gracefulShutdownPath, + DefaultGracefulStartupPath: gracefulStartupPath, + }, + }, + annotations: nil, + expCmdArgs: "-graceful-port=20307", + }, + { + name: "lifecycle enabled, defaults omitted, no annotations", + webhook: MeshWebhook{ + LifecycleConfig: lifecycle.Config{ + DefaultEnableProxyLifecycle: true, + }, + }, + annotations: nil, + expCmdArgs: "", + }, + { + name: "annotations disable lifecycle default", + webhook: MeshWebhook{ + LifecycleConfig: lifecycle.Config{ + DefaultEnableProxyLifecycle: true, + DefaultEnableShutdownDrainListeners: true, + DefaultShutdownGracePeriodSeconds: gracefulShutdownSeconds, + DefaultStartupGracePeriodSeconds: gracefulStartupSeconds, + DefaultGracefulPort: gracefulPort, + DefaultGracefulShutdownPath: gracefulShutdownPath, + DefaultGracefulStartupPath: gracefulStartupPath, + }, + }, + annotations: map[string]string{ + constants.AnnotationEnableSidecarProxyLifecycle: "false", + }, + expCmdArgs: "-graceful-port=20307", + }, + { + name: "annotations skip graceful shutdown", + webhook: MeshWebhook{ + LifecycleConfig: lifecycle.Config{ + DefaultEnableProxyLifecycle: false, + DefaultEnableShutdownDrainListeners: true, + DefaultShutdownGracePeriodSeconds: gracefulShutdownSeconds, + }, + }, + annotations: map[string]string{ + constants.AnnotationEnableSidecarProxyLifecycle: "false", + constants.AnnotationEnableSidecarProxyLifecycleShutdownDrainListeners: "false", + constants.AnnotationSidecarProxyLifecycleShutdownGracePeriodSeconds: "0", + }, + expCmdArgs: "", + 
}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + c.webhook.ConsulConfig = &consul.Config{HTTPPort: 8500, GRPCPort: 8502} + require := require.New(t) + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: c.annotations, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := c.webhook.consulDataplaneSidecar(testNS, pod) + if c.expErr != "" { + require.NotNil(err) + require.Contains(err.Error(), c.expErr) + } else { + require.NoError(err) + require.Contains(strings.Join(container.Args, " "), c.expCmdArgs) + } + }) + } +} + +// boolPtr returns pointer to b. +func boolPtr(b bool) *bool { + return &b +} diff --git a/control-plane/connect-inject/webhookv2/container_env.go b/control-plane/connect-inject/webhookv2/container_env.go new file mode 100644 index 0000000000..b612b3c6aa --- /dev/null +++ b/control-plane/connect-inject/webhookv2/container_env.go @@ -0,0 +1,42 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "fmt" + "strconv" + "strings" + + corev1 "k8s.io/api/core/v1" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" +) + +func (w *MeshWebhook) containerEnvVars(pod corev1.Pod) ([]corev1.EnvVar, error) { + destinations, err := common.ProcessPodDestinationsForMeshWebhook(pod) + if err != nil { + return nil, fmt.Errorf("error processing the destination for container environment variable creation: %s", err.Error()) + } + if destinations == nil { + return nil, nil + } + + var result []corev1.EnvVar + for _, destination := range destinations.Destinations { + serviceName := strings.TrimSpace(destination.DestinationRef.Name) + serviceName = strings.ToUpper(strings.Replace(serviceName, "-", "_", -1)) + portName := strings.TrimSpace(destination.DestinationPort) + portName = strings.ToUpper(strings.Replace(portName, "-", "_", -1)) + + result = append(result, corev1.EnvVar{ + Name: fmt.Sprintf("%s_%s_CONNECT_SERVICE_HOST", serviceName, portName), + Value: destination.GetIpPort().Ip, + }, corev1.EnvVar{ + Name: fmt.Sprintf("%s_%s_CONNECT_SERVICE_PORT", serviceName, portName), + Value: strconv.Itoa(int(destination.GetIpPort().Port)), + }) + } + + return result, nil +} diff --git a/control-plane/connect-inject/webhookv2/container_env_test.go b/control-plane/connect-inject/webhookv2/container_env_test.go new file mode 100644 index 0000000000..01f5b1f82e --- /dev/null +++ b/control-plane/connect-inject/webhookv2/container_env_test.go @@ -0,0 +1,78 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" +) + +func TestContainerEnvVars(t *testing.T) { + cases := []struct { + Name string + Upstream string + ExpectError bool + }{ + { + // TODO: This will not error out when dcs are supported + Name: "Upstream with datacenter", + Upstream: "myPort.static-server:7890:dc1", + ExpectError: true, + }, + { + Name: "Upstream without datacenter", + Upstream: "myPort.static-server:7890", + }, + { + // TODO: This will not error out when dcs are supported + Name: "Upstream with labels and datacenter", + Upstream: "myPort.port.static-server.svc.dc1.dc:7890", + ExpectError: true, + }, + { + Name: "Upstream with labels and no datacenter", + Upstream: "myPort.port.static-server.svc:7890", + }, + { + Name: "Error expected, wrong order", + Upstream: "static-server.svc.myPort.port:7890", + ExpectError: true, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + var w MeshWebhook + envVars, err := w.containerEnvVars(corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "foo", + constants.AnnotationMeshDestinations: tt.Upstream, + }, + }, + }) + + if !tt.ExpectError { + require.NoError(err) + require.ElementsMatch(envVars, []corev1.EnvVar{ + { + Name: "STATIC_SERVER_MYPORT_CONNECT_SERVICE_HOST", + Value: "127.0.0.1", + }, { + Name: "STATIC_SERVER_MYPORT_CONNECT_SERVICE_PORT", + Value: "7890", + }, + }) + } else { + require.Error(err) + } + }) + } +} diff --git a/control-plane/connect-inject/webhookv2/container_init.go b/control-plane/connect-inject/webhookv2/container_init.go new file mode 100644 index 0000000000..7afcaefd33 --- /dev/null +++ b/control-plane/connect-inject/webhookv2/container_init.go @@ 
-0,0 +1,287 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "bytes" + "strconv" + "strings" + "text/template" + + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" +) + +const ( + injectInitContainerName = "consul-mesh-init" + rootUserAndGroupID = 0 + sidecarUserAndGroupID = 5995 + initContainersUserAndGroupID = 5996 + netAdminCapability = "NET_ADMIN" +) + +type initContainerCommandData struct { + ServiceName string + ServiceAccountName string + AuthMethod string + + // Log settings for the mesh-init command. + LogLevel string + LogJSON bool +} + +// containerInit returns the init container spec for mesh-init that polls for the workload's bootstrap config +// so that it optionally set up iptables for transparent proxy. Otherwise, it ensures the workload exists before +// the pod starts. +func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod) (corev1.Container, error) { + // Check if tproxy is enabled on this pod. 
+ tproxyEnabled, err := common.TransparentProxyEnabled(namespace, pod, w.EnableTransparentProxy) + if err != nil { + return corev1.Container{}, err + } + + data := initContainerCommandData{ + AuthMethod: w.AuthMethod, + LogLevel: w.LogLevel, + LogJSON: w.LogJSON, + } + + // Create expected volume mounts + volMounts := []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/mesh-inject", + }, + } + + data.ServiceName = pod.Annotations[constants.AnnotationService] + var bearerTokenFile string + if w.AuthMethod != "" { + data.ServiceAccountName = pod.Spec.ServiceAccountName + // Extract the service account token's volume mount + var saTokenVolumeMount corev1.VolumeMount + saTokenVolumeMount, bearerTokenFile, err = findServiceAccountVolumeMount(pod) + if err != nil { + return corev1.Container{}, err + } + + // Append to volume mounts + volMounts = append(volMounts, saTokenVolumeMount) + } + + // Render the command + var buf bytes.Buffer + tpl := template.Must(template.New("root").Parse(strings.TrimSpace( + initContainerCommandTpl))) + err = tpl.Execute(&buf, &data) + if err != nil { + return corev1.Container{}, err + } + + initContainerName := injectInitContainerName + container := corev1.Container{ + Name: initContainerName, + Image: w.ImageConsulK8S, + Env: []corev1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}, + }, + }, + { + Name: "CONSUL_ADDRESSES", + Value: w.ConsulAddress, + }, + { + Name: "CONSUL_GRPC_PORT", + Value: strconv.Itoa(w.ConsulConfig.GRPCPort), + }, + { + Name: "CONSUL_HTTP_PORT", + Value: strconv.Itoa(w.ConsulConfig.HTTPPort), + }, + { + Name: "CONSUL_API_TIMEOUT", + Value: w.ConsulConfig.APITimeout.String(), + }, + }, + Resources: w.InitContainerResources, + VolumeMounts: volMounts, + Command: 
[]string{"/bin/sh", "-ec", buf.String()}, + } + + if w.TLSEnabled { + container.Env = append(container.Env, + corev1.EnvVar{ + Name: constants.UseTLSEnvVar, + Value: "true", + }, + corev1.EnvVar{ + Name: constants.CACertPEMEnvVar, + Value: w.ConsulCACert, + }, + corev1.EnvVar{ + Name: constants.TLSServerNameEnvVar, + Value: w.ConsulTLSServerName, + }) + } + + if w.AuthMethod != "" { + container.Env = append(container.Env, + corev1.EnvVar{ + Name: "CONSUL_LOGIN_AUTH_METHOD", + Value: w.AuthMethod, + }, + corev1.EnvVar{ + Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", + Value: bearerTokenFile, + }, + corev1.EnvVar{ + Name: "CONSUL_LOGIN_META", + Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)", + }) + + if w.EnableNamespaces { + if w.EnableK8SNSMirroring { + container.Env = append(container.Env, + corev1.EnvVar{ + Name: "CONSUL_LOGIN_NAMESPACE", + Value: "default", + }) + } else { + container.Env = append(container.Env, + corev1.EnvVar{ + Name: "CONSUL_LOGIN_NAMESPACE", + Value: w.consulNamespace(namespace.Name), + }) + } + } + + if w.ConsulPartition != "" { + container.Env = append(container.Env, + corev1.EnvVar{ + Name: "CONSUL_LOGIN_PARTITION", + Value: w.ConsulPartition, + }) + } + } + if w.EnableNamespaces { + container.Env = append(container.Env, + corev1.EnvVar{ + Name: "CONSUL_NAMESPACE", + Value: w.consulNamespace(namespace.Name), + }) + } + + if w.ConsulPartition != "" { + container.Env = append(container.Env, + corev1.EnvVar{ + Name: "CONSUL_PARTITION", + Value: w.ConsulPartition, + }) + } + + // OpenShift without CNI is the only environment where privileged must be true. + privileged := false + if w.EnableOpenShift && !w.EnableCNI { + privileged = true + } + + if tproxyEnabled { + if !w.EnableCNI { + // Set redirect traffic config for the container so that we can apply iptables rules. 
+ redirectTrafficConfig, err := w.iptablesConfigJSON(pod, namespace) + if err != nil { + return corev1.Container{}, err + } + container.Env = append(container.Env, + corev1.EnvVar{ + Name: "CONSUL_REDIRECT_TRAFFIC_CONFIG", + Value: redirectTrafficConfig, + }) + + // Running consul mesh-init redirect-traffic with iptables + // requires both being a root user and having NET_ADMIN capability. + container.SecurityContext = &corev1.SecurityContext{ + RunAsUser: pointer.Int64(rootUserAndGroupID), + RunAsGroup: pointer.Int64(rootUserAndGroupID), + // RunAsNonRoot overrides any setting in the Pod so that we can still run as root here as required. + RunAsNonRoot: pointer.Bool(false), + Privileged: pointer.Bool(privileged), + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{netAdminCapability}, + }, + } + } else { + container.SecurityContext = &corev1.SecurityContext{ + RunAsUser: pointer.Int64(initContainersUserAndGroupID), + RunAsGroup: pointer.Int64(initContainersUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + Privileged: pointer.Bool(privileged), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + ReadOnlyRootFilesystem: pointer.Bool(true), + AllowPrivilegeEscalation: pointer.Bool(false), + } + } + } + + return container, nil +} + +// consulDNSEnabled returns true if Consul DNS should be enabled for this pod. +// It returns an error when the annotation value cannot be parsed by strconv.ParseBool or if we are unable +// to read the pod's namespace label when it exists. +func consulDNSEnabled(namespace corev1.Namespace, pod corev1.Pod, globalDNSEnabled bool, globalTProxyEnabled bool) (bool, error) { + // DNS is only possible when tproxy is also enabled because it relies + // on traffic being redirected. 
+ tproxy, err := common.TransparentProxyEnabled(namespace, pod, globalTProxyEnabled) + if err != nil { + return false, err + } + if !tproxy { + return false, nil + } + + // First check to see if the pod annotation exists to override the namespace or global settings. + if raw, ok := pod.Annotations[constants.KeyConsulDNS]; ok { + return strconv.ParseBool(raw) + } + // Next see if the namespace has been defaulted. + if raw, ok := namespace.Labels[constants.KeyConsulDNS]; ok { + return strconv.ParseBool(raw) + } + // Else fall back to the global default. + return globalDNSEnabled, nil +} + +// splitCommaSeparatedItemsFromAnnotation takes an annotation and a pod +// and returns the comma-separated value of the annotation as a list of strings. +func splitCommaSeparatedItemsFromAnnotation(annotation string, pod corev1.Pod) []string { + var items []string + if raw, ok := pod.Annotations[annotation]; ok { + items = append(items, strings.Split(raw, ",")...) + } + + return items +} + +// initContainerCommandTpl is the template for the command executed by +// the init container. +const initContainerCommandTpl = ` +consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ + -log-level={{ .LogLevel }} \ + -log-json={{ .LogJSON }} \ +` diff --git a/control-plane/connect-inject/webhookv2/container_init_test.go b/control-plane/connect-inject/webhookv2/container_init_test.go new file mode 100644 index 0000000000..b85ecd3ba5 --- /dev/null +++ b/control-plane/connect-inject/webhookv2/container_init_test.go @@ -0,0 +1,808 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" +) + +const k8sNamespace = "k8snamespace" + +func TestHandlerContainerInit(t *testing.T) { + minimal := func() *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-namespace", + Annotations: map[string]string{ + constants.AnnotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + { + Name: "web-side", + }, + }, + }, + Status: corev1.PodStatus{ + HostIP: "1.1.1.1", + PodIP: "2.2.2.2", + }, + } + } + + cases := []struct { + Name string + Pod func(*corev1.Pod) *corev1.Pod + Webhook MeshWebhook + ExpCmd string // Strings.Contains test + ExpEnv []corev1.EnvVar + }{ + { + "default cmd and env", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[constants.AnnotationService] = "web" + return pod + }, + MeshWebhook{ + ConsulAddress: "10.0.0.0", + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, + LogLevel: "info", + }, + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ + -log-level=info \ + -log-json=false \`, + []corev1.EnvVar{ + { + Name: "CONSUL_ADDRESSES", + Value: "10.0.0.0", + }, + { + Name: "CONSUL_GRPC_PORT", + Value: "8502", + }, + { + Name: "CONSUL_HTTP_PORT", + Value: "8500", + }, + { + Name: "CONSUL_API_TIMEOUT", + Value: "0s", + }, + }, + }, + + { + "with auth method", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[constants.AnnotationService] = "web" + pod.Spec.ServiceAccountName = "a-service-account-name" + 
pod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{ + { + Name: "sa", + MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", + }, + } + return pod + }, + MeshWebhook{ + AuthMethod: "an-auth-method", + ConsulAddress: "10.0.0.0", + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, + LogLevel: "debug", + LogJSON: true, + }, + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ + -log-level=debug \ + -log-json=true \`, + []corev1.EnvVar{ + { + Name: "CONSUL_ADDRESSES", + Value: "10.0.0.0", + }, + { + Name: "CONSUL_GRPC_PORT", + Value: "8502", + }, + { + Name: "CONSUL_HTTP_PORT", + Value: "8500", + }, + { + Name: "CONSUL_API_TIMEOUT", + Value: "5s", + }, + { + Name: "CONSUL_LOGIN_AUTH_METHOD", + Value: "an-auth-method", + }, + { + Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", + Value: "/var/run/secrets/kubernetes.io/serviceaccount/token", + }, + { + Name: "CONSUL_LOGIN_META", + Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)", + }, + }, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + w := tt.Webhook + pod := *tt.Pod(minimal()) + container, err := w.containerInit(testNS, pod) + require.NoError(t, err) + actual := strings.Join(container.Command, " ") + require.Contains(t, actual, tt.ExpCmd) + require.EqualValues(t, container.Env[2:], tt.ExpEnv) + }) + } +} + +func TestHandlerContainerInit_transparentProxy(t *testing.T) { + cases := map[string]struct { + globalEnabled bool + cniEnabled bool + annotations map[string]string + expTproxyEnabled bool + namespaceLabel map[string]string + openShiftEnabled bool + }{ + "enabled globally, ns not set, annotation not provided, cni disabled, openshift disabled": { + true, + false, + nil, + true, + nil, + false, + }, + "enabled globally, ns not set, annotation is false, cni disabled, openshift disabled": { + true, + false, + map[string]string{constants.KeyTransparentProxy: "false"}, + false, + nil, + false, + }, + "enabled globally, ns not 
set, annotation is true, cni disabled, openshift disabled": { + true, + false, + map[string]string{constants.KeyTransparentProxy: "true"}, + true, + nil, + false, + }, + "disabled globally, ns not set, annotation not provided, cni disabled, openshift disabled": { + false, + false, + nil, + false, + nil, + false, + }, + "disabled globally, ns not set, annotation is false, cni disabled, openshift disabled": { + false, + false, + map[string]string{constants.KeyTransparentProxy: "false"}, + false, + nil, + false, + }, + "disabled globally, ns not set, annotation is true, cni disabled, openshift disabled": { + false, + false, + map[string]string{constants.KeyTransparentProxy: "true"}, + true, + nil, + false, + }, + "disabled globally, ns enabled, annotation not set, cni disabled, openshift disabled": { + false, + false, + nil, + true, + map[string]string{constants.KeyTransparentProxy: "true"}, + false, + }, + "enabled globally, ns disabled, annotation not set, cni disabled, openshift disabled": { + true, + false, + nil, + false, + map[string]string{constants.KeyTransparentProxy: "false"}, + false, + }, + "disabled globally, ns enabled, annotation not set, cni enabled, openshift disabled": { + false, + true, + nil, + false, + map[string]string{constants.KeyTransparentProxy: "true"}, + false, + }, + + "enabled globally, ns not set, annotation not set, cni enabled, openshift disabled": { + true, + true, + nil, + false, + nil, + false, + }, + "enabled globally, ns not set, annotation not set, cni enabled, openshift enabled": { + true, + true, + nil, + false, + nil, + true, + }, + "enabled globally, ns not set, annotation not set, cni disabled, openshift enabled": { + true, + false, + nil, + true, + nil, + true, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + w := MeshWebhook{ + EnableTransparentProxy: c.globalEnabled, + EnableCNI: c.cniEnabled, + ConsulConfig: &consul.Config{HTTPPort: 8500}, + EnableOpenShift: c.openShiftEnabled, + } + pod := 
minimal() + pod.Annotations = c.annotations + + privileged := false + if c.openShiftEnabled && !c.cniEnabled { + privileged = true + } + + var expectedSecurityContext *corev1.SecurityContext + if c.cniEnabled { + expectedSecurityContext = &corev1.SecurityContext{ + RunAsUser: pointer.Int64(initContainersUserAndGroupID), + RunAsGroup: pointer.Int64(initContainersUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + Privileged: pointer.Bool(privileged), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + ReadOnlyRootFilesystem: pointer.Bool(true), + AllowPrivilegeEscalation: pointer.Bool(false), + } + } else if c.expTproxyEnabled { + expectedSecurityContext = &corev1.SecurityContext{ + RunAsUser: pointer.Int64(0), + RunAsGroup: pointer.Int64(0), + RunAsNonRoot: pointer.Bool(false), + Privileged: pointer.Bool(privileged), + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{netAdminCapability}, + }, + } + } + ns := testNS + ns.Labels = c.namespaceLabel + container, err := w.containerInit(ns, *pod) + require.NoError(t, err) + + redirectTrafficEnvVarFound := false + for _, ev := range container.Env { + if ev.Name == "CONSUL_REDIRECT_TRAFFIC_CONFIG" { + redirectTrafficEnvVarFound = true + break + } + } + + require.Equal(t, c.expTproxyEnabled, redirectTrafficEnvVarFound) + require.Equal(t, expectedSecurityContext, container.SecurityContext) + }) + } +} + +func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { + minimal := func() *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + { + Name: "web-side", + }, + { + Name: "auth-method-secret", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "service-account-secret", + MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", + }, + }, + }, + }, + ServiceAccountName: "web", + }, 
+ } + } + + cases := []struct { + Name string + Pod func(*corev1.Pod) *corev1.Pod + Webhook MeshWebhook + Cmd string + ExpEnv []corev1.EnvVar + }{ + { + "default namespace, no partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[constants.AnnotationService] = "web" + return pod + }, + MeshWebhook{ + EnableNamespaces: true, + ConsulDestinationNamespace: "default", + ConsulPartition: "", + ConsulAddress: "10.0.0.0", + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, + }, + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ + -log-level=info \ + -log-json=false \`, + []corev1.EnvVar{ + { + Name: "CONSUL_ADDRESSES", + Value: "10.0.0.0", + }, + { + Name: "CONSUL_GRPC_PORT", + Value: "8502", + }, + { + Name: "CONSUL_HTTP_PORT", + Value: "8500", + }, + { + Name: "CONSUL_API_TIMEOUT", + Value: "5s", + }, + { + Name: "CONSUL_NAMESPACE", + Value: "default", + }, + }, + }, + { + "default namespace, default partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[constants.AnnotationService] = "web" + return pod + }, + MeshWebhook{ + EnableNamespaces: true, + ConsulDestinationNamespace: "default", + ConsulPartition: "default", + ConsulAddress: "10.0.0.0", + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, + }, + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ + -log-level=info \ + -log-json=false \`, + []corev1.EnvVar{ + { + Name: "CONSUL_ADDRESSES", + Value: "10.0.0.0", + }, + { + Name: "CONSUL_GRPC_PORT", + Value: "8502", + }, + { + Name: "CONSUL_HTTP_PORT", + Value: "8500", + }, + { + Name: "CONSUL_API_TIMEOUT", + Value: "5s", + }, + { + Name: "CONSUL_NAMESPACE", + Value: "default", + }, + { + Name: "CONSUL_PARTITION", + Value: "default", + }, + }, + }, + { + "non-default namespace, no partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[constants.AnnotationService] = "web" + return pod + }, + 
MeshWebhook{ + EnableNamespaces: true, + ConsulDestinationNamespace: "non-default", + ConsulPartition: "", + ConsulAddress: "10.0.0.0", + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, + }, + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ + -log-level=info \ + -log-json=false \`, + []corev1.EnvVar{ + { + Name: "CONSUL_ADDRESSES", + Value: "10.0.0.0", + }, + { + Name: "CONSUL_GRPC_PORT", + Value: "8502", + }, + { + Name: "CONSUL_HTTP_PORT", + Value: "8500", + }, + { + Name: "CONSUL_API_TIMEOUT", + Value: "5s", + }, + { + Name: "CONSUL_NAMESPACE", + Value: "non-default", + }, + }, + }, + { + "non-default namespace, non-default partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[constants.AnnotationService] = "web" + return pod + }, + MeshWebhook{ + EnableNamespaces: true, + ConsulDestinationNamespace: "non-default", + ConsulPartition: "non-default-part", + ConsulAddress: "10.0.0.0", + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, + }, + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ + -log-level=info \ + -log-json=false \`, + []corev1.EnvVar{ + { + Name: "CONSUL_ADDRESSES", + Value: "10.0.0.0", + }, + { + Name: "CONSUL_GRPC_PORT", + Value: "8502", + }, + { + Name: "CONSUL_HTTP_PORT", + Value: "8500", + }, + { + Name: "CONSUL_API_TIMEOUT", + Value: "5s", + }, + { + Name: "CONSUL_NAMESPACE", + Value: "non-default", + }, + { + Name: "CONSUL_PARTITION", + Value: "non-default-part", + }, + }, + }, + { + "auth method, non-default namespace, mirroring disabled, default partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[constants.AnnotationService] = "" + return pod + }, + MeshWebhook{ + AuthMethod: "auth-method", + EnableNamespaces: true, + ConsulDestinationNamespace: "non-default", + ConsulPartition: "default", + ConsulAddress: "10.0.0.0", + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 
8502, APITimeout: 5 * time.Second}, + }, + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ + -log-level=info \ + -log-json=false \`, + []corev1.EnvVar{ + { + Name: "CONSUL_ADDRESSES", + Value: "10.0.0.0", + }, + { + Name: "CONSUL_GRPC_PORT", + Value: "8502", + }, + { + Name: "CONSUL_HTTP_PORT", + Value: "8500", + }, + { + Name: "CONSUL_API_TIMEOUT", + Value: "5s", + }, + { + Name: "CONSUL_LOGIN_AUTH_METHOD", + Value: "auth-method", + }, + { + Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", + Value: "/var/run/secrets/kubernetes.io/serviceaccount/token", + }, + { + Name: "CONSUL_LOGIN_META", + Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)", + }, + { + Name: "CONSUL_LOGIN_NAMESPACE", + Value: "non-default", + }, + { + Name: "CONSUL_LOGIN_PARTITION", + Value: "default", + }, + { + Name: "CONSUL_NAMESPACE", + Value: "non-default", + }, + { + Name: "CONSUL_PARTITION", + Value: "default", + }, + }, + }, + { + "auth method, non-default namespace, mirroring enabled, non-default partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[constants.AnnotationService] = "" + return pod + }, + MeshWebhook{ + AuthMethod: "auth-method", + EnableNamespaces: true, + ConsulDestinationNamespace: "non-default", // Overridden by mirroring + EnableK8SNSMirroring: true, + ConsulPartition: "non-default", + ConsulAddress: "10.0.0.0", + ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, + }, + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ + -log-level=info \ + -log-json=false \`, + []corev1.EnvVar{ + { + Name: "CONSUL_ADDRESSES", + Value: "10.0.0.0", + }, + { + Name: "CONSUL_GRPC_PORT", + Value: "8502", + }, + { + Name: "CONSUL_HTTP_PORT", + Value: "8500", + }, + { + Name: "CONSUL_API_TIMEOUT", + Value: "5s", + }, + { + Name: "CONSUL_LOGIN_AUTH_METHOD", + Value: "auth-method", + }, + { + Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", + Value: "/var/run/secrets/kubernetes.io/serviceaccount/token", + }, + { + 
Name: "CONSUL_LOGIN_META", + Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)", + }, + { + Name: "CONSUL_LOGIN_NAMESPACE", + Value: "default", + }, + { + Name: "CONSUL_LOGIN_PARTITION", + Value: "non-default", + }, + { + Name: "CONSUL_NAMESPACE", + Value: "k8snamespace", + }, + { + Name: "CONSUL_PARTITION", + Value: "non-default", + }, + }, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + h := tt.Webhook + h.LogLevel = "info" + container, err := h.containerInit(testNS, *tt.Pod(minimal())) + require.NoError(t, err) + actual := strings.Join(container.Command, " ") + require.Equal(t, tt.Cmd, actual) + if tt.ExpEnv != nil { + require.Equal(t, tt.ExpEnv, container.Env[2:]) + } + }) + } +} + +// If TLSEnabled is set, +// Consul addresses should use HTTPS +// and CA cert should be set as env variable if provided. +// Additionally, test that the init container is correctly configured +// when http or gRPC ports are different from defaults. +func TestHandlerContainerInit_WithTLSAndCustomPorts(t *testing.T) { + for _, caProvided := range []bool{true, false} { + name := fmt.Sprintf("ca provided: %t", caProvided) + t.Run(name, func(t *testing.T) { + w := MeshWebhook{ + ConsulAddress: "10.0.0.0", + TLSEnabled: true, + ConsulConfig: &consul.Config{HTTPPort: 443, GRPCPort: 8503}, + } + if caProvided { + w.ConsulCACert = "consul-ca-cert" + } + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := w.containerInit(testNS, *pod) + require.NoError(t, err) + require.Equal(t, "CONSUL_ADDRESSES", container.Env[2].Name) + require.Equal(t, w.ConsulAddress, container.Env[2].Value) + require.Equal(t, "CONSUL_GRPC_PORT", container.Env[3].Name) + require.Equal(t, fmt.Sprintf("%d", w.ConsulConfig.GRPCPort), container.Env[3].Value) + require.Equal(t, "CONSUL_HTTP_PORT", 
container.Env[4].Name) + require.Equal(t, fmt.Sprintf("%d", w.ConsulConfig.HTTPPort), container.Env[4].Value) + if w.TLSEnabled { + require.Equal(t, "CONSUL_USE_TLS", container.Env[6].Name) + require.Equal(t, "true", container.Env[6].Value) + if caProvided { + require.Equal(t, "CONSUL_CACERT_PEM", container.Env[7].Name) + require.Equal(t, "consul-ca-cert", container.Env[7].Value) + } else { + for _, ev := range container.Env { + if ev.Name == "CONSUL_CACERT_PEM" { + require.Empty(t, ev.Value) + } + } + } + } + + }) + } +} + +func TestHandlerContainerInit_Resources(t *testing.T) { + w := MeshWebhook{ + InitContainerResources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("10Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("20m"), + corev1.ResourceMemory: resource.MustParse("25Mi"), + }, + }, + ConsulConfig: &consul.Config{HTTPPort: 8500, APITimeout: 5 * time.Second}, + } + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := w.containerInit(testNS, *pod) + require.NoError(t, err) + require.Equal(t, corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("20m"), + corev1.ResourceMemory: resource.MustParse("25Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("10Mi"), + }, + }, container.Resources) +} + +var testNS = corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: k8sNamespace, + Labels: map[string]string{}, + }, +} + +func minimal() *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespaces.DefaultNamespace, + Name: "minimal", + Annotations: 
map[string]string{ + constants.AnnotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + { + Name: "web-side", + }, + }, + }, + } +} diff --git a/control-plane/connect-inject/webhookv2/container_volume.go b/control-plane/connect-inject/webhookv2/container_volume.go new file mode 100644 index 0000000000..a05a6720db --- /dev/null +++ b/control-plane/connect-inject/webhookv2/container_volume.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// volumeName is the name of the volume that is created to store the +// Consul Connect injection data. +const volumeName = "consul-mesh-inject-data" + +// containerVolume returns the volume data to add to the pod. This volume +// is used for shared data between containers. +func (w *MeshWebhook) containerVolume() corev1.Volume { + return corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}, + }, + } +} diff --git a/control-plane/connect-inject/webhookv2/dns.go b/control-plane/connect-inject/webhookv2/dns.go new file mode 100644 index 0000000000..883c9ed034 --- /dev/null +++ b/control-plane/connect-inject/webhookv2/dns.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "fmt" + "strconv" + + "github.com/miekg/dns" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" +) + +const ( + // These defaults are taken from the /etc/resolv.conf man page + // and are used by the dns library. + defaultDNSOptionNdots = 1 + defaultDNSOptionTimeout = 5 + defaultDNSOptionAttempts = 2 + + // defaultEtcResolvConfFile is the default location of the /etc/resolv.conf file. 
+ defaultEtcResolvConfFile = "/etc/resolv.conf" +) + +func (w *MeshWebhook) configureDNS(pod *corev1.Pod, k8sNS string) error { + // First, we need to determine the nameservers configured in this cluster from /etc/resolv.conf. + etcResolvConf := defaultEtcResolvConfFile + if w.etcResolvFile != "" { + etcResolvConf = w.etcResolvFile + } + cfg, err := dns.ClientConfigFromFile(etcResolvConf) + if err != nil { + return err + } + + // Set DNS policy on the pod to None because we want DNS to work according to the config we will provide. + pod.Spec.DNSPolicy = corev1.DNSNone + + // Set the consul-dataplane's DNS server as the first server in the list (i.e. localhost). + // We want to do that so that when consul cannot resolve the record, we will fall back to the nameservers + // configured in our /etc/resolv.conf. It's important to add Consul DNS as the first nameserver because + // if we put kube DNS first, it will return NXDOMAIN response and a DNS client will not fall back to other nameservers. + if pod.Spec.DNSConfig == nil { + nameservers := []string{consulDataplaneDNSBindHost} + nameservers = append(nameservers, cfg.Servers...) + var options []corev1.PodDNSConfigOption + if cfg.Ndots != defaultDNSOptionNdots { + ndots := strconv.Itoa(cfg.Ndots) + options = append(options, corev1.PodDNSConfigOption{ + Name: "ndots", + Value: &ndots, + }) + } + if cfg.Timeout != defaultDNSOptionTimeout { + options = append(options, corev1.PodDNSConfigOption{ + Name: "timeout", + Value: pointer.String(strconv.Itoa(cfg.Timeout)), + }) + } + if cfg.Attempts != defaultDNSOptionAttempts { + options = append(options, corev1.PodDNSConfigOption{ + Name: "attempts", + Value: pointer.String(strconv.Itoa(cfg.Attempts)), + }) + } + + // Replace release namespace in the searches with the pod namespace. + // This is so that the searches we generate will be for the pod's namespace + // instead of the namespace of the connect-injector. E.g. 
instead of + // consul.svc.cluster.local it should be .svc.cluster.local. + var searches []string + // Kubernetes will add a search domain for .svc.cluster.local so we can always + // expect it to be there. See https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#namespaces-of-services. + consulReleaseNSSearchDomain := fmt.Sprintf("%s.svc.cluster.local", w.ReleaseNamespace) + for _, search := range cfg.Search { + if search == consulReleaseNSSearchDomain { + searches = append(searches, fmt.Sprintf("%s.svc.cluster.local", k8sNS)) + } else { + searches = append(searches, search) + } + } + + pod.Spec.DNSConfig = &corev1.PodDNSConfig{ + Nameservers: nameservers, + Searches: searches, + Options: options, + } + } else { + return fmt.Errorf("DNS redirection to Consul is not supported with an already defined DNSConfig on the pod") + } + return nil +} diff --git a/control-plane/connect-inject/webhookv2/dns_test.go b/control-plane/connect-inject/webhookv2/dns_test.go new file mode 100644 index 0000000000..e7a380b271 --- /dev/null +++ b/control-plane/connect-inject/webhookv2/dns_test.go @@ -0,0 +1,105 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" +) + +func TestMeshWebhook_configureDNS(t *testing.T) { + cases := map[string]struct { + etcResolv string + expDNSConfig *corev1.PodDNSConfig + }{ + "empty /etc/resolv.conf file": { + expDNSConfig: &corev1.PodDNSConfig{ + Nameservers: []string{"127.0.0.1"}, + }, + }, + "one nameserver": { + etcResolv: `nameserver 1.1.1.1`, + expDNSConfig: &corev1.PodDNSConfig{ + Nameservers: []string{"127.0.0.1", "1.1.1.1"}, + }, + }, + "mutiple nameservers, searches, and options": { + etcResolv: ` +nameserver 1.1.1.1 +nameserver 2.2.2.2 +search foo.bar bar.baz +options ndots:5 timeout:6 attempts:3`, + expDNSConfig: &corev1.PodDNSConfig{ + Nameservers: []string{"127.0.0.1", "1.1.1.1", "2.2.2.2"}, + Searches: []string{"foo.bar", "bar.baz"}, + Options: []corev1.PodDNSConfigOption{ + { + Name: "ndots", + Value: pointer.String("5"), + }, + { + Name: "timeout", + Value: pointer.String("6"), + }, + { + Name: "attempts", + Value: pointer.String("3"), + }, + }, + }, + }, + "replaces release specific search domains": { + etcResolv: ` +nameserver 1.1.1.1 +nameserver 2.2.2.2 +search consul.svc.cluster.local svc.cluster.local cluster.local +options ndots:5`, + expDNSConfig: &corev1.PodDNSConfig{ + Nameservers: []string{"127.0.0.1", "1.1.1.1", "2.2.2.2"}, + Searches: []string{"default.svc.cluster.local", "svc.cluster.local", "cluster.local"}, + Options: []corev1.PodDNSConfigOption{ + { + Name: "ndots", + Value: pointer.String("5"), + }, + }, + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + etcResolvFile, err := os.CreateTemp("", "") + require.NoError(t, err) + t.Cleanup(func() { + _ = os.RemoveAll(etcResolvFile.Name()) + }) + _, err = etcResolvFile.WriteString(c.etcResolv) + require.NoError(t, err) + w := MeshWebhook{ + etcResolvFile: etcResolvFile.Name(), + ReleaseNamespace: 
"consul", + } + + pod := minimal() + err = w.configureDNS(pod, "default") + require.NoError(t, err) + require.Equal(t, corev1.DNSNone, pod.Spec.DNSPolicy) + require.Equal(t, c.expDNSConfig, pod.Spec.DNSConfig) + }) + } +} + +func TestMeshWebhook_configureDNS_error(t *testing.T) { + w := MeshWebhook{} + + pod := minimal() + pod.Spec.DNSConfig = &corev1.PodDNSConfig{Nameservers: []string{"1.1.1.1"}} + err := w.configureDNS(pod, "default") + require.EqualError(t, err, "DNS redirection to Consul is not supported with an already defined DNSConfig on the pod") +} diff --git a/control-plane/connect-inject/webhookv2/health_checks_test.go b/control-plane/connect-inject/webhookv2/health_checks_test.go new file mode 100644 index 0000000000..82b7cdd99d --- /dev/null +++ b/control-plane/connect-inject/webhookv2/health_checks_test.go @@ -0,0 +1,56 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestReady(t *testing.T) { + + var cases = []struct { + name string + certFileContents *string + keyFileContents *string + expectError bool + }{ + {"Both cert and key files not present.", nil, nil, true}, + {"Cert file not empty and key file missing.", ptrToString("test"), nil, true}, + {"Key file not empty and cert file missing.", nil, ptrToString("test"), true}, + {"Both cert and key files are present and not empty.", ptrToString("test"), ptrToString("test"), false}, + {"Both cert and key files are present but both are empty.", ptrToString(""), ptrToString(""), true}, + {"Both cert and key files are present but key file is empty.", ptrToString("test"), ptrToString(""), true}, + {"Both cert and key files are present but cert file is empty.", ptrToString(""), ptrToString("test"), true}, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "") + require.NoError(t, err) + if 
tt.certFileContents != nil { + err := os.WriteFile(filepath.Join(tmpDir, "tls.crt"), []byte(*tt.certFileContents), 0666) + require.NoError(t, err) + } + if tt.keyFileContents != nil { + err := os.WriteFile(filepath.Join(tmpDir, "tls.key"), []byte(*tt.keyFileContents), 0666) + require.NoError(t, err) + } + rc := ReadinessCheck{tmpDir} + err = rc.Ready(nil) + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func ptrToString(s string) *string { + return &s +} diff --git a/control-plane/connect-inject/webhookv2/heath_checks.go b/control-plane/connect-inject/webhookv2/heath_checks.go new file mode 100644 index 0000000000..6bd11f6efa --- /dev/null +++ b/control-plane/connect-inject/webhookv2/heath_checks.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "errors" + "net/http" + "os" + "path/filepath" +) + +type ReadinessCheck struct { + CertDir string +} + +func (r ReadinessCheck) Ready(_ *http.Request) error { + certFile, err := os.ReadFile(filepath.Join(r.CertDir, "tls.crt")) + if err != nil { + return err + } + keyFile, err := os.ReadFile(filepath.Join(r.CertDir, "tls.key")) + if err != nil { + return err + } + if len(certFile) == 0 || len(keyFile) == 0 { + return errors.New("certificate files have not been loaded") + } + return nil +} diff --git a/control-plane/connect-inject/webhookv2/mesh_webhook.go b/control-plane/connect-inject/webhookv2/mesh_webhook.go new file mode 100644 index 0000000000..9676243fef --- /dev/null +++ b/control-plane/connect-inject/webhookv2/mesh_webhook.go @@ -0,0 +1,556 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "strconv" + "strings" + + mapset "github.com/deckarep/golang-set" + "github.com/go-logr/logr" + "golang.org/x/exp/slices" + "gomodules.xyz/jsonpatch/v2" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes" + _ "k8s.io/client-go/plugin/pkg/client/auth" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/lifecycle" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/metrics" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" + "github.com/hashicorp/consul-k8s/version" +) + +const ( + sidecarContainer = "consul-dataplane" + + // exposedPathsLivenessPortsRangeStart is the start of the port range that we will use as + // the ListenerPort for the Expose configuration of the proxy registration for a liveness probe. + exposedPathsLivenessPortsRangeStart = 20300 + + // exposedPathsReadinessPortsRangeStart is the start of the port range that we will use as + // the ListenerPort for the Expose configuration of the proxy registration for a readiness probe. + exposedPathsReadinessPortsRangeStart = 20400 + + // exposedPathsStartupPortsRangeStart is the start of the port range that we will use as + // the ListenerPort for the Expose configuration of the proxy registration for a startup probe. + exposedPathsStartupPortsRangeStart = 20500 +) + +// kubeSystemNamespaces is a set of namespaces that are considered +// "system" level namespaces and are always skipped (never injected). 
+var kubeSystemNamespaces = mapset.NewSetWith(metav1.NamespaceSystem, metav1.NamespacePublic) + +// MeshWebhook is the HTTP meshWebhook for admission webhooks. +type MeshWebhook struct { + Clientset kubernetes.Interface + + // ConsulClientConfig is the config to create a Consul API client. + ConsulConfig *consul.Config + + // ConsulServerConnMgr is the watcher for the Consul server addresses. + ConsulServerConnMgr consul.ServerConnectionManager + + // ImageConsul is the container image for Consul to use. + // ImageConsulDataplane is the container image for Envoy to use. + // + // Both of these MUST be set. + ImageConsul string + ImageConsulDataplane string + + // ImageConsulK8S is the container image for consul-k8s to use. + // This image is used for the consul-sidecar container. + ImageConsulK8S string + + // Optional: set when you need extra options to be set when running envoy + // See a list of args here: https://www.envoyproxy.io/docs/envoy/latest/operations/cli + EnvoyExtraArgs string + + // RequireAnnotation means that the annotation must be given to inject. + // If this is false, injection is default. + RequireAnnotation bool + + // AuthMethod is the name of the Kubernetes Auth Method to + // use for identity with connectInjection if ACLs are enabled. + AuthMethod string + + // The PEM-encoded CA certificate string + // to use when communicating with Consul clients over HTTPS. + // If not set, will use HTTP. + ConsulCACert string + + // TLSEnabled indicates whether we should use TLS for communicating to Consul. + TLSEnabled bool + + // ConsulAddress is the address of the Consul server. This should be only the + // host (i.e. not including port or protocol). + ConsulAddress string + + // ConsulTLSServerName is the SNI header to use to connect to the Consul servers + // over TLS. + ConsulTLSServerName string + + // ConsulPartition is the name of the Admin Partition that the controller + // is deployed in. 
It is an enterprise feature requiring Consul Enterprise 1.11+. + // Its value is an empty string if partitions aren't enabled. + ConsulPartition string + + // EnableNamespaces indicates that a user is running Consul Enterprise + // with version 1.7+ which is namespace aware. It enables Consul namespaces, + // with injection into either a single Consul namespace or mirrored from + // k8s namespaces. + EnableNamespaces bool + + // AllowK8sNamespacesSet is a set of k8s namespaces to explicitly allow for + // injection. It supports the special character `*` which indicates that + // all k8s namespaces are eligible unless explicitly denied. This filter + // is applied before checking pod annotations. + AllowK8sNamespacesSet mapset.Set + + // DenyK8sNamespacesSet is a set of k8s namespaces to explicitly deny + // injection and thus service registration with Consul. An empty set + // means that no namespaces are removed from consideration. This filter + // takes precedence over AllowK8sNamespacesSet. + DenyK8sNamespacesSet mapset.Set + + // ConsulDestinationNamespace is the name of the Consul namespace to register all + // injected services into if Consul namespaces are enabled and mirroring + // is disabled. This may be set, but will not be used if mirroring is enabled. + ConsulDestinationNamespace string + + // EnableK8SNSMirroring causes Consul namespaces to be created to match the + // k8s namespace of any service being registered into Consul. Services are + // registered into the Consul namespace that mirrors their k8s namespace. + EnableK8SNSMirroring bool + + // K8SNSMirroringPrefix is an optional prefix that can be added to the Consul + // namespaces created while mirroring. For example, if it is set to "k8s-", + // then the k8s `default` namespace will be mirrored in Consul's + // `k8s-default` namespace. 
+ K8SNSMirroringPrefix string + + // CrossNamespaceACLPolicy is the name of the ACL policy to attach to + // any created Consul namespaces to allow cross namespace service discovery. + // Only necessary if ACLs are enabled. + CrossNamespaceACLPolicy string + + // Default resource settings for sidecar proxies. Some of these + // fields may be empty. + DefaultProxyCPURequest resource.Quantity + DefaultProxyCPULimit resource.Quantity + DefaultProxyMemoryRequest resource.Quantity + DefaultProxyMemoryLimit resource.Quantity + + // LifecycleConfig contains proxy lifecycle management configuration from the inject-connect command and has methods to determine whether + // configuration should come from the default flags or annotations. The meshWebhook uses this to configure container sidecar proxy args. + LifecycleConfig lifecycle.Config + + // Default Envoy concurrency flag, this is the number of worker threads to be used by the proxy. + DefaultEnvoyProxyConcurrency int + + // MetricsConfig contains metrics configuration from the inject-connect command and has methods to determine whether + // configuration should come from the default flags or annotations. The meshWebhook uses this to configure prometheus + // annotations and the merged metrics server. + MetricsConfig metrics.Config + + // Resource settings for init container. All of these fields + // will be populated by the defaults provided in the initial flags. + InitContainerResources corev1.ResourceRequirements + + // Resource settings for Consul sidecar. All of these fields + // will be populated by the defaults provided in the initial flags. + DefaultConsulSidecarResources corev1.ResourceRequirements + + // EnableTransparentProxy enables transparent proxy mode. + // This means that the injected init container will apply traffic redirection rules + // so that all traffic will go through the Envoy proxy. 
+ EnableTransparentProxy bool + + // EnableCNI enables the CNI plugin and prevents the connect-inject init container + // from running the consul redirect-traffic command as the CNI plugin handles traffic + // redirection + EnableCNI bool + + // TProxyOverwriteProbes controls whether the webhook should mutate pod's HTTP probes + // to point them to the Envoy proxy. + TProxyOverwriteProbes bool + + // EnableConsulDNS enables traffic redirection so that DNS requests are directed to Consul + // from mesh services. + EnableConsulDNS bool + + // EnableOpenShift indicates that when tproxy is enabled, the security context for the Envoy and init + // containers should not be added because OpenShift sets a random user for those and will not allow + // those containers to be created otherwise. + EnableOpenShift bool + + // SkipServerWatch prevents consul-dataplane from consuming the server update stream. This is useful + // for situations where Consul servers are behind a load balancer. + SkipServerWatch bool + + // ReleaseNamespace is the Kubernetes namespace where this webhook is running. + ReleaseNamespace string + + // Log + Log logr.Logger + // Log settings for consul-dataplane and connect-init containers. + LogLevel string + LogJSON bool + + decoder *admission.Decoder + // etcResolvFile is only used in tests to stub out /etc/resolv.conf file. + etcResolvFile string +} + +// Handle is the admission.Webhook implementation that actually handles the +// webhook request for admission control. This should be registered or +// served via the controller runtime manager. +func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admission.Response { + var pod corev1.Pod + + // Decode the pod from the request + if err := w.decoder.Decode(req, &pod); err != nil { + w.Log.Error(err, "could not unmarshal request to pod") + return admission.Errored(http.StatusBadRequest, err) + } + + // Marshall the contents of the pod that was received. 
This is compared with the + // marshalled contents of the pod after it has been updated to create the jsonpatch. + origPodJson, err := json.Marshal(pod) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + // Setup the default annotation values that are used for the container. + // This MUST be done before shouldInject is called since that function + // uses these annotations. + if err := w.defaultAnnotations(&pod, string(origPodJson)); err != nil { + w.Log.Error(err, "error creating default annotations", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error creating default annotations: %s", err)) + } + + // Check if we should inject, for example we don't inject in the + // system namespaces. + if shouldInject, err := w.shouldInject(pod, req.Namespace); err != nil { + w.Log.Error(err, "error checking if should inject", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error checking if should inject: %s", err)) + } else if !shouldInject { + return admission.Allowed(fmt.Sprintf("%s %s does not require injection", pod.Kind, pod.Name)) + } + + w.Log.Info("received pod", "name", req.Name, "ns", req.Namespace) + + // Validate that none of the pod ports start with the prefix "cslport-" as that may result in conflicts with ports + // created by the pod controller when creating workloads. + for _, c := range pod.Spec.Containers { + for _, p := range c.Ports { + if strings.HasPrefix(p.Name, constants.UnnamedWorkloadPortNamePrefix) { + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error creating pod: port names cannot be prefixed with \"cslport-\" as that prefix is reserved")) + } + } + } + + // Add our volume that will be shared by the init container and + // the sidecar for passing data in the pod. 
+ pod.Spec.Volumes = append(pod.Spec.Volumes, w.containerVolume()) + + // Optionally mount data volume to other containers + w.injectVolumeMount(pod) + + // Optionally add any volumes that are to be used by the envoy sidecar. + if _, ok := pod.Annotations[constants.AnnotationConsulSidecarUserVolume]; ok { + var userVolumes []corev1.Volume + err := json.Unmarshal([]byte(pod.Annotations[constants.AnnotationConsulSidecarUserVolume]), &userVolumes) + if err != nil { + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error unmarshalling sidecar user volumes: %s", err)) + } + pod.Spec.Volumes = append(pod.Spec.Volumes, userVolumes...) + } + + // Add the upstream services as environment variables for easy + // service discovery. + containerEnvVars, err := w.containerEnvVars(pod) + if err != nil { + w.Log.Error(err, "error creating the port environment variables based on pod annotations", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error creating the port environment variables based on pod annotations: %s", err)) + } + for i := range pod.Spec.InitContainers { + pod.Spec.InitContainers[i].Env = append(pod.Spec.InitContainers[i].Env, containerEnvVars...) + } + + for i := range pod.Spec.Containers { + pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, containerEnvVars...) + } + + // A user can enable/disable tproxy for an entire namespace via a label. 
+ ns, err := w.Clientset.CoreV1().Namespaces().Get(ctx, req.Namespace, metav1.GetOptions{}) + if err != nil { + w.Log.Error(err, "error fetching namespace metadata for container", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error getting namespace metadata for container: %s", err)) + } + + lifecycleEnabled, ok := w.LifecycleConfig.EnableProxyLifecycle(pod) + if ok != nil { + w.Log.Error(err, "unable to get lifecycle enabled status") + } + // Add the init container that registers the service and sets up the Envoy configuration. + initContainer, err := w.containerInit(*ns, pod) + if err != nil { + w.Log.Error(err, "error configuring injection init container", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring injection init container: %s", err)) + } + pod.Spec.InitContainers = append(pod.Spec.InitContainers, initContainer) + + // Add the Envoy sidecar. + envoySidecar, err := w.consulDataplaneSidecar(*ns, pod) + if err != nil { + w.Log.Error(err, "error configuring injection sidecar container", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring injection sidecar container: %s", err)) + } + //Append the Envoy sidecar before the application container only if lifecycle enabled. + + if lifecycleEnabled && ok == nil { + pod.Spec.Containers = append([]corev1.Container{envoySidecar}, pod.Spec.Containers...) + } else { + pod.Spec.Containers = append(pod.Spec.Containers, envoySidecar) + } + + // pod.Annotations has already been initialized by h.defaultAnnotations() + // and does not need to be checked for being a nil value. 
+ pod.Annotations[constants.KeyMeshInjectStatus] = constants.Injected + + tproxyEnabled, err := common.TransparentProxyEnabled(*ns, pod, w.EnableTransparentProxy) + if err != nil { + w.Log.Error(err, "error determining if transparent proxy is enabled", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error determining if transparent proxy is enabled: %s", err)) + } + + // Add an annotation to the pod sets transparent-proxy-status to enabled or disabled. Used by the CNI plugin + // to determine if it should traffic redirect or not. + if tproxyEnabled { + pod.Annotations[constants.KeyTransparentProxyStatus] = constants.Enabled + } + + // If DNS redirection is enabled, we want to configure dns on the pod. + dnsEnabled, err := consulDNSEnabled(*ns, pod, w.EnableConsulDNS, w.EnableTransparentProxy) + if err != nil { + w.Log.Error(err, "error determining if dns redirection is enabled", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error determining if dns redirection is enabled: %s", err)) + } + if dnsEnabled { + if err = w.configureDNS(&pod, req.Namespace); err != nil { + w.Log.Error(err, "error configuring DNS on the pod", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring DNS on the pod: %s", err)) + } + } + + // Add annotations for metrics. + if err = w.prometheusAnnotations(&pod); err != nil { + w.Log.Error(err, "error configuring prometheus annotations", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring prometheus annotations: %s", err)) + } + + if pod.Labels == nil { + pod.Labels = make(map[string]string) + } + pod.Labels[constants.KeyMeshInjectStatus] = constants.Injected + + // Consul-ENT only: Add the Consul destination namespace as an annotation to the pod. 
+ if w.EnableNamespaces { + pod.Annotations[constants.AnnotationConsulNamespace] = w.consulNamespace(req.Namespace) + } + + // Overwrite readiness/liveness probes if needed. + err = w.overwriteProbes(*ns, &pod) + if err != nil { + w.Log.Error(err, "error overwriting readiness or liveness probes", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error overwriting readiness or liveness probes: %s", err)) + } + + // When CNI and tproxy are enabled, we add an annotation to the pod that contains the iptables config so that the CNI + // plugin can apply redirect traffic rules on the pod. + if w.EnableCNI && tproxyEnabled { + if err = w.addRedirectTrafficConfigAnnotation(&pod, *ns); err != nil { + w.Log.Error(err, "error configuring annotation for CNI traffic redirection", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring annotation for CNI traffic redirection: %s", err)) + } + } + + // Marshall the pod into JSON after it has the desired envs, annotations, labels, + // sidecars and initContainers appended to it. + updatedPodJson, err := json.Marshal(pod) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + // Create a patches based on the Pod that was received by the meshWebhook + // and the desired Pod spec. + patches, err := jsonpatch.CreatePatch(origPodJson, updatedPodJson) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + // Return a Patched response along with the patches we intend on applying to the + // Pod received by the meshWebhook. + return admission.Patched(fmt.Sprintf("valid %s request", pod.Kind), patches...) +} + +// overwriteProbes overwrites readiness/liveness probes of this pod when +// both transparent proxy is enabled and overwrite probes is true for the pod. 
+func (w *MeshWebhook) overwriteProbes(ns corev1.Namespace, pod *corev1.Pod) error { + tproxyEnabled, err := common.TransparentProxyEnabled(ns, *pod, w.EnableTransparentProxy) + if err != nil { + return err + } + + overwriteProbes, err := common.ShouldOverwriteProbes(*pod, w.TProxyOverwriteProbes) + if err != nil { + return err + } + + if tproxyEnabled && overwriteProbes { + // We don't use the loop index because this needs to line up with iptablesConfigJSON, + // which is performed before the sidecar is injected. + idx := 0 + for _, container := range pod.Spec.Containers { + // skip the "envoy-sidecar" container from having its probes overridden + if container.Name == sidecarContainer { + continue + } + if container.LivenessProbe != nil && container.LivenessProbe.HTTPGet != nil { + container.LivenessProbe.HTTPGet.Port = intstr.FromInt(exposedPathsLivenessPortsRangeStart + idx) + } + if container.ReadinessProbe != nil && container.ReadinessProbe.HTTPGet != nil { + container.ReadinessProbe.HTTPGet.Port = intstr.FromInt(exposedPathsReadinessPortsRangeStart + idx) + } + if container.StartupProbe != nil && container.StartupProbe.HTTPGet != nil { + container.StartupProbe.HTTPGet.Port = intstr.FromInt(exposedPathsStartupPortsRangeStart + idx) + } + idx++ + } + } + return nil +} + +func (w *MeshWebhook) injectVolumeMount(pod corev1.Pod) { + containersToInject := splitCommaSeparatedItemsFromAnnotation(constants.AnnotationMeshInjectMountVolumes, pod) + + for index, container := range pod.Spec.Containers { + if slices.Contains(containersToInject, container.Name) { + pod.Spec.Containers[index].VolumeMounts = append(pod.Spec.Containers[index].VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: "/consul/mesh-inject", + }) + } + } +} + +func (w *MeshWebhook) shouldInject(pod corev1.Pod, namespace string) (bool, error) { + // Don't inject in the Kubernetes system namespaces + if kubeSystemNamespaces.Contains(namespace) { + return false, nil + } + + // Namespace 
logic + // If in deny list, don't inject + if w.DenyK8sNamespacesSet.Contains(namespace) { + return false, nil + } + + // If not in allow list or allow list is not *, don't inject + if !w.AllowK8sNamespacesSet.Contains("*") && !w.AllowK8sNamespacesSet.Contains(namespace) { + return false, nil + } + + // If we already injected then don't inject again + if pod.Annotations[constants.KeyMeshInjectStatus] != "" || pod.Annotations[constants.KeyInjectStatus] != "" { + return false, nil + } + + // If the explicit true/false is on, then take that value. Note that + // this has to be the last check since it sets a default value after + // all other checks. + if raw, ok := pod.Annotations[constants.AnnotationMeshInject]; ok { + return strconv.ParseBool(raw) + } + + return !w.RequireAnnotation, nil +} + +func (w *MeshWebhook) defaultAnnotations(pod *corev1.Pod, podJson string) error { + if pod.Annotations == nil { + pod.Annotations = make(map[string]string) + } + + pod.Annotations[constants.AnnotationOriginalPod] = podJson + pod.Annotations[constants.AnnotationConsulK8sVersion] = version.GetHumanVersion() + + return nil +} + +// prometheusAnnotations sets the Prometheus scraping configuration +// annotations on the Pod. +func (w *MeshWebhook) prometheusAnnotations(pod *corev1.Pod) error { + enableMetrics, err := w.MetricsConfig.EnableMetrics(*pod) + if err != nil { + return err + } + prometheusScrapePort, err := w.MetricsConfig.PrometheusScrapePort(*pod) + if err != nil { + return err + } + prometheusScrapePath := w.MetricsConfig.PrometheusScrapePath(*pod) + + if enableMetrics { + pod.Annotations[constants.AnnotationPrometheusScrape] = "true" + pod.Annotations[constants.AnnotationPrometheusPort] = prometheusScrapePort + pod.Annotations[constants.AnnotationPrometheusPath] = prometheusScrapePath + } + return nil +} + +// consulNamespace returns the namespace that a service should be +// registered in based on the namespace options. 
It returns an +// empty string if namespaces aren't enabled. +func (w *MeshWebhook) consulNamespace(ns string) string { + return namespaces.ConsulNamespace(ns, w.EnableNamespaces, w.ConsulDestinationNamespace, w.EnableK8SNSMirroring, w.K8SNSMirroringPrefix) +} + +func findServiceAccountVolumeMount(pod corev1.Pod) (corev1.VolumeMount, string, error) { + // Find the volume mount that is mounted at the known + // service account token location + var volumeMount corev1.VolumeMount + for _, container := range pod.Spec.Containers { + for _, vm := range container.VolumeMounts { + if vm.MountPath == "/var/run/secrets/kubernetes.io/serviceaccount" { + volumeMount = vm + break + } + } + } + + // Return an error if volumeMount is still empty + if (corev1.VolumeMount{}) == volumeMount { + return volumeMount, "", errors.New("unable to find service account token volumeMount") + } + + return volumeMount, "/var/run/secrets/kubernetes.io/serviceaccount/token", nil +} + +func (w *MeshWebhook) SetupWithManager(mgr ctrl.Manager) { + w.decoder = admission.NewDecoder(mgr.GetScheme()) + mgr.GetWebhookServer().Register("/mutate", &admission.Webhook{Handler: w}) +} diff --git a/control-plane/connect-inject/webhookv2/mesh_webhook_ent_test.go b/control-plane/connect-inject/webhookv2/mesh_webhook_ent_test.go new file mode 100644 index 0000000000..c9898499a4 --- /dev/null +++ b/control-plane/connect-inject/webhookv2/mesh_webhook_ent_test.go @@ -0,0 +1,114 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:build enterprise + +package webhookv2 + +import ( + "context" + "testing" + + "github.com/deckarep/golang-set" + logrtest "github.com/go-logr/logr/testing" + "github.com/stretchr/testify/require" + admissionv1 "k8s.io/api/admission/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +// Test that the annotation for the Consul namespace is added. +func TestHandler_MutateWithNamespaces_Annotation(t *testing.T) { + t.Parallel() + sourceKubeNS := "kube-ns" + + cases := map[string]struct { + ConsulDestinationNamespace string + Mirroring bool + MirroringPrefix string + ExpNamespaceAnnotation string + }{ + "dest: default": { + ConsulDestinationNamespace: "default", + ExpNamespaceAnnotation: "default", + }, + "dest: foo": { + ConsulDestinationNamespace: "foo", + ExpNamespaceAnnotation: "foo", + }, + "mirroring": { + Mirroring: true, + ExpNamespaceAnnotation: sourceKubeNS, + }, + "mirroring with prefix": { + Mirroring: true, + MirroringPrefix: "prefix-", + ExpNamespaceAnnotation: "prefix-" + sourceKubeNS, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + testClient := test.TestServerWithMockConnMgrWatcher(t, nil) + + s := runtime.NewScheme() + s.AddKnownTypes(schema.GroupVersion{Group: "", Version: "v1"}, &corev1.Pod{}) + decoder := admission.NewDecoder(s) + + webhook := MeshWebhook{ + Log: logrtest.NewTestLogger(t), + AllowK8sNamespacesSet: mapset.NewSet("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableNamespaces: true, + ConsulDestinationNamespace: c.ConsulDestinationNamespace, + EnableK8SNSMirroring: c.Mirroring, + K8SNSMirroringPrefix: c.MirroringPrefix, + ConsulConfig: testClient.Cfg, + 
ConsulServerConnMgr: testClient.Watcher, + decoder: decoder, + Clientset: clientWithNamespace(sourceKubeNS), + } + + pod := corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Namespace: sourceKubeNS, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + request := admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Object: encodeRaw(t, &pod), + Namespace: sourceKubeNS, + }, + } + resp := webhook.Handle(context.Background(), request) + require.Equal(t, resp.Allowed, true) + + // Check that the annotation was added as a patch. + var consulNamespaceAnnotationValue string + for _, patch := range resp.Patches { + if patch.Path == "/metadata/annotations" { + for annotationName, annotationValue := range patch.Value.(map[string]interface{}) { + if annotationName == constants.AnnotationConsulNamespace { + consulNamespaceAnnotationValue = annotationValue.(string) + } + } + } + } + require.NotEmpty(t, consulNamespaceAnnotationValue, "no namespace annotation set") + require.Equal(t, c.ExpNamespaceAnnotation, consulNamespaceAnnotationValue) + }) + } +} diff --git a/control-plane/connect-inject/webhookv2/mesh_webhook_test.go b/control-plane/connect-inject/webhookv2/mesh_webhook_test.go new file mode 100644 index 0000000000..9158449cd3 --- /dev/null +++ b/control-plane/connect-inject/webhookv2/mesh_webhook_test.go @@ -0,0 +1,2174 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "context" + "encoding/json" + "strconv" + "strings" + "testing" + + mapset "github.com/deckarep/golang-set" + logrtest "github.com/go-logr/logr/testr" + "github.com/hashicorp/consul/sdk/iptables" + "github.com/stretchr/testify/require" + "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/lifecycle" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/metrics" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" + "github.com/hashicorp/consul-k8s/version" +) + +func TestHandlerHandle(t *testing.T) { + t.Parallel() + basicSpec := corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + } + s := runtime.NewScheme() + s.AddKnownTypes(schema.GroupVersion{ + Group: "", + Version: "v1", + }, &corev1.Pod{}) + decoder := admission.NewDecoder(s) + + cases := []struct { + Name string + Webhook MeshWebhook + Req admission.Request + Err string // expected error string, not exact + Patches []jsonpatch.Operation + }{ + { + "kube-system namespace", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: metav1.NamespaceSystem, + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + }, + }, + "", + nil, + }, + + { + "already injected", + MeshWebhook{ + 
Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.KeyMeshInjectStatus: constants.Injected, + }, + }, + Spec: basicSpec, + }), + }, + }, + "", + nil, + }, + + { + "empty pod basic", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/metadata/labels", + }, + { + Operation: "add", + Path: "/metadata/annotations", + }, + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + }, + }, + { + "empty pod basic with lifecycle", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + LifecycleConfig: lifecycle.Config{DefaultEnableProxyLifecycle: true}, + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/metadata/labels", + }, + { + Operation: "add", + Path: "/metadata/annotations", + }, + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + + { 
+ Operation: "add", + Path: "/spec/containers/0/readinessProbe", + }, + { + Operation: "add", + Path: "/spec/containers/0/securityContext", + }, + { + Operation: "replace", + Path: "/spec/containers/0/name", + }, + { + Operation: "add", + Path: "/spec/containers/0/args", + }, + { + Operation: "add", + Path: "/spec/containers/0/env", + }, + { + Operation: "add", + Path: "/spec/containers/0/volumeMounts", + }, + }, + }, + { + "pod with destinations specified", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationMeshDestinations: "myPort1.echo:1234,myPort2.db:1234", + }, + }, + Spec: basicSpec, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/metadata/labels", + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyMeshInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + }, + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + { + Operation: "add", + Path: "/spec/containers/0/env", + }, + }, + }, + { + "error pod with incorrect destinations specified", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: 
admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationMeshDestinations: "db:1234", + }, + }, + Spec: basicSpec, + }), + }, + }, + "error creating the port environment variables based on pod annotations", + []jsonpatch.Operation{}, + }, + { + "empty pod with injection disabled", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationMeshInject: "false", + }, + }, + Spec: basicSpec, + }), + }, + }, + "", + nil, + }, + + { + "empty pod with injection truthy", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationMeshInject: "t", + }, + }, + Spec: basicSpec, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyMeshInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + 
escapeJSONPointer(constants.AnnotationConsulK8sVersion), + }, + { + Operation: "add", + Path: "/metadata/labels", + }, + }, + }, + + { + "pod with empty volume mount annotation", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationMeshInjectMountVolumes: "", + }, + }, + Spec: basicSpec, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyMeshInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + }, + { + Operation: "add", + Path: "/metadata/labels", + }, + }, + }, + { + "pod with volume mount annotation", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationMeshInjectMountVolumes: "web,unknown,web_three_point_oh", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + { + Name: "web_two_point_oh", + }, + { + Name: 
"web_three_point_oh", + }, + }, + }, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/containers/0/volumeMounts", + }, + { + Operation: "add", + Path: "/spec/containers/2/volumeMounts", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/3", + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyMeshInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + }, + { + Operation: "add", + Path: "/metadata/labels", + }, + }, + }, + { + "pod with sidecar volume mount annotation", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationConsulSidecarUserVolume: "[{\"name\":\"bbb\",\"csi\":{\"driver\":\"bob\"}}]", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyMeshInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + 
escapeJSONPointer(constants.AnnotationConsulK8sVersion), + }, + { + Operation: "add", + Path: "/metadata/labels", + }, + }, + }, + { + "pod with sidecar invalid volume mount annotation", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationConsulSidecarUserVolume: "[a]", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + }), + }, + }, + "error unmarshalling sidecar user volumes: invalid character 'a' looking for beginning of value", + nil, + }, + { + "pod with service annotation", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + Spec: basicSpec, + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "foo", + }, + }, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyMeshInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + }, + { + Operation: 
"add", + Path: "/metadata/labels", + }, + }, + }, + + { + "pod with existing label", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "testLabel": "123", + }, + }, + Spec: basicSpec, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + { + Operation: "add", + Path: "/metadata/annotations", + }, + { + Operation: "add", + Path: "/metadata/labels/" + escapeJSONPointer(constants.KeyMeshInjectStatus), + }, + }, + }, + { + "tproxy with overwriteProbes is enabled", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableTransparentProxy: true, + TProxyOverwriteProbes: true, + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{}, + // We're setting an existing annotation so that we can assert on the + // specific annotations that are set as a result of probes being overwritten. 
+ Annotations: map[string]string{"foo": "bar"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8080), + }, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8081), + }, + }, + }, + }, + }, + }, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + { + Operation: "add", + Path: "/metadata/labels", + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyMeshInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyTransparentProxyStatus), + }, + + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + }, + { + Operation: "replace", + Path: "/spec/containers/0/livenessProbe/httpGet/port", + }, + { + Operation: "replace", + Path: "/spec/containers/0/readinessProbe/httpGet/port", + }, + }, + }, + { + "dns redirection enabled", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableTransparentProxy: true, + TProxyOverwriteProbes: true, + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{}, + Annotations: map[string]string{constants.KeyConsulDNS: "true"}, + }, + Spec: corev1.PodSpec{ + 
Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + { + Operation: "add", + Path: "/metadata/labels", + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyMeshInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyTransparentProxyStatus), + }, + + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + }, + { + Operation: "add", + Path: "/spec/dnsPolicy", + }, + { + Operation: "add", + Path: "/spec/dnsConfig", + }, + }, + }, + { + "dns redirection only enabled if tproxy enabled", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableTransparentProxy: true, + TProxyOverwriteProbes: true, + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{}, + Annotations: map[string]string{ + constants.KeyConsulDNS: "true", + constants.KeyTransparentProxy: "false", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + { + Operation: "add", + Path: "/metadata/labels", + }, + { + Operation: "add", + Path: 
"/metadata/annotations/" + escapeJSONPointer(constants.KeyMeshInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + }, + // Note: no DNS policy/config additions. + }, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + tt.Webhook.ConsulConfig = &consul.Config{HTTPPort: 8500} + ctx := context.Background() + resp := tt.Webhook.Handle(ctx, tt.Req) + if (tt.Err == "") != resp.Allowed { + t.Fatalf("allowed: %v, expected err: %v", resp.Allowed, tt.Err) + } + if tt.Err != "" { + require.Contains(t, resp.Result.Message, tt.Err) + return + } + + actual := resp.Patches + if len(actual) > 0 { + for i := range actual { + actual[i].Value = nil + } + } + require.ElementsMatch(t, tt.Patches, actual) + }) + } +} + +// This test validates that overwrite probes match the iptables configuration from iptablesConfigJSON() +// Because they happen at different points in the injection, the port numbers can get out of sync. 
+func TestHandlerHandle_ValidateOverwriteProbes(t *testing.T) { + // TODO (v2/nitya): enable when expose paths and L7 are implemented + t.Skip("Tproxy probes are not supported yet") + t.Parallel() + s := runtime.NewScheme() + s.AddKnownTypes(schema.GroupVersion{ + Group: "", + Version: "v1", + }, &corev1.Pod{}) + decoder := admission.NewDecoder(s) + + cases := []struct { + Name string + Webhook MeshWebhook + Req admission.Request + Err string // expected error string, not exact + Patches []jsonpatch.Operation + }{ + { + "tproxy with overwriteProbes is enabled", + MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableTransparentProxy: true, + TProxyOverwriteProbes: true, + LifecycleConfig: lifecycle.Config{DefaultEnableProxyLifecycle: true}, + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{}, + // We're setting an existing annotation so that we can assert on the + // specific annotations that are set as a result of probes being overwritten. 
+ Annotations: map[string]string{"foo": "bar"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8080), + }, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8081), + }, + }, + }, + StartupProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8082), + }, + }, + }, + }, + }, + }, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + { + Operation: "replace", + Path: "/spec/containers/0/name", + }, + { + Operation: "add", + Path: "/spec/containers/0/args", + }, + { + Operation: "add", + Path: "/spec/containers/0/env", + }, + { + Operation: "add", + Path: "/spec/containers/0/volumeMounts", + }, + { + Operation: "add", + Path: "/spec/containers/0/readinessProbe/tcpSocket", + }, + { + Operation: "add", + Path: "/spec/containers/0/readinessProbe/initialDelaySeconds", + }, + { + Operation: "remove", + Path: "/spec/containers/0/readinessProbe/httpGet", + }, + { + Operation: "add", + Path: "/spec/containers/0/securityContext", + }, + { + Operation: "remove", + Path: "/spec/containers/0/startupProbe", + }, + { + Operation: "remove", + Path: "/spec/containers/0/livenessProbe", + }, + { + Operation: "add", + Path: "/metadata/labels", + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyMeshInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyTransparentProxyStatus), + }, + + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), + }, + { + Operation: "add", 
+ Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + }, + }, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + tt.Webhook.ConsulConfig = &consul.Config{HTTPPort: 8500} + ctx := context.Background() + resp := tt.Webhook.Handle(ctx, tt.Req) + if (tt.Err == "") != resp.Allowed { + t.Fatalf("allowed: %v, expected err: %v", resp.Allowed, tt.Err) + } + if tt.Err != "" { + require.Contains(t, resp.Result.Message, tt.Err) + return + } + + var iptablesCfg iptables.Config + var overwritePorts []string + actual := resp.Patches + if len(actual) > 0 { + for i := range actual { + + // We want to grab the iptables configuration from the connect-init container's + // environment. + if actual[i].Path == "/spec/initContainers" { + value := actual[i].Value.([]any) + valueMap := value[0].(map[string]any) + envs := valueMap["env"].([]any) + redirectEnv := envs[6].(map[string]any) + require.Equal(t, redirectEnv["name"].(string), "CONSUL_REDIRECT_TRAFFIC_CONFIG") + iptablesJson := redirectEnv["value"].(string) + + err := json.Unmarshal([]byte(iptablesJson), &iptablesCfg) + require.NoError(t, err) + } + + // We want to accumulate the httpGet Probes from the application container to + // compare them to the iptables rules. This is now the second container in the spec + if strings.Contains(actual[i].Path, "/spec/containers/1") { + valueMap, ok := actual[i].Value.(map[string]any) + require.True(t, ok) + + for k, v := range valueMap { + if strings.Contains(k, "Probe") { + probe := v.(map[string]any) + httpProbe := probe["httpGet"] + httpProbeMap := httpProbe.(map[string]any) + port := httpProbeMap["port"] + portNum := port.(float64) + + overwritePorts = append(overwritePorts, strconv.Itoa(int(portNum))) + } + } + } + + // nil out all the patch values to just compare the keys changing. 
+ actual[i].Value = nil + } + } + // Make sure the iptables excluded ports match the ports on the container + require.ElementsMatch(t, iptablesCfg.ExcludeInboundPorts, overwritePorts) + require.ElementsMatch(t, tt.Patches, actual) + }) + } +} + +func TestHandlerValidatePorts(t *testing.T) { + cases := []struct { + Name string + Pod *corev1.Pod + Err string + }{ + { + "basic pod, with ports", + &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: 8080, + }, + }, + }, + { + Name: "web-side", + }, + }, + }, + }, + "", + }, + { + "basic pod, with unnamed ports", + &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + Ports: []corev1.ContainerPort{ + { + ContainerPort: 8080, + }, + }, + }, + { + Name: "web-side", + }, + }, + }, + }, + "", + }, + { + "basic pod, with invalid prefix name", + &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + Ports: []corev1.ContainerPort{ + { + Name: "cslport-8080", + ContainerPort: 8080, + }, + }, + }, + { + Name: "web-side", + }, + }, + }, + }, + "error creating pod: port names cannot be prefixed with \"cslport-\" as that prefix is reserved", + }, + } + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(schema.GroupVersion{ + Group: "", + Version: "v1", + }, &corev1.Pod{}) + decoder := admission.NewDecoder(s) + + w := MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + EnableTransparentProxy: true, + TProxyOverwriteProbes: true, + decoder: decoder, + ConsulConfig: &consul.Config{HTTPPort: 8500}, + Clientset: defaultTestClientWithNamespace(), + } + req := admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, tt.Pod), + }, + } + resp := 
w.Handle(context.Background(), req) + if tt.Err == "" { + require.True(t, resp.Allowed) + } else { + require.False(t, resp.Allowed) + require.Contains(t, resp.Result.Message, tt.Err) + } + + }) + } +} +func TestHandlerDefaultAnnotations(t *testing.T) { + cases := []struct { + Name string + Pod *corev1.Pod + Expected map[string]string + Err string + }{ + { + "empty", + &corev1.Pod{}, + map[string]string{ + constants.AnnotationOriginalPod: "{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"containers\":null},\"status\":{}}", + constants.AnnotationConsulK8sVersion: version.GetHumanVersion(), + }, + "", + }, + { + "basic pod, no ports", + &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + { + Name: "web-side", + }, + }, + }, + }, + map[string]string{ + constants.AnnotationOriginalPod: "{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"containers\":[{\"name\":\"web\",\"resources\":{}},{\"name\":\"web-side\",\"resources\":{}}]},\"status\":{}}", + constants.AnnotationConsulK8sVersion: version.GetHumanVersion(), + }, + "", + }, + { + "basic pod, with ports", + &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: 8080, + }, + }, + }, + { + Name: "web-side", + }, + }, + }, + }, + map[string]string{ + constants.AnnotationOriginalPod: "{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"containers\":[{\"name\":\"web\",\"ports\":[{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{}},{\"name\":\"web-side\",\"resources\":{}}]},\"status\":{}}", + constants.AnnotationConsulK8sVersion: version.GetHumanVersion(), + }, + "", + }, + + { + "basic pod, with unnamed ports", + &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + Ports: []corev1.ContainerPort{ + { + ContainerPort: 8080, + }, + }, + }, + { + Name: "web-side", + }, + }, + }, + }, + map[string]string{ + 
constants.AnnotationOriginalPod: "{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"containers\":[{\"name\":\"web\",\"ports\":[{\"containerPort\":8080}],\"resources\":{}},{\"name\":\"web-side\",\"resources\":{}}]},\"status\":{}}", + constants.AnnotationConsulK8sVersion: version.GetHumanVersion(), + }, + "", + }, + } + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + podJson, err := json.Marshal(tt.Pod) + require.NoError(t, err) + + var w MeshWebhook + err = w.defaultAnnotations(tt.Pod, string(podJson)) + if (tt.Err != "") != (err != nil) { + t.Fatalf("actual: %v, expected err: %v", err, tt.Err) + } + if tt.Err != "" { + require.Contains(t, err.Error(), tt.Err) + return + } + + actual := tt.Pod.Annotations + if len(actual) == 0 { + actual = nil + } + require.Equal(t, tt.Expected, actual) + }) + } +} + +func TestHandlerPrometheusAnnotations(t *testing.T) { + cases := []struct { + Name string + Webhook MeshWebhook + Expected map[string]string + }{ + { + Name: "Sets the correct prometheus annotations on the pod if metrics are enabled", + Webhook: MeshWebhook{ + MetricsConfig: metrics.Config{ + DefaultEnableMetrics: true, + DefaultPrometheusScrapePort: "20200", + DefaultPrometheusScrapePath: "/metrics", + }, + }, + Expected: map[string]string{ + constants.AnnotationPrometheusScrape: "true", + constants.AnnotationPrometheusPort: "20200", + constants.AnnotationPrometheusPath: "/metrics", + }, + }, + { + Name: "Does not set annotations if metrics are not enabled", + Webhook: MeshWebhook{ + MetricsConfig: metrics.Config{ + DefaultEnableMetrics: false, + DefaultPrometheusScrapePort: "20200", + DefaultPrometheusScrapePath: "/metrics", + }, + }, + Expected: map[string]string{}, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + h := tt.Webhook + pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}}} + + err := h.prometheusAnnotations(pod) + require.NoError(err) + + 
require.Equal(pod.Annotations, tt.Expected) + }) + } +} + +// Test consulNamespace function. +func TestConsulNamespace(t *testing.T) { + cases := []struct { + Name string + EnableNamespaces bool + ConsulDestinationNamespace string + EnableK8SNSMirroring bool + K8SNSMirroringPrefix string + K8sNamespace string + Expected string + }{ + { + "namespaces disabled", + false, + "default", + false, + "", + "namespace", + "", + }, + + { + "namespaces disabled, mirroring enabled", + false, + "default", + true, + "", + "namespace", + "", + }, + + { + "namespaces disabled, mirroring enabled, prefix defined", + false, + "default", + true, + "test-", + "namespace", + "", + }, + + { + "namespaces enabled, mirroring disabled", + true, + "default", + false, + "", + "namespace", + "default", + }, + + { + "namespaces enabled, mirroring disabled, prefix defined", + true, + "default", + false, + "test-", + "namespace", + "default", + }, + + { + "namespaces enabled, mirroring enabled", + true, + "default", + true, + "", + "namespace", + "namespace", + }, + + { + "namespaces enabled, mirroring enabled, prefix defined", + true, + "default", + true, + "test-", + "namespace", + "test-namespace", + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + w := MeshWebhook{ + EnableNamespaces: tt.EnableNamespaces, + ConsulDestinationNamespace: tt.ConsulDestinationNamespace, + EnableK8SNSMirroring: tt.EnableK8SNSMirroring, + K8SNSMirroringPrefix: tt.K8SNSMirroringPrefix, + } + + ns := w.consulNamespace(tt.K8sNamespace) + + require.Equal(tt.Expected, ns) + }) + } +} + +// Test shouldInject function. 
+func TestShouldInject(t *testing.T) { + cases := []struct { + Name string + Pod *corev1.Pod + K8sNamespace string + EnableNamespaces bool + AllowK8sNamespacesSet mapset.Set + DenyK8sNamespacesSet mapset.Set + Expected bool + }{ + { + "kube-system not injected", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + // Service annotation is required for injection + constants.AnnotationService: "testing", + }, + }, + }, + "kube-system", + false, + mapset.NewSet(), + mapset.NewSet(), + false, + }, + { + "kube-public not injected", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "kube-public", + false, + mapset.NewSet(), + mapset.NewSet(), + false, + }, + { + "namespaces disabled, empty allow/deny lists", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + false, + mapset.NewSet(), + mapset.NewSet(), + false, + }, + { + "namespaces disabled, allow *", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + false, + mapset.NewSetWith("*"), + mapset.NewSet(), + true, + }, + { + "namespaces disabled, allow default", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + false, + mapset.NewSetWith("default"), + mapset.NewSet(), + true, + }, + { + "namespaces disabled, allow * and default", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + false, + mapset.NewSetWith("*", "default"), + mapset.NewSet(), + true, + }, + { + "namespaces disabled, allow only ns1 and ns2", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + 
constants.AnnotationService: "testing", + }, + }, + }, + "default", + false, + mapset.NewSetWith("ns1", "ns2"), + mapset.NewSet(), + false, + }, + { + "namespaces disabled, deny default ns", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + false, + mapset.NewSet(), + mapset.NewSetWith("default"), + false, + }, + { + "namespaces disabled, allow *, deny default ns", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + false, + mapset.NewSetWith("*"), + mapset.NewSetWith("default"), + false, + }, + { + "namespaces disabled, default ns in both allow and deny lists", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + false, + mapset.NewSetWith("default"), + mapset.NewSetWith("default"), + false, + }, + { + "namespaces enabled, empty allow/deny lists", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSet(), + mapset.NewSet(), + false, + }, + { + "namespaces enabled, allow *", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSetWith("*"), + mapset.NewSet(), + true, + }, + { + "namespaces enabled, allow default", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSetWith("default"), + mapset.NewSet(), + true, + }, + { + "namespaces enabled, allow * and default", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + 
"default", + true, + mapset.NewSetWith("*", "default"), + mapset.NewSet(), + true, + }, + { + "namespaces enabled, allow only ns1 and ns2", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSetWith("ns1", "ns2"), + mapset.NewSet(), + false, + }, + { + "namespaces enabled, deny default ns", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSet(), + mapset.NewSetWith("default"), + false, + }, + { + "namespaces enabled, allow *, deny default ns", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSetWith("*"), + mapset.NewSetWith("default"), + false, + }, + { + "namespaces enabled, default ns in both allow and deny lists", + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationService: "testing", + }, + }, + }, + "default", + true, + mapset.NewSetWith("default"), + mapset.NewSetWith("default"), + false, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + w := MeshWebhook{ + RequireAnnotation: false, + EnableNamespaces: tt.EnableNamespaces, + AllowK8sNamespacesSet: tt.AllowK8sNamespacesSet, + DenyK8sNamespacesSet: tt.DenyK8sNamespacesSet, + } + + injected, err := w.shouldInject(*tt.Pod, tt.K8sNamespace) + + require.Equal(nil, err) + require.Equal(tt.Expected, injected) + }) + } +} + +func TestOverwriteProbes(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + tproxyEnabled bool + overwriteProbes bool + podContainers []corev1.Container + expLivenessPort []int + expReadinessPort []int + expStartupPort []int + additionalAnnotations map[string]string + }{ + "transparent proxy disabled; overwrites 
probes disabled": { + tproxyEnabled: false, + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8080), + }, + }, + }, + }, + }, + expReadinessPort: []int{8080}, + }, + "transparent proxy enabled; overwrite probes disabled": { + tproxyEnabled: true, + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8080), + }, + }, + }, + }, + }, + expReadinessPort: []int{8080}, + }, + "transparent proxy disabled; overwrite probes enabled": { + tproxyEnabled: false, + overwriteProbes: true, + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8080), + }, + }, + }, + }, + }, + expReadinessPort: []int{8080}, + }, + "just the readiness probe": { + tproxyEnabled: true, + overwriteProbes: true, + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8080), + }, + }, + }, + }, + }, + expReadinessPort: []int{exposedPathsReadinessPortsRangeStart}, + }, + "just the liveness probe": { + tproxyEnabled: true, + overwriteProbes: true, + podContainers: 
[]corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8081), + }, + }, + }, + }, + }, + expLivenessPort: []int{exposedPathsLivenessPortsRangeStart}, + }, + "skips envoy sidecar": { + tproxyEnabled: true, + overwriteProbes: true, + podContainers: []corev1.Container{ + { + Name: sidecarContainer, + }, + }, + }, + "readiness, liveness and startup probes": { + tproxyEnabled: true, + overwriteProbes: true, + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8081), + }, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8080), + }, + }, + }, + StartupProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8082), + }, + }, + }, + }, + }, + expLivenessPort: []int{exposedPathsLivenessPortsRangeStart}, + expReadinessPort: []int{exposedPathsReadinessPortsRangeStart}, + expStartupPort: []int{exposedPathsStartupPortsRangeStart}, + }, + "readiness, liveness and startup probes multiple containers": { + tproxyEnabled: true, + overwriteProbes: true, + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8081), + }, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: 
corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8080), + }, + }, + }, + StartupProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8080), + }, + }, + }, + }, + { + Name: "test-2", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8083, + }, + { + Name: "http", + ContainerPort: 8082, + }, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8083), + }, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8082), + }, + }, + }, + StartupProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8082), + }, + }, + }, + }, + }, + expLivenessPort: []int{exposedPathsLivenessPortsRangeStart, exposedPathsLivenessPortsRangeStart + 1}, + expReadinessPort: []int{exposedPathsReadinessPortsRangeStart, exposedPathsReadinessPortsRangeStart + 1}, + expStartupPort: []int{exposedPathsStartupPortsRangeStart, exposedPathsStartupPortsRangeStart + 1}, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: c.podContainers, + }, + } + if c.additionalAnnotations != nil { + pod.ObjectMeta.Annotations = c.additionalAnnotations + } + + w := MeshWebhook{ + EnableTransparentProxy: c.tproxyEnabled, + TProxyOverwriteProbes: c.overwriteProbes, + } + err := w.overwriteProbes(corev1.Namespace{}, pod) + require.NoError(t, err) + for i, container := range pod.Spec.Containers { + if container.ReadinessProbe != nil { + require.Equal(t, c.expReadinessPort[i], container.ReadinessProbe.HTTPGet.Port.IntValue()) + } + if container.LivenessProbe != nil { + require.Equal(t, c.expLivenessPort[i], 
container.LivenessProbe.HTTPGet.Port.IntValue()) + } + if container.StartupProbe != nil { + require.Equal(t, c.expStartupPort[i], container.StartupProbe.HTTPGet.Port.IntValue()) + } + } + }) + } +} + +// encodeRaw is a helper to encode some data into a RawExtension. +func encodeRaw(t *testing.T, input interface{}) runtime.RawExtension { + data, err := json.Marshal(input) + require.NoError(t, err) + return runtime.RawExtension{Raw: data} +} + +// https://tools.ietf.org/html/rfc6901 +func escapeJSONPointer(s string) string { + s = strings.Replace(s, "~", "~0", -1) + s = strings.Replace(s, "/", "~1", -1) + return s +} + +func defaultTestClientWithNamespace() kubernetes.Interface { + return clientWithNamespace("default") +} + +func clientWithNamespace(name string) kubernetes.Interface { + ns := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + return fake.NewSimpleClientset(&ns) +} diff --git a/control-plane/connect-inject/webhookv2/redirect_traffic.go b/control-plane/connect-inject/webhookv2/redirect_traffic.go new file mode 100644 index 0000000000..8432372831 --- /dev/null +++ b/control-plane/connect-inject/webhookv2/redirect_traffic.go @@ -0,0 +1,137 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "encoding/json" + "fmt" + "strconv" + + "github.com/hashicorp/consul/sdk/iptables" + corev1 "k8s.io/api/core/v1" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" +) + +// addRedirectTrafficConfigAnnotation creates an iptables.Config in JSON format based on proxy configuration. +// iptables.Config: +// +// ConsulDNSIP: an environment variable named RESOURCE_PREFIX_DNS_SERVICE_HOST where RESOURCE_PREFIX is the consul.fullname in helm. 
+// ProxyUserID: a constant set in Annotations +// ProxyInboundPort: the service port or bind port +// ProxyOutboundPort: default transparent proxy outbound port or transparent proxy outbound listener port +// ExcludeInboundPorts: prometheus, envoy stats, expose paths, checks and excluded pod annotations +// ExcludeOutboundPorts: pod annotations +// ExcludeOutboundCIDRs: pod annotations +// ExcludeUIDs: pod annotations +func (w *MeshWebhook) iptablesConfigJSON(pod corev1.Pod, ns corev1.Namespace) (string, error) { + cfg := iptables.Config{ + ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + } + + // Set the proxy's inbound port. + cfg.ProxyInboundPort = constants.ProxyDefaultInboundPort + + // Set the proxy's outbound port. + cfg.ProxyOutboundPort = iptables.DefaultTProxyOutboundPort + + // If metrics are enabled, get the prometheusScrapePort and exclude it from the inbound ports + enableMetrics, err := w.MetricsConfig.EnableMetrics(pod) + if err != nil { + return "", err + } + if enableMetrics { + prometheusScrapePort, err := w.MetricsConfig.PrometheusScrapePort(pod) + if err != nil { + return "", err + } + cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, prometheusScrapePort) + } + + // Exclude any overwritten liveness/readiness/startup ports from redirection. + overwriteProbes, err := common.ShouldOverwriteProbes(pod, w.TProxyOverwriteProbes) + if err != nil { + return "", err + } + + // Exclude the port on which the proxy health check port will be configured if + // using the proxy health check for a service. + if useProxyHealthCheck(pod) { + cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, strconv.Itoa(constants.ProxyDefaultHealthPort)) + } + + if overwriteProbes { + // We don't use the loop index because this needs to line up w.overwriteProbes(), + // which is performed after the sidecar is injected. 
+ idx := 0 + for _, container := range pod.Spec.Containers { + // skip the "consul-dataplane" container from having its probes overridden + if container.Name == sidecarContainer { + continue + } + if container.LivenessProbe != nil && container.LivenessProbe.HTTPGet != nil { + cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, strconv.Itoa(exposedPathsLivenessPortsRangeStart+idx)) + } + if container.ReadinessProbe != nil && container.ReadinessProbe.HTTPGet != nil { + cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, strconv.Itoa(exposedPathsReadinessPortsRangeStart+idx)) + } + if container.StartupProbe != nil && container.StartupProbe.HTTPGet != nil { + cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, strconv.Itoa(exposedPathsStartupPortsRangeStart+idx)) + } + idx++ + } + } + + // Inbound ports + excludeInboundPorts := splitCommaSeparatedItemsFromAnnotation(constants.AnnotationTProxyExcludeInboundPorts, pod) + cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, excludeInboundPorts...) + + // Outbound ports + excludeOutboundPorts := splitCommaSeparatedItemsFromAnnotation(constants.AnnotationTProxyExcludeOutboundPorts, pod) + cfg.ExcludeOutboundPorts = append(cfg.ExcludeOutboundPorts, excludeOutboundPorts...) + + // Outbound CIDRs + excludeOutboundCIDRs := splitCommaSeparatedItemsFromAnnotation(constants.AnnotationTProxyExcludeOutboundCIDRs, pod) + cfg.ExcludeOutboundCIDRs = append(cfg.ExcludeOutboundCIDRs, excludeOutboundCIDRs...) + + // UIDs + excludeUIDs := splitCommaSeparatedItemsFromAnnotation(constants.AnnotationTProxyExcludeUIDs, pod) + cfg.ExcludeUIDs = append(cfg.ExcludeUIDs, excludeUIDs...) + + // Add init container user ID to exclude from traffic redirection. 
+ cfg.ExcludeUIDs = append(cfg.ExcludeUIDs, strconv.Itoa(initContainersUserAndGroupID)) + + dnsEnabled, err := consulDNSEnabled(ns, pod, w.EnableConsulDNS, w.EnableTransparentProxy) + if err != nil { + return "", err + } + + if dnsEnabled { + // If Consul DNS is enabled, we find the environment variable that has the value + // of the ClusterIP of the Consul DNS Service. constructDNSServiceHostName returns + // the name of the env variable whose value is the ClusterIP of the Consul DNS Service. + cfg.ConsulDNSIP = consulDataplaneDNSBindHost + cfg.ConsulDNSPort = consulDataplaneDNSBindPort + } + + iptablesConfigJson, err := json.Marshal(&cfg) + if err != nil { + return "", fmt.Errorf("could not marshal iptables config: %w", err) + } + + return string(iptablesConfigJson), nil +} + +// addRedirectTrafficConfigAnnotation add the created iptables JSON config as an annotation on the provided pod. +func (w *MeshWebhook) addRedirectTrafficConfigAnnotation(pod *corev1.Pod, ns corev1.Namespace) error { + iptablesConfig, err := w.iptablesConfigJSON(*pod, ns) + if err != nil { + return err + } + + pod.Annotations[constants.AnnotationRedirectTraffic] = iptablesConfig + + return nil +} diff --git a/control-plane/connect-inject/webhookv2/redirect_traffic_test.go b/control-plane/connect-inject/webhookv2/redirect_traffic_test.go new file mode 100644 index 0000000000..66e8e6658f --- /dev/null +++ b/control-plane/connect-inject/webhookv2/redirect_traffic_test.go @@ -0,0 +1,480 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package webhookv2 + +import ( + "encoding/json" + "fmt" + "strconv" + "testing" + + mapset "github.com/deckarep/golang-set" + logrtest "github.com/go-logr/logr/testr" + "github.com/hashicorp/consul/sdk/iptables" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" +) + +const ( + defaultPodName = "fakePod" + defaultNamespace = "default" +) + +func TestAddRedirectTrafficConfig(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(schema.GroupVersion{ + Group: "", + Version: "v1", + }, &corev1.Pod{}) + decoder := admission.NewDecoder(s) + cases := []struct { + name string + webhook MeshWebhook + pod *corev1.Pod + namespace corev1.Namespace + dnsEnabled bool + expCfg iptables.Config + expErr error + }{ + { + name: "basic bare minimum pod", + webhook: MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: "", + ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + ExcludeUIDs: []string{"5996"}, + }, + }, + { + name: "proxy health checks enabled", + webhook: MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: 
mapset.NewSet(), + decoder: decoder, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{ + constants.AnnotationUseProxyHealthCheck: "true", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: "", + ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + ExcludeUIDs: []string{"5996"}, + ExcludeInboundPorts: []string{"21000"}, + }, + }, + { + name: "metrics enabled", + webhook: MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{ + constants.AnnotationEnableMetrics: "true", + constants.AnnotationPrometheusScrapePort: "13373", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: "", + ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + ExcludeUIDs: []string{"5996"}, + ExcludeInboundPorts: []string{"13373"}, + }, + }, + { + name: "metrics enabled with incorrect annotation", + webhook: MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{ + constants.AnnotationEnableMetrics: "invalid", + constants.AnnotationPrometheusScrapePort: "13373", + }, + }, + Spec: corev1.PodSpec{ + Containers: 
[]corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: "", + ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + ExcludeUIDs: []string{"5996"}, + ExcludeInboundPorts: []string{"13373"}, + }, + expErr: fmt.Errorf("%s annotation value of %s was invalid: %s", constants.AnnotationEnableMetrics, "invalid", "strconv.ParseBool: parsing \"invalid\": invalid syntax"), + }, + { + name: "overwrite probes, transparent proxy annotation set", + webhook: MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{ + constants.AnnotationTransparentProxyOverwriteProbes: "true", + constants.KeyTransparentProxy: "true", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart), + }, + }, + }, + }, + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: "", + ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + ExcludeUIDs: []string{"5996"}, + ExcludeInboundPorts: []string{strconv.Itoa(exposedPathsLivenessPortsRangeStart)}, + }, + }, + { + name: "exclude inbound ports", + webhook: MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{ + 
constants.AnnotationTProxyExcludeInboundPorts: "1111,11111", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: "", + ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + ExcludeUIDs: []string{"5996"}, + ExcludeInboundPorts: []string{"1111", "11111"}, + }, + }, + { + name: "exclude outbound ports", + webhook: MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{ + constants.AnnotationTProxyExcludeOutboundPorts: "2222,22222", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: "", + ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + ExcludeUIDs: []string{"5996"}, + ExcludeOutboundPorts: []string{"2222", "22222"}, + }, + }, + { + name: "exclude outbound CIDRs", + webhook: MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{ + constants.AnnotationTProxyExcludeOutboundCIDRs: "3.3.3.3,3.3.3.3/24", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: "", + ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyOutboundPort: 
iptables.DefaultTProxyOutboundPort, + ExcludeUIDs: []string{strconv.Itoa(initContainersUserAndGroupID)}, + ExcludeOutboundCIDRs: []string{"3.3.3.3", "3.3.3.3/24"}, + }, + }, + { + name: "exclude UIDs", + webhook: MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{ + constants.AnnotationTProxyExcludeUIDs: "4444,44444", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: "", + ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + ExcludeUIDs: []string{"4444", "44444", strconv.Itoa(initContainersUserAndGroupID)}, + }, + }, + { + name: "exclude inbound ports, outbound ports, outbound CIDRs, and UIDs", + webhook: MeshWebhook{ + Log: logrtest.New(t), + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{ + constants.AnnotationTProxyExcludeInboundPorts: "1111,11111", + constants.AnnotationTProxyExcludeOutboundPorts: "2222,22222", + constants.AnnotationTProxyExcludeOutboundCIDRs: "3.3.3.3,3.3.3.3/24", + constants.AnnotationTProxyExcludeUIDs: "4444,44444", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + expCfg: iptables.Config{ + ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + ExcludeInboundPorts: []string{"1111", "11111"}, + ExcludeOutboundPorts: 
[]string{"2222", "22222"}, + ExcludeOutboundCIDRs: []string{"3.3.3.3", "3.3.3.3/24"}, + ExcludeUIDs: []string{"4444", "44444", strconv.Itoa(initContainersUserAndGroupID)}, + }, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + err := c.webhook.addRedirectTrafficConfigAnnotation(c.pod, c.namespace) + + // Only compare annotation and iptables config on successful runs + if c.expErr == nil { + require.NoError(t, err) + anno, ok := c.pod.Annotations[constants.AnnotationRedirectTraffic] + require.Equal(t, ok, true) + + actualConfig := iptables.Config{} + err = json.Unmarshal([]byte(anno), &actualConfig) + require.NoError(t, err) + require.Equal(t, c.expCfg, actualConfig) + } else { + require.EqualError(t, err, c.expErr.Error()) + } + }) + } +} + +func TestRedirectTraffic_consulDNS(t *testing.T) { + cases := map[string]struct { + globalEnabled bool + annotations map[string]string + namespaceLabel map[string]string + expectConsulDNSConfig bool + }{ + "enabled globally, ns not set, annotation not provided": { + globalEnabled: true, + expectConsulDNSConfig: true, + }, + "enabled globally, ns not set, annotation is false": { + globalEnabled: true, + annotations: map[string]string{constants.KeyConsulDNS: "false"}, + expectConsulDNSConfig: false, + }, + "enabled globally, ns not set, annotation is true": { + globalEnabled: true, + annotations: map[string]string{constants.KeyConsulDNS: "true"}, + expectConsulDNSConfig: true, + }, + "disabled globally, ns not set, annotation not provided": { + expectConsulDNSConfig: false, + }, + "disabled globally, ns not set, annotation is false": { + annotations: map[string]string{constants.KeyConsulDNS: "false"}, + expectConsulDNSConfig: false, + }, + "disabled globally, ns not set, annotation is true": { + annotations: map[string]string{constants.KeyConsulDNS: "true"}, + expectConsulDNSConfig: true, + }, + "disabled globally, ns enabled, annotation not set": { + namespaceLabel: 
map[string]string{constants.KeyConsulDNS: "true"}, + expectConsulDNSConfig: true, + }, + "enabled globally, ns disabled, annotation not set": { + globalEnabled: true, + namespaceLabel: map[string]string{constants.KeyConsulDNS: "false"}, + expectConsulDNSConfig: false, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + w := MeshWebhook{ + EnableConsulDNS: c.globalEnabled, + EnableTransparentProxy: true, + ConsulConfig: &consul.Config{HTTPPort: 8500}, + } + + pod := minimal() + pod.Annotations = c.annotations + + ns := testNS + ns.Labels = c.namespaceLabel + iptablesConfig, err := w.iptablesConfigJSON(*pod, ns) + require.NoError(t, err) + + actualConfig := iptables.Config{} + err = json.Unmarshal([]byte(iptablesConfig), &actualConfig) + require.NoError(t, err) + if c.expectConsulDNSConfig { + require.Equal(t, "127.0.0.1", actualConfig.ConsulDNSIP) + require.Equal(t, 8600, actualConfig.ConsulDNSPort) + } else { + require.Empty(t, actualConfig.ConsulDNSIP) + } + }) + } +} diff --git a/control-plane/consul/dataplane_client.go b/control-plane/consul/dataplane_client.go new file mode 100644 index 0000000000..628d353252 --- /dev/null +++ b/control-plane/consul/dataplane_client.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "fmt" + + "github.com/hashicorp/consul/proto-public/pbdataplane" +) + +// NewDataplaneServiceClient creates a pbdataplane.DataplaneServiceClient for gathering proxy bootstrap config. +// It is initialized with a consul-server-connection-manager Watcher to continuously find Consul +// server addresses. +func NewDataplaneServiceClient(watcher ServerConnectionManager) (pbdataplane.DataplaneServiceClient, error) { + + // We recycle the GRPC connection from the discovery client because it + // should have all the necessary dial options, including the resolver that + // continuously updates Consul server addresses. 
Otherwise, a lot of code from consul-server-connection-manager + // would need to be duplicated + state, err := watcher.State() + if err != nil { + return nil, fmt.Errorf("unable to get connection manager state: %w", err) + } + dpClient := pbdataplane.NewDataplaneServiceClient(state.GRPCConn) + + return dpClient, nil +} diff --git a/control-plane/consul/dataplane_client_test.go b/control-plane/consul/dataplane_client_test.go new file mode 100644 index 0000000000..233000cee8 --- /dev/null +++ b/control-plane/consul/dataplane_client_test.go @@ -0,0 +1,199 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/consul-server-connection-manager/discovery" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + "github.com/hashicorp/consul/proto-public/pbdataplane" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" +) + +func Test_NewDataplaneServiceClient(t *testing.T) { + + var serverConfig *testutil.TestServerConfig + server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + serverConfig = c + }) + require.NoError(t, err) + defer server.Stop() + + server.WaitForLeader(t) + server.WaitForActiveCARoot(t) + + t.Logf("server grpc address on %d", serverConfig.Ports.GRPC) + + // Create discovery configuration + discoverConfig := discovery.Config{ + Addresses: "127.0.0.1", + GRPCPort: serverConfig.Ports.GRPC, + } + + opts := hclog.LoggerOptions{Name: "dataplane-service-client"} + logger := hclog.New(&opts) + + watcher, err := 
discovery.NewWatcher(context.Background(), discoverConfig, logger) + require.NoError(t, err) + require.NotNil(t, watcher) + + defer watcher.Stop() + go watcher.Run() + + // Create a workload and create a proxyConfiguration + createWorkload(t, watcher, "foo") + pc := createProxyConfiguration(t, watcher, "foo") + + client, err := NewDataplaneServiceClient(watcher) + require.NoError(t, err) + require.NotNil(t, client) + require.NotNil(t, watcher) + + req := &pbdataplane.GetEnvoyBootstrapParamsRequest{ + ProxyId: "foo", + Namespace: "default", + Partition: "default", + } + + res, err := client.GetEnvoyBootstrapParams(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, "foo", res.GetIdentity()) + require.Equal(t, "default", res.GetNamespace()) + require.Equal(t, "default", res.GetPartition()) + + if diff := cmp.Diff(pc.BootstrapConfig, res.GetBootstrapConfig(), protocmp.Transform()); diff != "" { + t.Errorf("unexpected difference:\n%v", diff) + } + + // NOTE: currently it isn't possible to test that the grpc connection responds to changes in the + // discovery server. The discovery response only includes the IP address of the host, so all servers + // for a local test are de-duplicated as a single entry. 
+} + +func createWorkload(t *testing.T, watcher ServerConnectionManager, name string) { + + client, err := NewResourceServiceClient(watcher) + require.NoError(t, err) + + workload := &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "10.0.0.1", Ports: []string{"public", "admin", "mesh"}}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "public": { + Port: 80, + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + }, + "admin": { + Port: 8080, + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + }, + "mesh": { + Port: 20000, + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + NodeName: "k8s-node-0-virtual", + Identity: name, + } + + id := &pbresource.ID{ + Name: name, + Type: pbcatalog.WorkloadType, + Tenancy: &pbresource.Tenancy{ + Partition: "default", + Namespace: "default", + }, + } + + proto, err := anypb.New(workload) + require.NoError(t, err) + + req := &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: id, + Data: proto, + }, + } + + _, err = client.Write(context.Background(), req) + require.NoError(t, err) + + resourceHasPersisted(t, client, id) +} + +func createProxyConfiguration(t *testing.T, watcher ServerConnectionManager, name string) *pbmesh.ProxyConfiguration { + + client, err := NewResourceServiceClient(watcher) + require.NoError(t, err) + + pc := &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{"foo"}, + }, + BootstrapConfig: &pbmesh.BootstrapConfig{ + StatsBindAddr: "127.0.0.2:1234", + ReadyBindAddr: "127.0.0.3:5678", + }, + } + + id := &pbresource.ID{ + Name: name, + Type: pbmesh.ProxyConfigurationType, + Tenancy: &pbresource.Tenancy{ + Partition: "default", + Namespace: "default", + }, + } + + proto, err := anypb.New(pc) + require.NoError(t, err) + + req := &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: id, + Data: proto, + }, + } + + _, err = client.Write(context.Background(), req) + require.NoError(t, err) + + resourceHasPersisted(t, client, id) + 
return pc +} + +// resourceHasPersisted checks that a recently written resource exists in the Consul +// state store with a valid version. This must be true before a resource is overwritten +// or deleted. +// TODO: refactor so that there isn't an import cycle when using test.ResourceHasPersisted. +func resourceHasPersisted(t *testing.T, client pbresource.ResourceServiceClient, id *pbresource.ID) { + req := &pbresource.ReadRequest{Id: id} + + require.Eventually(t, func() bool { + res, err := client.Read(context.Background(), req) + if err != nil { + return false + } + + if res.GetResource().GetVersion() == "" { + return false + } + + return true + }, 5*time.Second, + time.Second) +} diff --git a/control-plane/consul/resource_client.go b/control-plane/consul/resource_client.go new file mode 100644 index 0000000000..82c24af34f --- /dev/null +++ b/control-plane/consul/resource_client.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "fmt" + + "github.com/hashicorp/consul/proto-public/pbresource" +) + +// NewResourceServiceClient creates a pbresource.ResourceServiceClient for creating V2 Consul resources. +// It is initialized with a consul-server-connection-manager Watcher to continuously find Consul +// server addresses. +func NewResourceServiceClient(watcher ServerConnectionManager) (pbresource.ResourceServiceClient, error) { + + // We recycle the GRPC connection from the discovery client because it + // should have all the necessary dial options, including the resolver that + // continuously updates Consul server addresses. 
Otherwise, a lot of code from consul-server-connection-manager + // would need to be duplicated + state, err := watcher.State() + if err != nil { + return nil, fmt.Errorf("unable to get connection manager state: %w", err) + } + resourceClient := pbresource.NewResourceServiceClient(state.GRPCConn) + + return resourceClient, nil +} diff --git a/control-plane/consul/resource_client_test.go b/control-plane/consul/resource_client_test.go new file mode 100644 index 0000000000..f1d28b27da --- /dev/null +++ b/control-plane/consul/resource_client_test.go @@ -0,0 +1,109 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "context" + "testing" + + "github.com/hashicorp/consul-server-connection-manager/discovery" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" +) + +func Test_NewResourceServiceClient(t *testing.T) { + + var serverConfig *testutil.TestServerConfig + server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + serverConfig = c + }) + require.NoError(t, err) + defer server.Stop() + + server.WaitForLeader(t) + server.WaitForActiveCARoot(t) + + t.Logf("server grpc address on %d", serverConfig.Ports.GRPC) + + // Create discovery configuration + discoverConfig := discovery.Config{ + Addresses: "127.0.0.1", + GRPCPort: serverConfig.Ports.GRPC, + } + + opts := hclog.LoggerOptions{Name: "resource-service-client"} + logger := hclog.New(&opts) + + watcher, err := discovery.NewWatcher(context.Background(), discoverConfig, logger) + require.NoError(t, err) + require.NotNil(t, watcher) + + defer watcher.Stop() + go watcher.Run() + + 
client, err := NewResourceServiceClient(watcher) + require.NoError(t, err) + require.NotNil(t, client) + require.NotNil(t, watcher) + + req := createWriteRequest(t, "foo") + res, err := client.Write(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, "foo", res.GetResource().GetId().GetName()) + + // NOTE: currently it isn't possible to test that the grpc connection responds to changes in the + // discovery server. The discovery response only includes the IP address of the host, so all servers + // for a local test are de-duplicated as a single entry. +} + +func createWriteRequest(t *testing.T, name string) *pbresource.WriteRequest { + + workload := &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "10.0.0.1", Ports: []string{"public", "admin", "mesh"}}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "public": { + Port: 80, + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + }, + "admin": { + Port: 8080, + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + }, + "mesh": { + Port: 20000, + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + NodeName: "k8s-node-0-virtual", + Identity: name, + } + + proto, err := anypb.New(workload) + require.NoError(t, err) + + req := &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: name, + Type: pbcatalog.WorkloadType, + Tenancy: &pbresource.Tenancy{ + Namespace: constants.DefaultConsulNS, + Partition: constants.DefaultConsulPartition, + }, + }, + Data: proto, + }, + } + return req +} diff --git a/control-plane/controllers/resources/consul_resource_controller.go b/control-plane/controllers/resources/consul_resource_controller.go new file mode 100644 index 0000000000..95c5cbcac6 --- /dev/null +++ b/control-plane/controllers/resources/consul_resource_controller.go @@ -0,0 +1,327 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package resources + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + "github.com/hashicorp/consul/proto-public/pbresource" + "golang.org/x/time/rate" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + corev1 "k8s.io/api/core/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "k8s.io/utils/strings/slices" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + tenancy "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" +) + +const ( + FinalizerName = "finalizers.consul.hashicorp.com" + ConsulAgentError = "ConsulAgentError" + ExternallyManagedConfigError = "ExternallyManagedConfigError" +) + +// ResourceController is implemented by resources syncing Consul Resources from their CRD counterparts. +// It is used by ConsulResourceController to abstract CRD-specific Consul Resources. +type ResourceController interface { + // Update updates the state of the whole object. + Update(context.Context, client.Object, ...client.UpdateOption) error + // UpdateStatus updates the state of just the object's status. + UpdateStatus(context.Context, client.Object, ...client.SubResourceUpdateOption) error + // Get retrieves an object for the given object key from the Kubernetes Cluster. + // obj must be a struct pointer so that obj can be updated with the response + // returned by the Server. 
+ Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error + // Logger returns a logger with values added for the specific controller + // and request name. + Logger(types.NamespacedName) logr.Logger +} + +// ConsulResourceController is a generic controller that is used to reconcile +// all Consul Resource types, e.g. TrafficPermissions, ProxyConfiguration, etc., since +// they share the same reconcile behaviour. +type ConsulResourceController struct { + // ConsulClientConfig is the config for the Consul API client. + ConsulClientConfig *consul.Config + + // ConsulServerConnMgr is the watcher for the Consul server addresses. + ConsulServerConnMgr consul.ServerConnectionManager + + common.ConsulTenancyConfig +} + +// ReconcileResource reconciles an update to a resource. CRD-specific controller's +// call this function because it handles reconciliation of config entries +// generically. +// CRD-specific controller should pass themselves in as updater since we +// need to call back into their own update methods to ensure they update their +// internal state. +func (r *ConsulResourceController) ReconcileResource(ctx context.Context, crdCtrl ResourceController, req ctrl.Request, resource common.ConsulResource) (ctrl.Result, error) { + logger := crdCtrl.Logger(req.NamespacedName) + err := crdCtrl.Get(ctx, req.NamespacedName, resource) + if k8serr.IsNotFound(err) { + return ctrl.Result{}, client.IgnoreNotFound(err) + } else if err != nil { + logger.Error(err, "retrieving resource") + return ctrl.Result{}, err + } + + // Create Consul resource service client for this reconcile. 
+ resourceClient, err := consul.NewResourceServiceClient(r.ConsulServerConnMgr) + if err != nil { + logger.Error(err, "failed to create Consul resource client", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + + state, err := r.ConsulServerConnMgr.State() + if err != nil { + logger.Error(err, "failed to query Consul client state", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + if state.Token != "" { + ctx = metadata.AppendToOutgoingContext(ctx, "x-consul-token", state.Token) + } + + if resource.GetDeletionTimestamp().IsZero() { + // The object is not being deleted, so if it does not have our finalizer, + // then let's add the finalizer and update the object. This is equivalent + // to registering our finalizer. + if !slices.Contains(resource.GetFinalizers(), FinalizerName) { + resource.AddFinalizer(FinalizerName) + if err := r.syncUnknown(ctx, crdCtrl, resource); err != nil { + return ctrl.Result{}, err + } + } + } + + if !resource.GetDeletionTimestamp().IsZero() { + if slices.Contains(resource.GetFinalizers(), FinalizerName) { + // The object is being deleted + logger.Info("deletion event") + // Check to see if consul has config entry with the same name + res, err := resourceClient.Read(ctx, &pbresource.ReadRequest{Id: resource.ResourceID(r.consulNamespace(req.Namespace), r.getConsulPartition())}) + + // Ignore the error where the resource isn't found in Consul. + // It is indicative of desired state. 
+ if err != nil && !isNotFoundErr(err) { + return ctrl.Result{}, fmt.Errorf("getting resource from Consul: %w", err) + } + + // In the case this resource was created outside of consul, skip the deletion process and continue + if !managedByConsulResourceController(res.GetResource()) { + logger.Info("resource in Consul was created outside of Kubernetes - skipping delete from Consul") + } + + if err == nil && managedByConsulResourceController(res.GetResource()) { + _, err := resourceClient.Delete(ctx, &pbresource.DeleteRequest{Id: resource.ResourceID(r.consulNamespace(req.Namespace), r.getConsulPartition())}) + if err != nil { + return r.syncFailed(ctx, logger, crdCtrl, resource, ConsulAgentError, + fmt.Errorf("deleting resource from Consul: %w", err)) + } + logger.Info("deletion from Consul successful") + } + // remove our finalizer from the list and update it. + resource.RemoveFinalizer(FinalizerName) + if err := crdCtrl.Update(ctx, resource); err != nil { + return ctrl.Result{}, err + } + logger.Info("finalizer removed") + } + + // Stop reconciliation as the item is being deleted + return ctrl.Result{}, nil + } + + // Check to see if consul has config entry with the same name + res, err := resourceClient.Read(ctx, &pbresource.ReadRequest{Id: resource.ResourceID(r.consulNamespace(req.Namespace), r.getConsulPartition())}) + + // In the case the namespace doesn't exist in Consul yet, assume we are racing with the namespace controller + // and requeue. 
+ if tenancy.ConsulNamespaceIsNotFound(err) { + logger.Info("Consul namespace not found; re-queueing request", + "name", req.Name, "ns", req.Namespace, "consul-ns", + r.consulNamespace(req.Namespace), "err", err.Error()) + return ctrl.Result{Requeue: true}, nil + } + + // If resource with this name does not exist + if isNotFoundErr(err) { + logger.Info("resource not found in Consul") + + // Create the config entry + _, err := resourceClient.Write(ctx, &pbresource.WriteRequest{Resource: resource.Resource(r.consulNamespace(req.Namespace), r.getConsulPartition())}) + if err != nil { + return r.syncFailed(ctx, logger, crdCtrl, resource, ConsulAgentError, + fmt.Errorf("writing resource to Consul: %w", err)) + } + + logger.Info("resource created") + return r.syncSuccessful(ctx, crdCtrl, resource) + } + + // If there is an error when trying to get the resource from the api server, + // fail the reconcile. + if err != nil { + return r.syncFailed(ctx, logger, crdCtrl, resource, ConsulAgentError, err) + } + + // TODO: consider the case where we want to migrate a resource existing into Consul to a CRD with an annotation + if !managedByConsulResourceController(res.Resource) { + return r.syncFailed(ctx, logger, crdCtrl, resource, ExternallyManagedConfigError, + fmt.Errorf("resource already exists in Consul")) + } + + if !resource.MatchesConsul(res.Resource, r.consulNamespace(req.Namespace), r.getConsulPartition()) { + logger.Info("resource does not match Consul") + _, err := resourceClient.Write(ctx, &pbresource.WriteRequest{Resource: resource.Resource(r.consulNamespace(req.Namespace), r.getConsulPartition())}) + if err != nil { + return r.syncUnknownWithError(ctx, logger, crdCtrl, resource, ConsulAgentError, + fmt.Errorf("updating resource in Consul: %w", err)) + } + logger.Info("resource updated") + return r.syncSuccessful(ctx, crdCtrl, resource) + } else if resource.SyncedConditionStatus() != corev1.ConditionTrue { + return r.syncSuccessful(ctx, crdCtrl, resource) + } + + 
return ctrl.Result{}, nil +} + +// setupWithManager sets up the controller manager for the given resource +// with our default options. +func setupWithManager(mgr ctrl.Manager, resource client.Object, reconciler reconcile.Reconciler) error { + options := controller.Options{ + // Taken from https://github.com/kubernetes/client-go/blob/master/util/workqueue/default_rate_limiters.go#L39 + // and modified from a starting backoff of 5ms and max of 1000s to a + // starting backoff of 200ms and a max of 5s to better fit our most + // common error cases and performance characteristics. + // + // One common error case is that a resource is applied that requires + // a protocol like http or grpc. Often the user will apply a new resource + // to set the protocol in a minute or two. During this time, the + // default backoff could then be set up to 5m or more which means the + // original resource takes a long time to re-sync. + // + // In terms of performance, Consul servers can handle tens of thousands + // of writes per second, so retrying at max every 5s isn't an issue and + // provides a better UX. + RateLimiter: workqueue.NewMaxOfRateLimiter( + workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 5*time.Second), + // 10 qps, 100 bucket size. This is only for retry speed, and it's only the overall factor (not per item) + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + ), + } + + return ctrl.NewControllerManagedBy(mgr). + For(resource). + WithOptions(options). + Complete(reconciler) +} + +func (r *ConsulResourceController) syncFailed(ctx context.Context, logger logr.Logger, updater ResourceController, resource common.ConsulResource, errType string, err error) (ctrl.Result, error) { + resource.SetSyncedCondition(corev1.ConditionFalse, errType, err.Error()) + if updateErr := updater.UpdateStatus(ctx, resource); updateErr != nil { + // Log the original error here because we are returning the updateErr. 
+ // Otherwise, the original error would be lost. + logger.Error(err, "sync failed") + return ctrl.Result{}, updateErr + } + return ctrl.Result{}, err +} + +func (r *ConsulResourceController) syncSuccessful(ctx context.Context, updater ResourceController, resource common.ConsulResource) (ctrl.Result, error) { + resource.SetSyncedCondition(corev1.ConditionTrue, "", "") + timeNow := metav1.NewTime(time.Now()) + resource.SetLastSyncedTime(&timeNow) + return ctrl.Result{}, updater.UpdateStatus(ctx, resource) +} + +func (r *ConsulResourceController) syncUnknown(ctx context.Context, updater ResourceController, resource common.ConsulResource) error { + resource.SetSyncedCondition(corev1.ConditionUnknown, "", "") + return updater.Update(ctx, resource) +} + +func (r *ConsulResourceController) syncUnknownWithError(ctx context.Context, + logger logr.Logger, + updater ResourceController, + resource common.ConsulResource, + errType string, + err error, +) (ctrl.Result, error) { + resource.SetSyncedCondition(corev1.ConditionUnknown, errType, err.Error()) + if updateErr := updater.UpdateStatus(ctx, resource); updateErr != nil { + // Log the original error here because we are returning the updateErr. + // Otherwise, the original error would be lost. + logger.Error(err, "sync status unknown") + return ctrl.Result{}, updateErr + } + return ctrl.Result{}, err +} + +// isNotFoundErr checks the grpc response code for "NotFound". +func isNotFoundErr(err error) bool { + if err == nil { + return false + } + s, ok := status.FromError(err) + if !ok { + return false + } + return codes.NotFound == s.Code() +} + +func (r *ConsulResourceController) consulNamespace(namespace string) string { + ns := namespaces.ConsulNamespace( + namespace, + r.EnableConsulNamespaces, + r.ConsulDestinationNamespace, + r.EnableNSMirroring, + r.NSMirroringPrefix, + ) + + // TODO: remove this if and when the default namespace of resources is no longer required to be set explicitly. 
+ if ns == "" { + ns = constants.DefaultConsulNS + } + return ns +} + +func (r *ConsulResourceController) getConsulPartition() string { + if !r.EnableConsulPartitions || r.ConsulPartition == "" { + return constants.DefaultConsulPartition + } + return r.ConsulPartition +} + +func managedByConsulResourceController(resource *pbresource.Resource) bool { + if resource == nil { + return false + } + + consulMeta := resource.GetMetadata() + if consulMeta == nil { + return false + } + + if val, ok := consulMeta[common.SourceKey]; ok && val == common.SourceValue { + return true + } + return false +} diff --git a/control-plane/controllers/resources/consul_resource_controller_ent_test.go b/control-plane/controllers/resources/consul_resource_controller_ent_test.go new file mode 100644 index 0000000000..cc94a55877 --- /dev/null +++ b/control-plane/controllers/resources/consul_resource_controller_ent_test.go @@ -0,0 +1,189 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build enterprise + +package resources + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + logrtest "github.com/go-logr/logr/testr" + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/testing/protocmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/hashicorp/consul-k8s/control-plane/api/auth/v2beta1" + "github.com/hashicorp/consul-k8s/control-plane/api/common" + "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" + + pbauth 
"github.com/hashicorp/consul/proto-public/pbauth/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/sdk/testutil" +) + +// TestConsulResourceController_UpdatesConsulResourceEnt tests is a mirror of the CE test which also tests the +// enterprise traffic permissions deny action. +func TestConsulResourceController_UpdatesConsulResourceEnt(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + resource common.ConsulResource + expected *pbauth.TrafficPermissions + reconciler func(client.Client, *consul.Config, consul.ServerConnectionManager, logr.Logger) testReconciler + updateF func(config common.ConsulResource) + unmarshal func(t *testing.T, consul *pbresource.Resource) proto.Message + }{ + { + name: "TrafficPermissions", + resource: &v2beta1.TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-traffic-permission", + Namespace: metav1.NamespaceDefault, + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + Namespace: "the space namespace space", + }, + { + IdentityName: "source-identity", + }, + }, + // TODO: enable this when L7 traffic permissions are supported + //DestinationRules: []*pbauth.DestinationRule{ + // { + // PathExact: "/hello", + // Methods: []string{"GET", "POST"}, + // PortNames: []string{"web", "admin"}, + // }, + //}, + }, + }, + }, + }, + expected: &pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_DENY, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + Namespace: "the space namespace space", + Partition: common.DefaultConsulPartition, + Peer: constants.DefaultConsulPeer, + }, + }, + //DestinationRules: []*pbauth.DestinationRule{ + // { + // PathExact: "/hello", + // Methods: 
[]string{"GET", "POST"}, + // PortNames: []string{"web", "admin"}, + // }, + //}, + }, + }, + }, + reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + return &TrafficPermissionsController{ + Client: client, + Log: logger, + Controller: &ConsulResourceController{ + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + }, + } + }, + updateF: func(resource common.ConsulResource) { + trafficPermissions := resource.(*v2beta1.TrafficPermissions) + trafficPermissions.Spec.Action = pbauth.Action_ACTION_DENY + trafficPermissions.Spec.Permissions[0].Sources = trafficPermissions.Spec.Permissions[0].Sources[:1] + }, + unmarshal: func(t *testing.T, resource *pbresource.Resource) proto.Message { + data := resource.Data + + trafficPermission := &pbauth.TrafficPermissions{} + require.NoError(t, data.UnmarshalTo(trafficPermission)) + return trafficPermission + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + ctx := context.Background() + + s := runtime.NewScheme() + s.AddKnownTypes(v1alpha1.GroupVersion, c.resource) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(c.resource).WithStatusSubresource(c.resource).Build() + + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + // We haven't run reconcile yet, so we must create the resource + // in Consul ourselves. + { + resource := c.resource.Resource(constants.DefaultConsulNS, constants.DefaultConsulPartition) + req := &pbresource.WriteRequest{Resource: resource} + _, err := testClient.ResourceClient.Write(ctx, req) + require.NoError(t, err) + } + + // Now run reconcile which should update the entry in Consul. + { + namespacedName := types.NamespacedName{ + Namespace: metav1.NamespaceDefault, + Name: c.resource.KubernetesName(), + } + // First get it, so we have the latest revision number. 
+ err := fakeClient.Get(ctx, namespacedName, c.resource) + require.NoError(t, err) + + // Update the entry in Kube and run reconcile. + c.updateF(c.resource) + err = fakeClient.Update(ctx, c.resource) + require.NoError(t, err) + r := c.reconciler(fakeClient, testClient.Cfg, testClient.Watcher, logrtest.New(t)) + resp, err := r.Reconcile(ctx, ctrl.Request{ + NamespacedName: namespacedName, + }) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // Now check that the object in Consul is as expected. + req := &pbresource.ReadRequest{Id: c.resource.ResourceID(constants.DefaultConsulNS, constants.DefaultConsulPartition)} + res, err := testClient.ResourceClient.Read(ctx, req) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, c.resource.GetName(), res.GetResource().GetId().GetName()) + + actual := c.unmarshal(t, res.GetResource()) + opts := append([]cmp.Option{protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version")}, test.CmpProtoIgnoreOrder()...) + diff := cmp.Diff(c.expected, actual, opts...) + require.Equal(t, "", diff, "TrafficPermissions do not match") + } + }) + } +} diff --git a/control-plane/controllers/resources/consul_resource_controller_test.go b/control-plane/controllers/resources/consul_resource_controller_test.go new file mode 100644 index 0000000000..5de53a5b22 --- /dev/null +++ b/control-plane/controllers/resources/consul_resource_controller_test.go @@ -0,0 +1,770 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package resources + +import ( + "context" + "testing" + "time" + + "github.com/go-logr/logr" + logrtest "github.com/go-logr/logr/testr" + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/testing/protocmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/sdk/testutil" + + "github.com/hashicorp/consul-k8s/control-plane/api/auth/v2beta1" + "github.com/hashicorp/consul-k8s/control-plane/api/common" + "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +type testReconciler interface { + Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) +} + +// TestConsulResourceController_CreatesConsulResource validated resources are created in Consul from kube objects. 
+func TestConsulResourceController_CreatesConsulResource(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + resource common.ConsulResource + expected *pbauth.TrafficPermissions + reconciler func(client.Client, *consul.Config, consul.ServerConnectionManager, logr.Logger) testReconciler + unmarshal func(t *testing.T, consul *pbresource.Resource) proto.Message + }{ + { + name: "TrafficPermissions", + resource: &v2beta1.TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-traffic-permission", + Namespace: metav1.NamespaceDefault, + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + Namespace: "the space namespace space", + }, + { + IdentityName: "source-identity", + }, + }, + // TODO: enable this when L7 traffic permissions are supported + //DestinationRules: []*pbauth.DestinationRule{ + // { + // PathExact: "/hello", + // Methods: []string{"GET", "POST"}, + // PortNames: []string{"web", "admin"}, + // }, + //}, + }, + }, + }, + }, + expected: &pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + IdentityName: "source-identity", + Namespace: common.DefaultConsulNamespace, + Partition: common.DefaultConsulPartition, + Peer: constants.DefaultConsulPeer, + }, + { + Namespace: "the space namespace space", + Partition: common.DefaultConsulPartition, + Peer: constants.DefaultConsulPeer, + }, + }, + //DestinationRules: []*pbauth.DestinationRule{ + // { + // PathExact: "/hello", + // Methods: []string{"GET", "POST"}, + // PortNames: []string{"web", "admin"}, + // }, + //}, + }, + }, + }, + reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger 
logr.Logger) testReconciler { + return &TrafficPermissionsController{ + Client: client, + Log: logger, + Controller: &ConsulResourceController{ + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + }, + } + }, + unmarshal: func(t *testing.T, resource *pbresource.Resource) proto.Message { + data := resource.Data + + trafficPermission := &pbauth.TrafficPermissions{} + require.NoError(t, data.UnmarshalTo(trafficPermission)) + return trafficPermission + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + ctx := context.Background() + + s := runtime.NewScheme() + s.AddKnownTypes(v2beta1.AuthGroupVersion, c.resource) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithObjects(c.resource).WithStatusSubresource(c.resource).Build() + + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + r := c.reconciler(fakeClient, testClient.Cfg, testClient.Watcher, logrtest.New(t)) + namespacedName := types.NamespacedName{ + Namespace: metav1.NamespaceDefault, + Name: c.resource.KubernetesName(), + } + resp, err := r.Reconcile(ctx, ctrl.Request{ + NamespacedName: namespacedName, + }) + require.NoError(t, err) + require.False(t, resp.Requeue) + + req := &pbresource.ReadRequest{Id: c.resource.ResourceID(constants.DefaultConsulNS, constants.DefaultConsulPartition)} + res, err := testClient.ResourceClient.Read(ctx, req) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, c.resource.GetName(), res.GetResource().GetId().GetName()) + + actual := c.unmarshal(t, res.GetResource()) + opts := append([]cmp.Option{protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version")}, test.CmpProtoIgnoreOrder()...) + diff := cmp.Diff(c.expected, actual, opts...) + require.Equal(t, "", diff, "TrafficPermissions do not match") + + // Check that the status is "synced". 
+ err = fakeClient.Get(ctx, namespacedName, c.resource) + require.NoError(t, err) + require.Equal(t, corev1.ConditionTrue, c.resource.SyncedConditionStatus()) + + // Check that the finalizer is added. + require.Contains(t, c.resource.Finalizers(), FinalizerName) + }) + } +} + +func TestConsulResourceController_UpdatesConsulResource(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + resource common.ConsulResource + expected *pbauth.TrafficPermissions + reconciler func(client.Client, *consul.Config, consul.ServerConnectionManager, logr.Logger) testReconciler + updateF func(config common.ConsulResource) + unmarshal func(t *testing.T, consul *pbresource.Resource) proto.Message + }{ + { + name: "TrafficPermissions", + resource: &v2beta1.TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-traffic-permission", + Namespace: metav1.NamespaceDefault, + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + Namespace: "the space namespace space", + }, + { + IdentityName: "source-identity", + }, + }, + // TODO: enable this when L7 traffic permissions are supported + //DestinationRules: []*pbauth.DestinationRule{ + // { + // PathExact: "/hello", + // Methods: []string{"GET", "POST"}, + // PortNames: []string{"web", "admin"}, + // }, + //}, + }, + }, + }, + }, + expected: &pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + Namespace: "the space namespace space", + Partition: common.DefaultConsulPartition, + Peer: constants.DefaultConsulPeer, + }, + }, + //DestinationRules: []*pbauth.DestinationRule{ + // { + // PathExact: "/hello", + // Methods: []string{"GET", "POST"}, + // PortNames: []string{"web", 
"admin"}, + // }, + //}, + }, + }, + }, + reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + return &TrafficPermissionsController{ + Client: client, + Log: logger, + Controller: &ConsulResourceController{ + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + }, + } + }, + updateF: func(resource common.ConsulResource) { + trafficPermissions := resource.(*v2beta1.TrafficPermissions) + trafficPermissions.Spec.Permissions[0].Sources = trafficPermissions.Spec.Permissions[0].Sources[:1] + }, + unmarshal: func(t *testing.T, resource *pbresource.Resource) proto.Message { + data := resource.Data + + trafficPermission := &pbauth.TrafficPermissions{} + require.NoError(t, data.UnmarshalTo(trafficPermission)) + return trafficPermission + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + ctx := context.Background() + + s := runtime.NewScheme() + s.AddKnownTypes(v1alpha1.GroupVersion, c.resource) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithObjects(c.resource).WithStatusSubresource(c.resource).Build() + + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + // We haven't run reconcile yet, so we must create the resource + // in Consul ourselves. + { + resource := c.resource.Resource(constants.DefaultConsulNS, constants.DefaultConsulPartition) + req := &pbresource.WriteRequest{Resource: resource} + _, err := testClient.ResourceClient.Write(ctx, req) + require.NoError(t, err) + } + + // Now run reconcile which should update the entry in Consul. + { + namespacedName := types.NamespacedName{ + Namespace: metav1.NamespaceDefault, + Name: c.resource.KubernetesName(), + } + // First get it, so we have the latest revision number. + err := fakeClient.Get(ctx, namespacedName, c.resource) + require.NoError(t, err) + + // Update the entry in Kube and run reconcile. 
+ c.updateF(c.resource) + err = fakeClient.Update(ctx, c.resource) + require.NoError(t, err) + r := c.reconciler(fakeClient, testClient.Cfg, testClient.Watcher, logrtest.New(t)) + resp, err := r.Reconcile(ctx, ctrl.Request{ + NamespacedName: namespacedName, + }) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // Now check that the object in Consul is as expected. + req := &pbresource.ReadRequest{Id: c.resource.ResourceID(constants.DefaultConsulNS, constants.DefaultConsulPartition)} + res, err := testClient.ResourceClient.Read(ctx, req) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, c.resource.GetName(), res.GetResource().GetId().GetName()) + + actual := c.unmarshal(t, res.GetResource()) + opts := append([]cmp.Option{protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version")}, test.CmpProtoIgnoreOrder()...) + diff := cmp.Diff(c.expected, actual, opts...) + require.Equal(t, "", diff, "TrafficPermissions do not match") + } + }) + } +} + +func TestConsulResourceController_DeletesConsulResource(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + resource common.ConsulResource + reconciler func(client.Client, *consul.Config, consul.ServerConnectionManager, logr.Logger) testReconciler + }{ + { + name: "TrafficPermissions", + resource: &v2beta1.TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-name", + Namespace: metav1.NamespaceDefault, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{FinalizerName}, + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + Namespace: "the space namespace space", + }, + { + IdentityName: "source-identity", + }, + }, + // TODO: enable this when L7 traffic permissions are supported + //DestinationRules: []*pbauth.DestinationRule{ + // { + // 
PathExact: "/hello", + // Methods: []string{"GET", "POST"}, + // PortNames: []string{"web", "admin"}, + // }, + //}, + }, + }, + }, + }, + reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + return &TrafficPermissionsController{ + Client: client, + Log: logger, + Controller: &ConsulResourceController{ + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + }, + } + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + ctx := context.Background() + + s := runtime.NewScheme() + s.AddKnownTypes(v2beta1.AuthGroupVersion, c.resource) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithObjects(c.resource).WithStatusSubresource(c.resource).Build() + + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + // We haven't run reconcile yet, so we must create the config entry + // in Consul ourselves. + { + resource := c.resource.Resource(constants.DefaultConsulNS, constants.DefaultConsulPartition) + req := &pbresource.WriteRequest{Resource: resource} + _, err := testClient.ResourceClient.Write(ctx, req) + require.NoError(t, err) + } + + // Now run reconcile. It's marked for deletion so this should delete it. + { + namespacedName := types.NamespacedName{ + Namespace: metav1.NamespaceDefault, + Name: c.resource.KubernetesName(), + } + r := c.reconciler(fakeClient, testClient.Cfg, testClient.Watcher, logrtest.New(t)) + resp, err := r.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: namespacedName, + }) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // Now check that the object in Consul is as expected. 
+ req := &pbresource.ReadRequest{Id: c.resource.ResourceID(constants.DefaultConsulNS, constants.DefaultConsulPartition)} + _, err = testClient.ResourceClient.Read(ctx, req) + require.Error(t, err) + require.True(t, isNotFoundErr(err)) + } + }) + } +} + +func TestConsulResourceController_ErrorUpdatesSyncStatus(t *testing.T) { + t.Parallel() + + ctx := context.Background() + trafficpermissions := &v2beta1.TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + IdentityName: "source-identity", + }, + }, + }, + }, + }, + } + + s := runtime.NewScheme() + s.AddKnownTypes(v2beta1.AuthGroupVersion, trafficpermissions) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithObjects(trafficpermissions).WithStatusSubresource(trafficpermissions).Build() + + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + // Stop the server before calling reconcile imitating a server that's not running. + _ = testClient.TestServer.Stop() + + reconciler := &TrafficPermissionsController{ + Client: fakeClient, + Log: logrtest.New(t), + Controller: &ConsulResourceController{ + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + }, + } + + // ReconcileResource should result in an error. + namespacedName := types.NamespacedName{ + Namespace: metav1.NamespaceDefault, + Name: trafficpermissions.KubernetesName(), + } + resp, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: namespacedName, + }) + require.Error(t, err) + require.False(t, resp.Requeue) + actualErrMsg := err.Error() + + // Check that the status is "synced=false". 
+ err = fakeClient.Get(ctx, namespacedName, trafficpermissions) + require.NoError(t, err) + status, reason, errMsg := trafficpermissions.SyncedCondition() + require.Equal(t, corev1.ConditionFalse, status) + require.Equal(t, "ConsulAgentError", reason) + require.Contains(t, errMsg, actualErrMsg) +} + +// TestConsulResourceController_SetsSyncedToTrue tests that if the resource hasn't changed in +// Consul but our resource's synced status isn't set to true, then we update its status. +func TestConsulResourceController_SetsSyncedToTrue(t *testing.T) { + t.Parallel() + + ctx := context.Background() + s := runtime.NewScheme() + + trafficpermissions := &v2beta1.TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + IdentityName: "source-identity", + }, + }, + }, + }, + }, + Status: v2beta1.Status{ + Conditions: v2beta1.Conditions{ + { + Type: v2beta1.ConditionSynced, + Status: corev1.ConditionUnknown, + }, + }, + }, + } + s.AddKnownTypes(v2beta1.AuthGroupVersion, trafficpermissions) + + // The config entry exists in kube but its status will be nil. + fakeClient := fake.NewClientBuilder().WithScheme(s).WithObjects(trafficpermissions).WithStatusSubresource(trafficpermissions).Build() + + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + reconciler := &TrafficPermissionsController{ + Client: fakeClient, + Log: logrtest.New(t), + Controller: &ConsulResourceController{ + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + }, + } + + // Create the resource in Consul to mimic that it was created + // successfully (but its status hasn't been updated). 
+ { + resource := trafficpermissions.Resource(constants.DefaultConsulNS, constants.DefaultConsulPartition) + req := &pbresource.WriteRequest{Resource: resource} + _, err := testClient.ResourceClient.Write(ctx, req) + require.NoError(t, err) + } + + namespacedName := types.NamespacedName{ + Namespace: metav1.NamespaceDefault, + Name: trafficpermissions.KubernetesName(), + } + resp, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: namespacedName, + }) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // Check that the status is now "synced". + err = fakeClient.Get(ctx, namespacedName, trafficpermissions) + require.NoError(t, err) + require.Equal(t, corev1.ConditionTrue, trafficpermissions.SyncedConditionStatus()) +} + +// TestConsulResourceController_DoesNotCreateUnownedResource test that if the resource +// exists in Consul but is not managed by the controller, creating/updating the resource fails. +func TestConsulResourceController_DoesNotCreateUnownedResource(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + s := runtime.NewScheme() + trafficpermissions := &v2beta1.TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + IdentityName: "source-identity", + Namespace: common.DefaultConsulNamespace, + Partition: common.DefaultConsulPartition, + Peer: constants.DefaultConsulPeer, + }, + }, + }, + }, + }, + } + s.AddKnownTypes(v2beta1.AuthGroupVersion, trafficpermissions) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithObjects(trafficpermissions).WithStatusSubresource(trafficpermissions).Build() + + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + 
}) + + unmanagedResource := trafficpermissions.Resource(constants.DefaultConsulNS, constants.DefaultConsulPartition) + unmanagedResource.Metadata = make(map[string]string) // Zero out the metadata + + // We haven't run reconcile yet. We must create the resource + // in Consul ourselves, without the metadata indicating it is owned by the controller. + { + req := &pbresource.WriteRequest{Resource: unmanagedResource} + _, err := testClient.ResourceClient.Write(ctx, req) + require.NoError(t, err) + } + + // Now run reconcile which should **not** update the entry in Consul. + { + namespacedName := types.NamespacedName{ + Namespace: metav1.NamespaceDefault, + Name: trafficpermissions.KubernetesName(), + } + // First get it, so we have the latest revision number. + err := fakeClient.Get(ctx, namespacedName, trafficpermissions) + require.NoError(t, err) + + // Attempt to create the entry in Kube and run reconcile. + reconciler := TrafficPermissionsController{ + Client: fakeClient, + Log: logrtest.New(t), + Controller: &ConsulResourceController{ + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + }, + } + resp, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: namespacedName, + }) + require.EqualError(t, err, "resource already exists in Consul") + require.False(t, resp.Requeue) + + // Now check that the object in Consul is as expected. + req := &pbresource.ReadRequest{Id: trafficpermissions.ResourceID(constants.DefaultConsulNS, constants.DefaultConsulPartition)} + readResp, err := testClient.ResourceClient.Read(ctx, req) + require.NoError(t, err) + require.NotNil(t, readResp.GetResource()) + opts := append([]cmp.Option{ + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid")}, + test.CmpProtoIgnoreOrder()...) + diff := cmp.Diff(unmanagedResource, readResp.GetResource(), opts...) 
+ require.Equal(t, "", diff, "TrafficPermissions do not match") + + // Check that the status is "synced=false". + err = fakeClient.Get(ctx, namespacedName, trafficpermissions) + require.NoError(t, err) + status, reason, errMsg := trafficpermissions.SyncedCondition() + require.Equal(t, corev1.ConditionFalse, status) + require.Equal(t, "ExternallyManagedConfigError", reason) + require.Equal(t, errMsg, "resource already exists in Consul") + } + +} + +// TestConsulResourceController_doesNotDeleteUnownedConfig tests that if the resource +// exists in Consul but is not managed by the controller, deleting the resource does +// not delete the Consul resource. +func TestConsulResourceController_doesNotDeleteUnownedConfig(t *testing.T) { + t.Parallel() + + ctx := context.Background() + s := runtime.NewScheme() + + trafficpermissionsWithDeletion := &v2beta1.TrafficPermissions{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{FinalizerName}, + }, + Spec: pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: "destination-identity", + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + { + IdentityName: "source-identity", + Namespace: common.DefaultConsulNamespace, + Partition: common.DefaultConsulPartition, + Peer: constants.DefaultConsulPeer, + }, + }, + }, + }, + }, + } + s.AddKnownTypes(v2beta1.AuthGroupVersion, trafficpermissionsWithDeletion) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithObjects(trafficpermissionsWithDeletion).WithStatusSubresource(trafficpermissionsWithDeletion).Build() + + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + reconciler := &TrafficPermissionsController{ + Client: fakeClient, + Log: logrtest.New(t), + Controller: &ConsulResourceController{ + 
ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + }, + } + + unmanagedResource := trafficpermissionsWithDeletion.Resource(constants.DefaultConsulNS, constants.DefaultConsulPartition) + unmanagedResource.Metadata = make(map[string]string) // Zero out the metadata + + // We haven't run reconcile yet. We must create the resource + // in Consul ourselves, without the metadata indicating it is owned by the controller. + { + req := &pbresource.WriteRequest{Resource: unmanagedResource} + _, err := testClient.ResourceClient.Write(ctx, req) + require.NoError(t, err) + } + + // Now run reconcile. It's marked for deletion so this should delete the kubernetes resource + // but not the consul config entry. + { + namespacedName := types.NamespacedName{ + Namespace: metav1.NamespaceDefault, + Name: trafficpermissionsWithDeletion.KubernetesName(), + } + resp, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: namespacedName, + }) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // Now check that the object in Consul is as expected. + req := &pbresource.ReadRequest{Id: trafficpermissionsWithDeletion.ResourceID(constants.DefaultConsulNS, constants.DefaultConsulPartition)} + readResp, err := testClient.ResourceClient.Read(ctx, req) + require.NoError(t, err) + require.NotNil(t, readResp.GetResource()) + opts := append([]cmp.Option{ + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid")}, + test.CmpProtoIgnoreOrder()...) + diff := cmp.Diff(unmanagedResource, readResp.GetResource(), opts...) + require.Equal(t, "", diff, "TrafficPermissions do not match") + + // Check that the resource is deleted from cluster. 
+ tp := &v2beta1.TrafficPermissions{} + _ = fakeClient.Get(ctx, namespacedName, tp) + require.Empty(t, tp.Finalizers()) + } +} diff --git a/control-plane/controllers/resources/exported_services_controller.go b/control-plane/controllers/resources/exported_services_controller.go new file mode 100644 index 0000000000..9690942194 --- /dev/null +++ b/control-plane/controllers/resources/exported_services_controller.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resources + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + multiclusterv2 "github.com/hashicorp/consul-k8s/control-plane/api/multicluster/v2" + "github.com/hashicorp/consul-k8s/control-plane/gateways" +) + +// ExportedServicesController reconciles a MeshGateway object. +type ExportedServicesController struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Controller *ConsulResourceController + GatewayConfig gateways.GatewayConfig +} + +// +kubebuilder:rbac:groups=multicluster.consul.hashicorp.com,resources=exportedservices,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=multicluster.consul.hashicorp.com,resources=exportedservices/status,verbs=get;update;patch + +func (r *ExportedServicesController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + return r.Controller.ReconcileResource(ctx, r, req, &multiclusterv2.ExportedServices{}) +} + +func (r *ExportedServicesController) Logger(name types.NamespacedName) logr.Logger { + return r.Log.WithValues("request", name) +} + +func (r *ExportedServicesController) UpdateStatus(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + return r.Status().Update(ctx, obj, opts...) 
+} + +func (r *ExportedServicesController) SetupWithManager(mgr ctrl.Manager) error { + return setupWithManager(mgr, &multiclusterv2.ExportedServices{}, r) +} diff --git a/control-plane/controllers/resources/gateway_class_config_controller.go b/control-plane/controllers/resources/gateway_class_config_controller.go new file mode 100644 index 0000000000..22084bbc56 --- /dev/null +++ b/control-plane/controllers/resources/gateway_class_config_controller.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resources + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" +) + +// GatewayClassConfigController reconciles a GatewayClassConfig object. +type GatewayClassConfigController struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Controller *ConsulResourceController +} + +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=gatewayclassconfig,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=gatewayclassconfig/status,verbs=get;update;patch + +func (r *GatewayClassConfigController) Reconcile(_ context.Context, _ ctrl.Request) (ctrl.Result, error) { + // GatewayClassConfig is not synced into Consul because Consul has no use for it. + // Consul is only aware of the resource for the sake of Kubernetes CRD generation. + return ctrl.Result{}, nil +} + +func (r *GatewayClassConfigController) Logger(name types.NamespacedName) logr.Logger { + return r.Log.WithValues("request", name) +} + +func (r *GatewayClassConfigController) UpdateStatus(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + return r.Status().Update(ctx, obj, opts...) 
+} + +func (r *GatewayClassConfigController) SetupWithManager(mgr ctrl.Manager) error { + return setupWithManager(mgr, &meshv2beta1.GatewayClassConfig{}, r) +} diff --git a/control-plane/controllers/resources/gateway_class_controller.go b/control-plane/controllers/resources/gateway_class_controller.go new file mode 100644 index 0000000000..5f2bc91ebe --- /dev/null +++ b/control-plane/controllers/resources/gateway_class_controller.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resources + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" +) + +// GatewayClassController reconciles a MeshGateway object. +type GatewayClassController struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Controller *ConsulResourceController +} + +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=gatewayclass,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=gatewayclass/status,verbs=get;update;patch + +func (r *GatewayClassController) Reconcile(_ context.Context, _ ctrl.Request) (ctrl.Result, error) { + // GatewayClass is not synced into Consul because Consul has no use for it. + // Consul is only aware of the resource for the sake of Kubernetes CRD generation. + return ctrl.Result{}, nil +} + +func (r *GatewayClassController) Logger(name types.NamespacedName) logr.Logger { + return r.Log.WithValues("request", name) +} + +func (r *GatewayClassController) UpdateStatus(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + return r.Status().Update(ctx, obj, opts...) 
+} + +func (r *GatewayClassController) SetupWithManager(mgr ctrl.Manager) error { + return setupWithManager(mgr, &meshv2beta1.GatewayClass{}, r) +} diff --git a/control-plane/controllers/resources/grpc_route_controller.go b/control-plane/controllers/resources/grpc_route_controller.go new file mode 100644 index 0000000000..fa5401c800 --- /dev/null +++ b/control-plane/controllers/resources/grpc_route_controller.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resources + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" +) + +// GRPCRouteController reconciles a GRPCRoute object. +type GRPCRouteController struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Controller *ConsulResourceController +} + +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=grpcroute,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=grpcroute/status,verbs=get;update;patch + +func (r *GRPCRouteController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + return r.Controller.ReconcileResource(ctx, r, req, &meshv2beta1.GRPCRoute{}) +} + +func (r *GRPCRouteController) Logger(name types.NamespacedName) logr.Logger { + return r.Log.WithValues("request", name) +} + +func (r *GRPCRouteController) UpdateStatus(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + return r.Status().Update(ctx, obj, opts...) 
+} + +func (r *GRPCRouteController) SetupWithManager(mgr ctrl.Manager) error { + return setupWithManager(mgr, &meshv2beta1.GRPCRoute{}, r) +} diff --git a/control-plane/controllers/resources/http_route_controller.go b/control-plane/controllers/resources/http_route_controller.go new file mode 100644 index 0000000000..9275d8f265 --- /dev/null +++ b/control-plane/controllers/resources/http_route_controller.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resources + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" +) + +// HTTPRouteController reconciles a HTTPRoute object. +type HTTPRouteController struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Controller *ConsulResourceController +} + +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=httproute,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=httproute/status,verbs=get;update;patch + +func (r *HTTPRouteController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + return r.Controller.ReconcileResource(ctx, r, req, &meshv2beta1.HTTPRoute{}) +} + +func (r *HTTPRouteController) Logger(name types.NamespacedName) logr.Logger { + return r.Log.WithValues("request", name) +} + +func (r *HTTPRouteController) UpdateStatus(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + return r.Status().Update(ctx, obj, opts...) 
+} + +func (r *HTTPRouteController) SetupWithManager(mgr ctrl.Manager) error { + return setupWithManager(mgr, &meshv2beta1.HTTPRoute{}, r) +} diff --git a/control-plane/controllers/resources/mesh_configuration_controller.go b/control-plane/controllers/resources/mesh_configuration_controller.go new file mode 100644 index 0000000000..d5813294e9 --- /dev/null +++ b/control-plane/controllers/resources/mesh_configuration_controller.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resources + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" +) + +// MeshConfigurationController reconciles a MeshConfiguration object. +type MeshConfigurationController struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Controller *ConsulResourceController +} + +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=meshconfiguration,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=meshconfiguration/status,verbs=get;update;patch + +func (r *MeshConfigurationController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + return r.Controller.ReconcileResource(ctx, r, req, &meshv2beta1.MeshConfiguration{}) +} + +func (r *MeshConfigurationController) Logger(name types.NamespacedName) logr.Logger { + return r.Log.WithValues("request", name) +} + +func (r *MeshConfigurationController) UpdateStatus(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + return r.Status().Update(ctx, obj, opts...) 
+} + +func (r *MeshConfigurationController) SetupWithManager(mgr ctrl.Manager) error { + return setupWithManager(mgr, &meshv2beta1.MeshConfiguration{}, r) +} diff --git a/control-plane/controllers/resources/mesh_gateway_controller.go b/control-plane/controllers/resources/mesh_gateway_controller.go new file mode 100644 index 0000000000..3996fa4680 --- /dev/null +++ b/control-plane/controllers/resources/mesh_gateway_controller.go @@ -0,0 +1,376 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resources + +import ( + "context" + "errors" + "fmt" + + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" + "github.com/hashicorp/consul-k8s/control-plane/gateways" +) + +// errResourceNotOwned indicates that a resource the controller would have +// updated or deleted does not have an owner reference pointing to the MeshGateway. +var errResourceNotOwned = errors.New("existing resource not owned by controller") + +// MeshGatewayController reconciles a MeshGateway object. 
+type MeshGatewayController struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Controller *ConsulResourceController + GatewayConfig gateways.GatewayConfig +} + +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=meshgateway,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=meshgateway/status,verbs=get;update;patch + +func (r *MeshGatewayController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := r.Logger(req.NamespacedName) + + // Fetch the resource being reconciled + resource := &meshv2beta1.MeshGateway{} + if err := r.Get(ctx, req.NamespacedName, resource); k8serr.IsNotFound(err) { + return ctrl.Result{}, client.IgnoreNotFound(err) + } else if err != nil { + logger.Error(err, "retrieving resource") + return ctrl.Result{}, err + } + + // Call hooks + if !resource.GetDeletionTimestamp().IsZero() { + logger.Info("deletion event") + + if err := r.onDelete(ctx, req, resource); err != nil { + return ctrl.Result{}, err + } + } else { + if err := r.onCreateUpdate(ctx, req, resource); err != nil { + return ctrl.Result{}, err + } + } + + return r.Controller.ReconcileResource(ctx, r, req, &meshv2beta1.MeshGateway{}) +} + +func (r *MeshGatewayController) Logger(name types.NamespacedName) logr.Logger { + return r.Log.WithValues("request", name) +} + +func (r *MeshGatewayController) UpdateStatus(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + return r.Status().Update(ctx, obj, opts...) +} + +func (r *MeshGatewayController) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&meshv2beta1.MeshGateway{}). + Owns(&appsv1.Deployment{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + Owns(&corev1.Service{}). + Owns(&corev1.ServiceAccount{}). 
+ Watches( + &meshv2beta1.GatewayClass{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { + gateways, err := r.getGatewaysReferencingGatewayClass(ctx, o.(*meshv2beta1.GatewayClass)) + if err != nil { + return nil + } + + requests := make([]reconcile.Request, 0, len(gateways.Items)) + for _, gateway := range gateways.Items { + requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: gateway.Namespace, + Name: gateway.Name, + }}) + } + + return requests + })). + Watches( + &meshv2beta1.GatewayClassConfig{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { + classes, err := r.getGatewayClassesReferencingGatewayClassConfig(ctx, o.(*meshv2beta1.GatewayClassConfig)) + if err != nil { + return nil + } + + var requests []reconcile.Request + for _, class := range classes.Items { + gateways, err := r.getGatewaysReferencingGatewayClass(ctx, class) + if err != nil { + continue + } + + for _, gateway := range gateways.Items { + requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: gateway.Namespace, + Name: gateway.Name, + }}) + } + } + + return requests + })). + Complete(r) +} + +// onCreateUpdate is responsible for creating/updating all K8s resources that +// are required in order to run a meshv2beta1.MeshGateway. These are created/updated +// in dependency order. +// 1. ServiceAccount +// 2. Deployment +// 3. Service +// 4. Role +// 5. 
RoleBinding +func (r *MeshGatewayController) onCreateUpdate(ctx context.Context, req ctrl.Request, resource *meshv2beta1.MeshGateway) error { + // Fetch GatewayClassConfig for the gateway + gcc, err := r.getGatewayClassConfigForGateway(ctx, resource) + if err != nil { + r.Log.Error(err, "unable to get gatewayclassconfig for gateway: %s gatewayclass: %s", resource.Name, resource.Spec.GatewayClassName) + return err + } + + builder := gateways.NewMeshGatewayBuilder(resource, r.GatewayConfig, gcc) + + // Create ServiceAccount + desiredAccount := builder.ServiceAccount() + existingAccount := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: desiredAccount.Namespace, Name: desiredAccount.Name}} + + upsertOp := func(ctx context.Context, _, object client.Object) error { + _, err := controllerutil.CreateOrUpdate(ctx, r.Client, object, func() error { return nil }) + return err + } + + err = r.opIfNewOrOwned(ctx, resource, existingAccount, desiredAccount, upsertOp) + if err != nil { + return fmt.Errorf("unable to create service account: %w", err) + } + + // Create Role + desiredRole := builder.Role() + existingRole := &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Namespace: desiredRole.Namespace, Name: desiredRole.Name}} + + err = r.opIfNewOrOwned(ctx, resource, existingRole, desiredRole, upsertOp) + if err != nil { + return fmt.Errorf("unable to create role: %w", err) + } + + // Create RoleBinding + desiredBinding := builder.RoleBinding() + existingBinding := &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Namespace: desiredBinding.Namespace, Name: desiredBinding.Name}} + + err = r.opIfNewOrOwned(ctx, resource, existingBinding, desiredBinding, upsertOp) + if err != nil { + return fmt.Errorf("unable to create role binding: %w", err) + } + + // Create Service + desiredService := builder.Service() + existingService := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: desiredService.Namespace, Name: desiredService.Name}} + + mergeServiceOp := func(ctx 
context.Context, existingObj, desiredObj client.Object) error { + existing := existingObj.(*corev1.Service) + desired := desiredObj.(*corev1.Service) + + _, err := controllerutil.CreateOrUpdate(ctx, r.Client, existing, func() error { + gateways.MergeService(existing, desired) + return nil + }) + return err + } + + err = r.opIfNewOrOwned(ctx, resource, existingService, desiredService, mergeServiceOp) + if err != nil { + return fmt.Errorf("unable to create service: %w", err) + } + + // Create Deployment + desiredDeployment, err := builder.Deployment() + if err != nil { + return fmt.Errorf("unable to create deployment: %w", err) + } + existingDeployment := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: desiredDeployment.Namespace, Name: desiredDeployment.Name}} + + mergeDeploymentOp := func(ctx context.Context, existingObj, desiredObj client.Object) error { + existing := existingObj.(*appsv1.Deployment) + desired := desiredObj.(*appsv1.Deployment) + + _, err = controllerutil.CreateOrUpdate(ctx, r.Client, existing, func() error { + gateways.MergeDeployment(existing, desired) + return nil + }) + return err + } + + err = r.opIfNewOrOwned(ctx, resource, existingDeployment, desiredDeployment, mergeDeploymentOp) + if err != nil { + return fmt.Errorf("unable to create deployment: %w", err) + } + + return nil +} + +// onDelete is responsible for cleaning up any side effects of onCreateUpdate. +// We only clean up side effects because all resources that we create explicitly +// have an owner reference and will thus be cleaned up by the K8s garbage collector +// once the owning meshv2beta1.MeshGateway is deleted. 
+func (r *MeshGatewayController) onDelete(ctx context.Context, req ctrl.Request, resource *meshv2beta1.MeshGateway) error { + // TODO NET-6392 NET-6393 + return nil +} + +// ownedObjectOp represents an operation that needs to be applied +// only if the newObject does not yet exist or if the existingObject +// has an owner reference pointing to the MeshGateway being reconciled. +// +// The existing and new object are available in case any merging needs +// to occur, such as unknown annotations and values from the existing object +// that need to be carried forward onto the new object. +type ownedObjectOp func(ctx context.Context, existing, desired client.Object) error + +// opIfNewOrOwned runs a given ownedObjectOp to create, update, or delete a resource. +// The purpose of opIfNewOrOwned is to ensure that we aren't updating or deleting a +// resource that was not created by us. If this scenario is encountered, we error. +func (r *MeshGatewayController) opIfNewOrOwned(ctx context.Context, gateway *meshv2beta1.MeshGateway, existing, desired client.Object, op ownedObjectOp) error { + // Ensure owner reference is always set on objects that we write + if err := ctrl.SetControllerReference(gateway, desired, r.Client.Scheme()); err != nil { + return err + } + + key := client.ObjectKey{ + Namespace: existing.GetNamespace(), + Name: existing.GetName(), + } + + exists := false + if err := r.Get(ctx, key, existing); err != nil { + // We failed to fetch the object in a way that doesn't tell us about its existence + if !k8serr.IsNotFound(err) { + return err + } + } else { + // We successfully fetched the object, so it exists + exists = true + } + + // None exists, so we need only execute the operation + if !exists { + return op(ctx, existing, desired) + } + + // Ensure the existing object was put there by us so that we don't overwrite random objects + owned := false + for _, reference := range existing.GetOwnerReferences() { + if reference.UID == gateway.GetUID() && 
reference.Name == gateway.GetName() { + owned = true + break + } + } + if !owned { + return errResourceNotOwned + } + return op(ctx, existing, desired) +} + +func (r *MeshGatewayController) getGatewayClassConfigForGateway(ctx context.Context, gateway *meshv2beta1.MeshGateway) (*meshv2beta1.GatewayClassConfig, error) { + gatewayClass, err := r.getGatewayClassForGateway(ctx, gateway) + if err != nil { + return nil, err + } + + gatewayClassConfig, err := r.getGatewayClassConfigForGatewayClass(ctx, gatewayClass) + if err != nil { + return nil, err + } + + return gatewayClassConfig, nil +} + +func (r *MeshGatewayController) getGatewayClassConfigForGatewayClass(ctx context.Context, gatewayClass *meshv2beta1.GatewayClass) (*meshv2beta1.GatewayClassConfig, error) { + if gatewayClass == nil { + // if we don't have a gateway class we can't fetch the corresponding config + return nil, nil + } + + config := &meshv2beta1.GatewayClassConfig{} + if ref := gatewayClass.Spec.ParametersRef; ref != nil { + if ref.Group != meshv2beta1.MeshGroup || ref.Kind != meshv2beta1.KindGatewayClassConfig { + // TODO @Gateway-Management additionally check for controller name when available + return nil, nil + } + + if err := r.Client.Get(ctx, types.NamespacedName{Name: ref.Name}, config); err != nil { + return nil, client.IgnoreNotFound(err) + } + } + return config, nil +} + +func (r *MeshGatewayController) getGatewayClassForGateway(ctx context.Context, gateway *meshv2beta1.MeshGateway) (*meshv2beta1.GatewayClass, error) { + var gatewayClass meshv2beta1.GatewayClass + + if err := r.Client.Get(ctx, types.NamespacedName{Name: string(gateway.Spec.GatewayClassName)}, &gatewayClass); err != nil { + return nil, client.IgnoreNotFound(err) + } + return &gatewayClass, nil +} + +// getGatewayClassesReferencingGatewayClassConfig queries all GatewayClass resources in the +// cluster and returns any that reference the given GatewayClassConfig. 
+func (r *MeshGatewayController) getGatewayClassesReferencingGatewayClassConfig(ctx context.Context, config *meshv2beta1.GatewayClassConfig) (*meshv2beta1.GatewayClassList, error) { + if config == nil { + return nil, nil + } + + allClasses := &meshv2beta1.GatewayClassList{} + if err := r.Client.List(ctx, allClasses); err != nil { + return nil, client.IgnoreNotFound(err) + } + + matchingClasses := &meshv2beta1.GatewayClassList{} + for _, class := range allClasses.Items { + if class.Spec.ParametersRef != nil && class.Spec.ParametersRef.Name == config.Name { + matchingClasses.Items = append(matchingClasses.Items, class) + } + } + return matchingClasses, nil +} + +// getGatewaysReferencingGatewayClass queries all MeshGateway resources in the cluster +// and returns any that reference the given GatewayClass. +func (r *MeshGatewayController) getGatewaysReferencingGatewayClass(ctx context.Context, class *meshv2beta1.GatewayClass) (*meshv2beta1.MeshGatewayList, error) { + if class == nil { + return nil, nil + } + + allGateways := &meshv2beta1.MeshGatewayList{} + if err := r.Client.List(ctx, allGateways); err != nil { + return nil, client.IgnoreNotFound(err) + } + + matchingGateways := &meshv2beta1.MeshGatewayList{} + for _, gateway := range allGateways.Items { + if gateway.Spec.GatewayClassName == class.Name { + matchingGateways.Items = append(matchingGateways.Items, gateway) + } + } + return matchingGateways, nil +} diff --git a/control-plane/controllers/resources/mesh_gateway_controller_test.go b/control-plane/controllers/resources/mesh_gateway_controller_test.go new file mode 100644 index 0000000000..cd3f590deb --- /dev/null +++ b/control-plane/controllers/resources/mesh_gateway_controller_test.go @@ -0,0 +1,602 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package resources + +import ( + "context" + "testing" + + logrtest "github.com/go-logr/logr/testr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/sdk/testutil" + + "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +func TestMeshGatewayController_Reconcile(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + // k8sObjects is the list of Kubernetes resources that will be present in the cluster at runtime + k8sObjects []client.Object + // request is the request that will be provided to MeshGatewayController.Reconcile + request ctrl.Request + // expectedErr is the error we expect MeshGatewayController.Reconcile to return + expectedErr error + // expectedResult is the result we expect MeshGatewayController.Reconcile to return + expectedResult ctrl.Result + // postReconcile runs some set of assertions on the state of k8s after Reconcile is called + postReconcile func(*testing.T, client.Client) + }{ + // ServiceAccount + { + name: "MeshGateway created with no existing ServiceAccount", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "consul", + Name: "mesh-gateway", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + 
Namespace: "consul", + Name: "mesh-gateway", + }, + }, + expectedResult: ctrl.Result{}, + postReconcile: func(t *testing.T, c client.Client) { + // Verify ServiceAccount was created + key := client.ObjectKey{Namespace: "consul", Name: "mesh-gateway"} + assert.NoError(t, c.Get(context.Background(), key, &corev1.ServiceAccount{})) + }, + }, + { + name: "MeshGateway created with existing ServiceAccount not owned by gateway", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + expectedResult: ctrl.Result{}, + expectedErr: errResourceNotOwned, + }, + // Role + { + name: "MeshGateway created with no existing Role", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "consul", + Name: "mesh-gateway", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "consul", + Name: "mesh-gateway", + }, + }, + expectedResult: ctrl.Result{}, + postReconcile: func(t *testing.T, c client.Client) { + // Verify Role was created + key := client.ObjectKey{Namespace: "consul", Name: "mesh-gateway"} + assert.NoError(t, c.Get(context.Background(), key, &rbacv1.Role{})) + }, + }, + { + name: "MeshGateway created with existing Role not owned by gateway", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + 
Namespace: "default", + Name: "mesh-gateway", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + expectedResult: ctrl.Result{}, + expectedErr: errResourceNotOwned, + }, + { + name: "MeshGateway created with existing Role owned by gateway", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + UID: "abc123", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + OwnerReferences: []metav1.OwnerReference{ + { + UID: "abc123", + Name: "mesh-gateway", + }, + }, + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + expectedResult: ctrl.Result{}, + expectedErr: nil, // The Reconcile should be a no-op + }, + // RoleBinding + { + name: "MeshGateway created with no existing RoleBinding", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "consul", + Name: "mesh-gateway", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "consul", + Name: "mesh-gateway", + }, + }, + expectedResult: ctrl.Result{}, + postReconcile: func(t *testing.T, c client.Client) { + // Verify RoleBinding 
was created + key := client.ObjectKey{Namespace: "consul", Name: "mesh-gateway"} + assert.NoError(t, c.Get(context.Background(), key, &rbacv1.RoleBinding{})) + }, + }, + { + name: "MeshGateway created with existing RoleBinding not owned by gateway", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + expectedResult: ctrl.Result{}, + expectedErr: errResourceNotOwned, + }, + { + name: "MeshGateway created with existing RoleBinding owned by gateway", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + UID: "abc123", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + OwnerReferences: []metav1.OwnerReference{ + { + UID: "abc123", + Name: "mesh-gateway", + }, + }, + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + expectedResult: ctrl.Result{}, + expectedErr: nil, // The Reconcile should be a no-op + }, + // Deployment + { + name: "MeshGateway created with no existing Deployment", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "consul", + Name: "mesh-gateway", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: 
"consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "consul", + Name: "mesh-gateway", + }, + }, + expectedResult: ctrl.Result{}, + postReconcile: func(t *testing.T, c client.Client) { + // Verify Deployment was created + key := client.ObjectKey{Namespace: "consul", Name: "mesh-gateway"} + assert.NoError(t, c.Get(context.Background(), key, &appsv1.Deployment{})) + }, + }, + { + name: "MeshGateway created with existing Deployment not owned by gateway", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + expectedResult: ctrl.Result{}, + expectedErr: errResourceNotOwned, + }, + { + name: "MeshGateway created with existing Deployment owned by gateway", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + UID: "abc123", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + OwnerReferences: []metav1.OwnerReference{ + { + UID: "abc123", + Name: "mesh-gateway", + }, + }, + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + 
expectedResult: ctrl.Result{}, + expectedErr: nil, // The Reconcile should be a no-op + }, + // Service + { + name: "MeshGateway created with no existing Service", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "consul", + Name: "mesh-gateway", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "consul", + Name: "mesh-gateway", + }, + }, + expectedResult: ctrl.Result{}, + postReconcile: func(t *testing.T, c client.Client) { + // Verify Service was created + key := client.ObjectKey{Namespace: "consul", Name: "mesh-gateway"} + assert.NoError(t, c.Get(context.Background(), key, &corev1.Service{})) + }, + }, + { + name: "MeshGateway created with existing Service not owned by gateway", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + expectedResult: ctrl.Result{}, + expectedErr: errResourceNotOwned, + }, + { + name: "MeshGateway created with existing Service owned by gateway", + k8sObjects: []client.Object{ + &v2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + UID: "abc123", + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "consul", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 8443, + Protocol: "tcp", + }, + }, + }, + }, + 
&corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + OwnerReferences: []metav1.OwnerReference{ + { + UID: "abc123", + Name: "mesh-gateway", + }, + }, + }, + }, + }, + request: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, + expectedResult: ctrl.Result{}, + expectedErr: nil, // The Reconcile should be a no-op + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + consulClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + }) + + s := runtime.NewScheme() + require.NoError(t, corev1.AddToScheme(s)) + require.NoError(t, appsv1.AddToScheme(s)) + require.NoError(t, rbacv1.AddToScheme(s)) + require.NoError(t, v2beta1.AddMeshToScheme(s)) + s.AddKnownTypes(v2beta1.MeshGroupVersion, &v2beta1.MeshGateway{}, &v2beta1.GatewayClass{}, &v2beta1.GatewayClassConfig{}) + fakeClient := fake.NewClientBuilder().WithScheme(s). + WithObjects(testCase.k8sObjects...). + WithStatusSubresource(testCase.k8sObjects...). 
+ Build() + + controller := MeshGatewayController{ + Client: fakeClient, + Log: logrtest.New(t), + Scheme: s, + Controller: &ConsulResourceController{ + ConsulClientConfig: consulClient.Cfg, + ConsulServerConnMgr: consulClient.Watcher, + }, + } + + res, err := controller.Reconcile(context.Background(), testCase.request) + if testCase.expectedErr != nil { + // require.EqualError(t, err, testCase.expectedErr.Error()) + require.ErrorIs(t, err, testCase.expectedErr) + } else { + require.NoError(t, err) + } + assert.Equal(t, testCase.expectedResult, res) + + if testCase.postReconcile != nil { + testCase.postReconcile(t, fakeClient) + } + }) + } +} diff --git a/control-plane/controllers/resources/proxy_configuration_controller.go b/control-plane/controllers/resources/proxy_configuration_controller.go new file mode 100644 index 0000000000..7f67afe26a --- /dev/null +++ b/control-plane/controllers/resources/proxy_configuration_controller.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resources + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" +) + +// ProxyConfigurationController reconciles a ProxyConfiguration object. 
+type ProxyConfigurationController struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Controller *ConsulResourceController +} + +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=proxyconfiguration,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=proxyconfiguration/status,verbs=get;update;patch + +func (r *ProxyConfigurationController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + return r.Controller.ReconcileResource(ctx, r, req, &meshv2beta1.ProxyConfiguration{}) +} + +func (r *ProxyConfigurationController) Logger(name types.NamespacedName) logr.Logger { + return r.Log.WithValues("request", name) +} + +func (r *ProxyConfigurationController) UpdateStatus(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + return r.Status().Update(ctx, obj, opts...) +} + +func (r *ProxyConfigurationController) SetupWithManager(mgr ctrl.Manager) error { + return setupWithManager(mgr, &meshv2beta1.ProxyConfiguration{}, r) +} diff --git a/control-plane/controllers/resources/tcp_route_controller.go b/control-plane/controllers/resources/tcp_route_controller.go new file mode 100644 index 0000000000..dc69f879b2 --- /dev/null +++ b/control-plane/controllers/resources/tcp_route_controller.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resources + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" +) + +// TCPRouteController reconciles a TCPRoute object. 
+type TCPRouteController struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Controller *ConsulResourceController +} + +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=tcproute,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=mesh.consul.hashicorp.com,resources=tcproute/status,verbs=get;update;patch + +func (r *TCPRouteController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + return r.Controller.ReconcileResource(ctx, r, req, &meshv2beta1.TCPRoute{}) +} + +func (r *TCPRouteController) Logger(name types.NamespacedName) logr.Logger { + return r.Log.WithValues("request", name) +} + +func (r *TCPRouteController) UpdateStatus(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + return r.Status().Update(ctx, obj, opts...) +} + +func (r *TCPRouteController) SetupWithManager(mgr ctrl.Manager) error { + return setupWithManager(mgr, &meshv2beta1.TCPRoute{}, r) +} diff --git a/control-plane/controllers/resources/traffic_permissions_controller.go b/control-plane/controllers/resources/traffic_permissions_controller.go new file mode 100644 index 0000000000..f844473b0c --- /dev/null +++ b/control-plane/controllers/resources/traffic_permissions_controller.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resources + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + consulv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/auth/v2beta1" +) + +// TrafficPermissionsController reconciles a TrafficPermissions object. 
+type TrafficPermissionsController struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Controller *ConsulResourceController +} + +// +kubebuilder:rbac:groups=auth.consul.hashicorp.com,resources=trafficpermissions,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=auth.consul.hashicorp.com,resources=trafficpermissions/status,verbs=get;update;patch + +func (r *TrafficPermissionsController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + return r.Controller.ReconcileResource(ctx, r, req, &consulv2beta1.TrafficPermissions{}) +} + +func (r *TrafficPermissionsController) Logger(name types.NamespacedName) logr.Logger { + return r.Log.WithValues("request", name) +} + +func (r *TrafficPermissionsController) UpdateStatus(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + return r.Status().Update(ctx, obj, opts...) +} + +func (r *TrafficPermissionsController) SetupWithManager(mgr ctrl.Manager) error { + return setupWithManager(mgr, &consulv2beta1.TrafficPermissions{}, r) +} diff --git a/control-plane/gateways/builder.go b/control-plane/gateways/builder.go new file mode 100644 index 0000000000..e43e6dd890 --- /dev/null +++ b/control-plane/gateways/builder.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gateways + +import ( + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" +) + +// meshGatewayBuilder is a helper struct for building the Kubernetes resources for a mesh gateway. +// This includes Deployment, Role, Service, and ServiceAccount resources. +// Configuration is combined from the MeshGateway, GatewayConfig, and GatewayClassConfig. 
+type meshGatewayBuilder struct { + gateway *meshv2beta1.MeshGateway + config GatewayConfig + gcc *meshv2beta1.GatewayClassConfig +} + +// NewMeshGatewayBuilder returns a new meshGatewayBuilder for the given MeshGateway, +// GatewayConfig, and GatewayClassConfig. +func NewMeshGatewayBuilder(gateway *meshv2beta1.MeshGateway, gatewayConfig GatewayConfig, gatewayClassConfig *meshv2beta1.GatewayClassConfig) *meshGatewayBuilder { + return &meshGatewayBuilder{ + gateway: gateway, + config: gatewayConfig, + gcc: gatewayClassConfig, + } +} diff --git a/control-plane/gateways/constants.go b/control-plane/gateways/constants.go new file mode 100644 index 0000000000..ac0242bd2d --- /dev/null +++ b/control-plane/gateways/constants.go @@ -0,0 +1,32 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gateways + +const ( + // General environment variables. + envPodName = "POD_NAME" + envPodNamespace = "POD_NAMESPACE" + envNodeName = "NODE_NAME" + envTmpDir = "TMPDIR" + + // Dataplane Configuration Environment variables. + envDPProxyId = "DP_PROXY_ID" + envDPCredentialLoginMeta = "DP_CREDENTIAL_LOGIN_META" + + // Init Container Configuration Environment variables. + envConsulAddresses = "CONSUL_ADDRESSES" + envConsulGRPCPort = "CONSUL_GRPC_PORT" + envConsulHTTPPort = "CONSUL_HTTP_PORT" + envConsulAPITimeout = "CONSUL_API_TIMEOUT" + envConsulNodeName = "CONSUL_NODE_NAME" + envConsulLoginAuthMethod = "CONSUL_LOGIN_AUTH_METHOD" + envConsulLoginBearerTokenFile = "CONSUL_LOGIN_BEARER_TOKEN_FILE" + envConsulLoginMeta = "CONSUL_LOGIN_META" + envConsulLoginPartition = "CONSUL_LOGIN_PARTITION" + envConsulNamespace = "CONSUL_NAMESPACE" + envConsulPartition = "CONSUL_PARTITION" + + // defaultBearerTokenFile is the default location where the init container will store the bearer token for the dataplane container to read. 
+ defaultBearerTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" +) diff --git a/control-plane/gateways/deployment.go b/control-plane/gateways/deployment.go new file mode 100644 index 0000000000..433323489c --- /dev/null +++ b/control-plane/gateways/deployment.go @@ -0,0 +1,207 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gateways + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" +) + +const ( + globalDefaultInstances int32 = 1 + meshGatewayAnnotationKind = "mesh-gateway" +) + +func (b *meshGatewayBuilder) Deployment() (*appsv1.Deployment, error) { + spec, err := b.deploymentSpec() + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.gateway.Name, + Namespace: b.gateway.Namespace, + Labels: b.labelsForDeployment(), + Annotations: b.annotationsForDeployment(), + }, + Spec: *spec, + }, err +} + +func (b *meshGatewayBuilder) deploymentSpec() (*appsv1.DeploymentSpec, error) { + var ( + deploymentConfig meshv2beta1.GatewayClassDeploymentConfig + containerConfig meshv2beta1.GatewayClassContainerConfig + ) + + // If GatewayClassConfig is not nil, use it to override the defaults for + // the deployment and container configs. 
+ if b.gcc != nil { + deploymentConfig = b.gcc.Spec.Deployment + if deploymentConfig.Container != nil { + containerConfig = *b.gcc.Spec.Deployment.Container + } + } + + initContainer, err := b.initContainer() + if err != nil { + return nil, err + } + + container, err := b.consulDataplaneContainer(containerConfig) + if err != nil { + return nil, err + } + + return &appsv1.DeploymentSpec{ + Replicas: deploymentReplicaCount(deploymentConfig.Replicas, nil), + Selector: &metav1.LabelSelector{ + MatchLabels: b.labelsForDeployment(), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: b.labelsForDeployment(), + Annotations: map[string]string{ + // Indicate that this pod is a mesh gateway pod so that the Pod controller, + // consul-k8s CLI, etc. can key off of it + constants.AnnotationGatewayKind: meshGatewayAnnotationKind, + // It's not logical to add a proxy sidecar since our workload is itself a proxy + constants.AnnotationMeshInject: "false", + // This functionality only applies when proxy sidecars are used + constants.AnnotationTransparentProxyOverwriteProbes: "false", + // This annotation determines which source to use to set the + // WAN address and WAN port for the Mesh Gateway service registration. + constants.AnnotationGatewayWANSource: b.gateway.Annotations[constants.AnnotationGatewayWANSource], + // This annotation determines the WAN port for the Mesh Gateway service registration. + constants.AnnotationGatewayWANPort: b.gateway.Annotations[constants.AnnotationGatewayWANPort], + // This annotation determines the address for the gateway when the source annotation is "Static". 
+ constants.AnnotationGatewayWANAddress: b.gateway.Annotations[constants.AnnotationGatewayWANAddress], + }, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}, + }, + }, + }, + InitContainers: []corev1.Container{ + initContainer, + }, + Containers: []corev1.Container{ + container, + }, + Affinity: deploymentConfig.Affinity, + NodeSelector: deploymentConfig.NodeSelector, + PriorityClassName: deploymentConfig.PriorityClassName, + TopologySpreadConstraints: deploymentConfig.TopologySpreadConstraints, + HostNetwork: deploymentConfig.HostNetwork, + Tolerations: deploymentConfig.Tolerations, + ServiceAccountName: b.serviceAccountName(), + DNSPolicy: deploymentConfig.DNSPolicy, + }, + }, + }, nil +} + +// areDeploymentsEqual determines whether two Deployments are the same in +// the ways that we care about. This specifically ignores valid out-of-band +// changes such as initContainer injection. 
+func areDeploymentsEqual(a, b *appsv1.Deployment) bool { + // since K8s adds a bunch of defaults when we create a deployment, check that + // they don't differ by the things that we may actually change, namely container + // ports + if len(b.Spec.Template.Spec.Containers) != len(a.Spec.Template.Spec.Containers) { + return false + } + for i, container := range a.Spec.Template.Spec.Containers { + otherPorts := b.Spec.Template.Spec.Containers[i].Ports + if len(container.Ports) != len(otherPorts) { + return false + } + for j, port := range container.Ports { + otherPort := otherPorts[j] + if port.ContainerPort != otherPort.ContainerPort { + return false + } + if port.Protocol != otherPort.Protocol { + return false + } + } + } + + if b.Spec.Replicas == nil && a.Spec.Replicas == nil { + return true + } else if b.Spec.Replicas == nil { + return false + } else if a.Spec.Replicas == nil { + return false + } + + return *b.Spec.Replicas == *a.Spec.Replicas +} + +func deploymentReplicaCount(replicas *meshv2beta1.GatewayClassReplicasConfig, currentReplicas *int32) *int32 { + // if we have the replicas config, use it + if replicas != nil && replicas.Default != nil && currentReplicas == nil { + return replicas.Default + } + + // if we have the replicas config and the current replicas, use the min/max to ensure + // the current replicas are within the min/max range + if replicas != nil && currentReplicas != nil { + if replicas.Max != nil && *currentReplicas > *replicas.Max { + return replicas.Max + } + + if replicas.Min != nil && *currentReplicas < *replicas.Min { + return replicas.Min + } + + return currentReplicas + } + + // if we don't have the replicas config, use the current replicas if we have them + if currentReplicas != nil { + return currentReplicas + } + + // otherwise use the global default + return pointer.Int32(globalDefaultInstances) +} + +// MergeDeployment is used to update an appsv1.Deployment without overwriting any +// existing annotations or labels that were 
placed there by other vendors. +// +// based on https://github.com/kubernetes-sigs/controller-runtime/blob/4000e996a202917ad7d40f02ed8a2079a9ce25e9/pkg/controller/controllerutil/example_test.go +func MergeDeployment(existing, desired *appsv1.Deployment) { + // Only overwrite fields if the Deployment doesn't exist yet + if existing.ObjectMeta.CreationTimestamp.IsZero() { + existing.ObjectMeta.OwnerReferences = desired.ObjectMeta.OwnerReferences + existing.Spec = desired.Spec + existing.Annotations = desired.Annotations + existing.Labels = desired.Labels + return + } + + // Make sure we don't reconcile forever by overwriting valid out-of-band + // changes such as init container injection. If the deployments are + // sufficiently equal, we only update the annotations. + if !areDeploymentsEqual(existing, desired) { + desired.Spec.Replicas = deploymentReplicaCount(nil, existing.Spec.Replicas) + existing.Spec = desired.Spec + } + + // If the Deployment already exists, add any desired annotations + labels to existing set + for k, v := range desired.ObjectMeta.Annotations { + existing.ObjectMeta.Annotations[k] = v + } + for k, v := range desired.ObjectMeta.Labels { + existing.ObjectMeta.Labels[k] = v + } +} diff --git a/control-plane/gateways/deployment_dataplane_container.go b/control-plane/gateways/deployment_dataplane_container.go new file mode 100644 index 0000000000..9dc7dad141 --- /dev/null +++ b/control-plane/gateways/deployment_dataplane_container.go @@ -0,0 +1,208 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package gateways + +import ( + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" + + "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" + + corev1 "k8s.io/api/core/v1" +) + +const ( + allCapabilities = "ALL" + netBindCapability = "NET_BIND_SERVICE" + consulDataplaneDNSBindHost = "127.0.0.1" + consulDataplaneDNSBindPort = 8600 + defaultPrometheusScrapePath = "/metrics" + defaultEnvoyProxyConcurrency = "1" + volumeName = "consul-mesh-inject-data" +) + +func (b *meshGatewayBuilder) consulDataplaneContainer(containerConfig v2beta1.GatewayClassContainerConfig) (corev1.Container, error) { + // Extract the service account token's volume mount. + var ( + err error + bearerTokenFile string + ) + + resources := containerConfig.Resources + + if b.config.AuthMethod != "" { + bearerTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" + } + + args, err := b.dataplaneArgs(bearerTokenFile) + if err != nil { + return corev1.Container{}, err + } + + probe := &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(constants.ProxyDefaultHealthPort), + Path: "/ready", + }, + }, + InitialDelaySeconds: 1, + } + + container := corev1.Container{ + Name: b.gateway.Name, + Image: b.config.ImageDataplane, + + // We need to set tmp dir to an ephemeral volume that we're mounting so that + // consul-dataplane can write files to it. Otherwise, it wouldn't be able to + // because we set file system to be read-only. + + // TODO(nathancoleman): I don't believe consul-dataplane needs to write anymore, investigate. 
+ Env: []corev1.EnvVar{ + { + Name: envDPProxyId, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}, + }, + }, + { + Name: envPodNamespace, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}, + }, + }, + { + Name: envTmpDir, + Value: constants.MeshV2VolumePath, + }, + { + Name: envNodeName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "spec.nodeName", + }, + }, + }, + { + Name: envDPCredentialLoginMeta, + Value: "pod=$(POD_NAMESPACE)/$(DP_PROXY_ID)", + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: constants.MeshV2VolumePath, + }, + }, + Args: args, + ReadinessProbe: probe, + } + + // Configure the Readiness Address for the proxy's health check to be the Pod IP. + container.Env = append(container.Env, corev1.EnvVar{ + Name: "DP_ENVOY_READY_BIND_ADDRESS", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIP"}, + }, + }) + // Configure the port on which the readiness probe will query the proxy for its health. + container.Ports = append(container.Ports, corev1.ContainerPort{ + Name: "proxy-health", + ContainerPort: int32(constants.ProxyDefaultHealthPort), + }) + + // Configure the wan port. + wanPort := corev1.ContainerPort{ + Name: "wan", + ContainerPort: int32(constants.DefaultWANPort), + HostPort: containerConfig.HostPort, + } + + wanPort.ContainerPort = 443 + containerConfig.PortModifier + + container.Ports = append(container.Ports, wanPort) + + // Configure the resource requests and limits for the proxy if they are set. + if resources != nil { + container.Resources = *resources + } + + container.SecurityContext = &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + // Drop any Linux capabilities you'd get other than NET_BIND_SERVICE. 
+ // FUTURE: We likely require some additional capability in order to support + // MeshGateway's host network option. + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{netBindCapability}, + Drop: []corev1.Capability{allCapabilities}, + }, + ReadOnlyRootFilesystem: pointer.Bool(true), + RunAsNonRoot: pointer.Bool(true), + } + + return container, nil +} + +func (b *meshGatewayBuilder) dataplaneArgs(bearerTokenFile string) ([]string, error) { + args := []string{ + "-addresses", b.config.ConsulConfig.Address, + "-grpc-port=" + strconv.Itoa(b.config.ConsulConfig.GRPCPort), + "-log-level=" + b.logLevelForDataplaneContainer(), + "-log-json=" + strconv.FormatBool(b.config.LogJSON), + "-envoy-concurrency=" + defaultEnvoyProxyConcurrency, + } + + consulNamespace := namespaces.ConsulNamespace(b.gateway.Namespace, b.config.ConsulTenancyConfig.EnableConsulNamespaces, b.config.ConsulTenancyConfig.ConsulDestinationNamespace, b.config.ConsulTenancyConfig.EnableConsulNamespaces, b.config.ConsulTenancyConfig.NSMirroringPrefix) + + if b.config.AuthMethod != "" { + args = append(args, + "-credential-type=login", + "-login-auth-method="+b.config.AuthMethod, + "-login-bearer-token-path="+bearerTokenFile, + "-login-meta="+fmt.Sprintf("gateway=%s/%s", b.gateway.Namespace, b.gateway.Name), + ) + if b.config.ConsulTenancyConfig.ConsulPartition != "" { + args = append(args, "-login-partition="+b.config.ConsulTenancyConfig.ConsulPartition) + } + } + if b.config.SkipServerWatch { + args = append(args, "-server-watch-disabled=true") + } + if b.config.ConsulTenancyConfig.EnableConsulNamespaces { + args = append(args, "-proxy-namespace="+consulNamespace) + } + if b.config.ConsulTenancyConfig.ConsulPartition != "" { + args = append(args, "-proxy-partition="+b.config.ConsulTenancyConfig.ConsulPartition) + } + + args = append(args, buildTLSArgs(b.config)...) + + // Configure the readiness port on the dataplane sidecar if proxy health checks are enabled. 
+ args = append(args, fmt.Sprintf("%s=%d", "-envoy-ready-bind-port", constants.ProxyDefaultHealthPort)) + + args = append(args, fmt.Sprintf("-envoy-admin-bind-port=%d", 19000)) + + return args, nil +} + +func buildTLSArgs(config GatewayConfig) []string { + if !config.TLSEnabled { + return []string{"-tls-disabled"} + } + tlsArgs := make([]string, 0, 2) + + if config.ConsulTLSServerName != "" { + tlsArgs = append(tlsArgs, fmt.Sprintf("-tls-server-name=%s", config.ConsulTLSServerName)) + } + if config.ConsulCACert != "" { + tlsArgs = append(tlsArgs, fmt.Sprintf("-ca-certs=%s", constants.ConsulCAFile)) + } + + return tlsArgs +} diff --git a/control-plane/gateways/deployment_init_container.go b/control-plane/gateways/deployment_init_container.go new file mode 100644 index 0000000000..14230b98df --- /dev/null +++ b/control-plane/gateways/deployment_init_container.go @@ -0,0 +1,195 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gateways + +import ( + "bytes" + "strconv" + "strings" + "text/template" + + corev1 "k8s.io/api/core/v1" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" +) + +const ( + injectInitContainerName = "consul-mesh-init" + initContainersUserAndGroupID = 5996 +) + +var ( + tpl = template.Must(template.New("root").Parse(strings.TrimSpace(initContainerCommandTpl))) +) + +type initContainerCommandData struct { + ServiceName string + ServiceAccountName string + AuthMethod string + + // Log settings for the connect-init command. + LogLevel string + LogJSON bool +} + +// initContainer returns the init container spec for connect-init that polls for the service and the connect proxy service to be registered +// so that it can save the proxy service id to the shared volume and boostrap Envoy with the proxy-id. 
+func (b *meshGatewayBuilder) initContainer() (corev1.Container, error) { + data := initContainerCommandData{ + AuthMethod: b.config.AuthMethod, + LogLevel: b.logLevelForInitContainer(), + LogJSON: b.config.LogJSON, + ServiceName: b.gateway.Name, + ServiceAccountName: b.serviceAccountName(), + } + // Render the command + var buf bytes.Buffer + if err := tpl.Execute(&buf, &data); err != nil { + return corev1.Container{}, err + } + + // Create expected volume mounts + volMounts := []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: constants.MeshV2VolumePath, + }, + } + + var bearerTokenFile string + if b.config.AuthMethod != "" { + bearerTokenFile = defaultBearerTokenFile + } + + consulNamespace := namespaces.ConsulNamespace(b.gateway.Namespace, b.config.ConsulTenancyConfig.EnableConsulNamespaces, b.config.ConsulTenancyConfig.ConsulDestinationNamespace, b.config.ConsulTenancyConfig.EnableConsulNamespaces, b.config.ConsulTenancyConfig.NSMirroringPrefix) + + initContainerName := injectInitContainerName + container := corev1.Container{ + Name: initContainerName, + Image: b.config.ImageConsulK8S, + + Env: []corev1.EnvVar{ + { + Name: envPodName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}, + }, + }, + { + Name: envPodNamespace, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}, + }, + }, + { + Name: envNodeName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "spec.nodeName", + }, + }, + }, + { + Name: envConsulAddresses, + Value: b.config.ConsulConfig.Address, + }, + { + Name: envConsulGRPCPort, + Value: strconv.Itoa(b.config.ConsulConfig.GRPCPort), + }, + { + Name: envConsulHTTPPort, + Value: strconv.Itoa(b.config.ConsulConfig.HTTPPort), + }, + { + Name: envConsulAPITimeout, + Value: b.config.ConsulConfig.APITimeout.String(), + }, + { + Name: envConsulNodeName, + Value: "$(NODE_NAME)-virtual", + }, + }, + 
VolumeMounts: volMounts, + Command: []string{"/bin/sh", "-ec", buf.String()}, + Resources: initContainerResourcesOrDefault(b.gcc), + } + + if b.config.AuthMethod != "" { + container.Env = append(container.Env, + corev1.EnvVar{ + Name: envConsulLoginAuthMethod, + Value: b.config.AuthMethod, + }, + corev1.EnvVar{ + Name: envConsulLoginBearerTokenFile, + Value: bearerTokenFile, + }, + corev1.EnvVar{ + Name: envConsulLoginMeta, + Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)", + }) + + if b.config.ConsulTenancyConfig.ConsulPartition != "" { + container.Env = append(container.Env, corev1.EnvVar{ + Name: envConsulLoginPartition, + Value: b.config.ConsulTenancyConfig.ConsulPartition, + }) + } + } + container.Env = append(container.Env, + corev1.EnvVar{ + Name: envConsulNamespace, + Value: consulNamespace, + }) + + if b.config.TLSEnabled { + container.Env = append(container.Env, + corev1.EnvVar{ + Name: constants.UseTLSEnvVar, + Value: "true", + }, + corev1.EnvVar{ + Name: constants.CACertPEMEnvVar, + Value: b.config.ConsulCACert, + }, + corev1.EnvVar{ + Name: constants.TLSServerNameEnvVar, + Value: b.config.ConsulTLSServerName, + }) + } + + if b.config.ConsulTenancyConfig.ConsulPartition != "" { + container.Env = append(container.Env, + corev1.EnvVar{ + Name: envConsulPartition, + Value: b.config.ConsulTenancyConfig.ConsulPartition, + }) + } + + return container, nil +} + +func initContainerResourcesOrDefault(gcc *meshv2beta1.GatewayClassConfig) corev1.ResourceRequirements { + if gcc != nil && gcc.Spec.Deployment.InitContainer != nil && gcc.Spec.Deployment.InitContainer.Resources != nil { + return *gcc.Spec.Deployment.InitContainer.Resources + } + + return corev1.ResourceRequirements{} +} + +// initContainerCommandTpl is the template for the command executed by +// the init container. +// TODO @GatewayManagement parametrize gateway kind. 
+const initContainerCommandTpl = ` +consul-k8s-control-plane mesh-init \ + -proxy-name=${POD_NAME} \ + -namespace=${POD_NAMESPACE} \ + {{- with .LogLevel }} + -log-level={{ . }} \ + {{- end }} + -log-json={{ .LogJSON }} +` diff --git a/control-plane/gateways/deployment_test.go b/control-plane/gateways/deployment_test.go new file mode 100644 index 0000000000..12fa30ee0c --- /dev/null +++ b/control-plane/gateways/deployment_test.go @@ -0,0 +1,1237 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gateways + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" + + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" +) + +const testCert = `-----BEGIN CERTIFICATE----- │ +MIIDQjCCAuigAwIBAgIUZGIigQ4IKLoCh4XrXyi/c89B7ZgwCgYIKoZIzj0EAwIw │ +gZExCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5j │ +aXNjbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1 │ +MRcwFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjEYMBYGA1UEAxMPQ29uc3VsIEFnZW50 │ +IENBMB4XDTI0MDEwMzE4NTYyOVoXDTMzMTIzMTE4NTcyOVowgZExCzAJBgNVBAYT │ +AlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEaMBgGA1UE │ +CRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcwFQYDVQQKEw5I │ +YXNoaUNvcnAgSW5jLjEYMBYGA1UEAxMPQ29uc3VsIEFnZW50IENBMFkwEwYHKoZI │ +zj0CAQYIKoZIzj0DAQcDQgAEcbkdpZxlDOEuT3ZCcZ8H9j0Jad8ncDYk/Y0IbHPC │ +OKfFcpldEFPRv16WgSTHg38kK9WgEuK291+joBTHry3y06OCARowggEWMA4GA1Ud │ +DwEB/wQEAwIBhjAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0T │ +AQH/BAUwAwEB/zBoBgNVHQ4EYQRfZGY6MzA6YWE6NzI6ZTQ6ZTI6NzI6Y2Y6NTg6 │ 
+NDU6Zjk6YjU6NTA6N2I6ZDQ6MDI6MTE6ZjM6YzY6ZjE6NTc6NTE6MTg6NGU6OGU6 │ +ZjE6MmE6ZTE6MzI6NmY6ZTU6YjMwagYDVR0jBGMwYYBfZGY6MzA6YWE6NzI6ZTQ6 │ +ZTI6NzI6Y2Y6NTg6NDU6Zjk6YjU6NTA6N2I6ZDQ6MDI6MTE6ZjM6YzY6ZjE6NTc6 │ +NTE6MTg6NGU6OGU6ZjE6MmE6ZTE6MzI6NmY6ZTU6YjMwCgYIKoZIzj0EAwIDSAAw │ +RQIgXg8YtejEgGNxswtyXsvqzhLpt7k44L7TJMUhfIw0lUECIQCIxKNowmv0/XVz │ +nRnYLmGy79EZ2Y+CZS9nSm9Es6QNwg== │ +-----END CERTIFICATE-----` + +func Test_meshGatewayBuilder_Deployment(t *testing.T) { + type fields struct { + gateway *meshv2beta1.MeshGateway + config GatewayConfig + gcc *meshv2beta1.GatewayClassConfig + } + tests := []struct { + name string + fields fields + want *appsv1.Deployment + wantErr bool + }{ + { + name: "happy path", + fields: fields{ + gateway: &meshv2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationGatewayWANSource: "Service", + constants.AnnotationGatewayWANPort: "443", + constants.AnnotationGatewayWANAddress: "", + }, + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "test-gateway-class", + }, + }, + config: GatewayConfig{}, + gcc: &meshv2beta1.GatewayClassConfig{ + Spec: meshv2beta1.GatewayClassConfigSpec{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Labels: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + Set: map[string]string{ + "app": "consul", + "chart": "consul-helm", + "heritage": "Helm", + "release": "consul", + }, + }, + Annotations: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + Set: map[string]string{ + "a": "b", + }, + }, + }, + Deployment: meshv2beta1.GatewayClassDeploymentConfig{ + Affinity: &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ + { + Weight: 1, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + labelManagedBy: "consul-k8s", + "app": "consul", + "chart": "consul-helm", + "heritage": 
"Helm", + "release": "consul", + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + }, + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Labels: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + Set: map[string]string{ + "foo": "bar", + }, + }, + Annotations: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + Set: map[string]string{ + "baz": "qux", + }, + }, + }, + Container: &meshv2beta1.GatewayClassContainerConfig{ + HostPort: 8080, + PortModifier: 8000, + Consul: meshv2beta1.GatewayClassConsulConfig{ + Logging: meshv2beta1.GatewayClassConsulLoggingConfig{ + Level: "debug", + }, + }, + }, + NodeSelector: map[string]string{"beta.kubernetes.io/arch": "amd64"}, + Replicas: &meshv2beta1.GatewayClassReplicasConfig{ + Default: pointer.Int32(1), + Min: pointer.Int32(1), + Max: pointer.Int32(8), + }, + PriorityClassName: "priorityclassname", + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ + { + MaxSkew: 1, + TopologyKey: "key", + WhenUnsatisfiable: "DoNotSchedule", + }, + }, + InitContainer: &meshv2beta1.GatewayClassInitContainerConfig{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), + "memory": resource.MustParse("128Mi"), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("200m"), + "memory": resource.MustParse("228Mi"), + }, + }, + Consul: meshv2beta1.GatewayClassConsulConfig{ + Logging: meshv2beta1.GatewayClassConsulLoggingConfig{ + Level: "debug", + }, + }, + }, + }, + }, + }, + }, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + labelManagedBy: "consul-k8s", + "app": "consul", + "chart": "consul-helm", + "heritage": "Helm", + "release": "consul", + "foo": "bar", + }, + Annotations: map[string]string{ + "a": "b", + "baz": "qux", + }, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: pointer.Int32(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + 
labelManagedBy: "consul-k8s", + "app": "consul", + "chart": "consul-helm", + "heritage": "Helm", + "release": "consul", + "foo": "bar", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + labelManagedBy: "consul-k8s", + "app": "consul", + "chart": "consul-helm", + "heritage": "Helm", + "foo": "bar", + "release": "consul", + }, + Annotations: map[string]string{ + constants.AnnotationGatewayKind: meshGatewayAnnotationKind, + constants.AnnotationMeshInject: "false", + constants.AnnotationTransparentProxyOverwriteProbes: "false", + constants.AnnotationGatewayWANSource: "Service", + constants.AnnotationGatewayWANPort: "443", + constants.AnnotationGatewayWANAddress: "", + }, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "consul-mesh-inject-data", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: "Memory", + }, + }, + }, + }, + InitContainers: []corev1.Container{ + { + Name: "consul-mesh-init", + Command: []string{ + "/bin/sh", + "-ec", + "consul-k8s-control-plane mesh-init \\\n -proxy-name=${POD_NAME} \\\n -namespace=${POD_NAMESPACE} \\\n -log-level=debug \\\n -log-json=false", + }, + Env: []corev1.EnvVar{ + { + Name: "POD_NAME", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "NODE_NAME", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "spec.nodeName", + }, + }, + }, + { + Name: "CONSUL_ADDRESSES", + Value: "", + }, + { + Name: "CONSUL_GRPC_PORT", + Value: "0", + }, + { + Name: "CONSUL_HTTP_PORT", + Value: "0", + }, + { + Name: "CONSUL_API_TIMEOUT", + Value: "0s", + }, + { + Name: 
"CONSUL_NODE_NAME", + Value: "$(NODE_NAME)-virtual", + }, + { + Name: "CONSUL_NAMESPACE", + Value: "", + }, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), + "memory": resource.MustParse("128Mi"), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("200m"), + "memory": resource.MustParse("228Mi"), + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "consul-mesh-inject-data", + ReadOnly: false, + MountPath: "/consul/mesh-inject", + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Args: []string{ + "-addresses", + "", + "-grpc-port=0", + "-log-level=debug", + "-log-json=false", + "-envoy-concurrency=1", + "-tls-disabled", + "-envoy-ready-bind-port=21000", + "-envoy-admin-bind-port=19000", + }, + Ports: []corev1.ContainerPort{ + { + Name: "proxy-health", + ContainerPort: 21000, + }, + { + Name: "wan", + ContainerPort: 8443, + HostPort: 8080, + }, + }, + Env: []corev1.EnvVar{ + { + Name: "DP_PROXY_ID", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "TMPDIR", + Value: "/consul/mesh-inject", + }, + { + Name: "NODE_NAME", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "spec.nodeName", + }, + }, + }, + { + Name: "DP_CREDENTIAL_LOGIN_META", + Value: "pod=$(POD_NAMESPACE)/$(DP_PROXY_ID)", + }, + { + Name: "DP_ENVOY_READY_BIND_ADDRESS", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "status.podIP", + }, + }, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "consul-mesh-inject-data", + MountPath: "/consul/mesh-inject", + }, + }, 
+ ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/ready", + Port: intstr.IntOrString{ + Type: 0, + IntVal: 21000, + StrVal: "", + }, + }, + }, + InitialDelaySeconds: 1, + }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{ + "NET_BIND_SERVICE", + }, + Drop: []corev1.Capability{ + "ALL", + }, + }, + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + AllowPrivilegeEscalation: pointer.Bool(false), + ProcMount: nil, + SeccompProfile: nil, + }, + Stdin: false, + StdinOnce: false, + TTY: false, + }, + }, + NodeSelector: map[string]string{"beta.kubernetes.io/arch": "amd64"}, + PriorityClassName: "priorityclassname", + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ + { + MaxSkew: 1, + TopologyKey: "key", + WhenUnsatisfiable: "DoNotSchedule", + }, + }, + Affinity: &corev1.Affinity{ + NodeAffinity: nil, + PodAffinity: nil, + PodAntiAffinity: &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ + { + Weight: 1, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + labelManagedBy: "consul-k8s", + "app": "consul", + "chart": "consul-helm", + "heritage": "Helm", + "release": "consul", + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + }, + }, + }, + Strategy: appsv1.DeploymentStrategy{}, + MinReadySeconds: 0, + RevisionHistoryLimit: nil, + Paused: false, + ProgressDeadlineSeconds: nil, + }, + Status: appsv1.DeploymentStatus{}, + }, + wantErr: false, + }, + { + name: "happy path tls enabled", + fields: fields{ + gateway: &meshv2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationGatewayWANSource: "Service", + constants.AnnotationGatewayWANPort: "443", + constants.AnnotationGatewayWANAddress: "", + }, + }, + Spec: 
pbmesh.MeshGateway{ + GatewayClassName: "test-gateway-class", + }, + }, + config: GatewayConfig{ + TLSEnabled: true, + ConsulCACert: testCert, + }, + gcc: &meshv2beta1.GatewayClassConfig{ + Spec: meshv2beta1.GatewayClassConfigSpec{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Labels: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + Set: map[string]string{ + "app": "consul", + "chart": "consul-helm", + "heritage": "Helm", + "release": "consul", + }, + }, + }, + Deployment: meshv2beta1.GatewayClassDeploymentConfig{ + Affinity: &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ + { + Weight: 1, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + labelManagedBy: "consul-k8s", + "app": "consul", + "chart": "consul-helm", + "heritage": "Helm", + "release": "consul", + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + }, + Container: &meshv2beta1.GatewayClassContainerConfig{ + HostPort: 8080, + PortModifier: 8000, + Consul: meshv2beta1.GatewayClassConsulConfig{ + Logging: meshv2beta1.GatewayClassConsulLoggingConfig{ + Level: "debug", + }, + }, + }, + NodeSelector: map[string]string{"beta.kubernetes.io/arch": "amd64"}, + Replicas: &meshv2beta1.GatewayClassReplicasConfig{ + Default: pointer.Int32(1), + Min: pointer.Int32(1), + Max: pointer.Int32(8), + }, + PriorityClassName: "priorityclassname", + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ + { + MaxSkew: 1, + TopologyKey: "key", + WhenUnsatisfiable: "DoNotSchedule", + }, + }, + InitContainer: &meshv2beta1.GatewayClassInitContainerConfig{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), + "memory": resource.MustParse("128Mi"), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("200m"), + "memory": 
resource.MustParse("228Mi"), + }, + }, + Consul: meshv2beta1.GatewayClassConsulConfig{ + Logging: meshv2beta1.GatewayClassConsulLoggingConfig{ + Level: "debug", + }, + }, + }, + }, + }, + }, + }, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + labelManagedBy: "consul-k8s", + "app": "consul", + "chart": "consul-helm", + "heritage": "Helm", + "release": "consul", + }, + + Annotations: map[string]string{}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: pointer.Int32(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + labelManagedBy: "consul-k8s", + "app": "consul", + "chart": "consul-helm", + "heritage": "Helm", + "release": "consul", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + labelManagedBy: "consul-k8s", + "app": "consul", + "chart": "consul-helm", + "heritage": "Helm", + "release": "consul", + }, + Annotations: map[string]string{ + constants.AnnotationGatewayKind: meshGatewayAnnotationKind, + constants.AnnotationMeshInject: "false", + constants.AnnotationTransparentProxyOverwriteProbes: "false", + constants.AnnotationGatewayWANSource: "Service", + constants.AnnotationGatewayWANPort: "443", + constants.AnnotationGatewayWANAddress: "", + }, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "consul-mesh-inject-data", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: "Memory", + }, + }, + }, + }, + InitContainers: []corev1.Container{ + { + Name: "consul-mesh-init", + Command: []string{ + "/bin/sh", + "-ec", + "consul-k8s-control-plane mesh-init \\\n -proxy-name=${POD_NAME} \\\n -namespace=${POD_NAMESPACE} \\\n -log-level=debug \\\n -log-json=false", + }, + Env: []corev1.EnvVar{ + { + Name: "POD_NAME", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", 
+ Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "NODE_NAME", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "spec.nodeName", + }, + }, + }, + { + Name: "CONSUL_ADDRESSES", + Value: "", + }, + { + Name: "CONSUL_GRPC_PORT", + Value: "0", + }, + { + Name: "CONSUL_HTTP_PORT", + Value: "0", + }, + { + Name: "CONSUL_API_TIMEOUT", + Value: "0s", + }, + { + Name: "CONSUL_NODE_NAME", + Value: "$(NODE_NAME)-virtual", + }, + { + Name: "CONSUL_NAMESPACE", + Value: "", + }, + { + Name: "CONSUL_USE_TLS", + Value: "true", + }, + { + Name: "CONSUL_CACERT_PEM", + Value: testCert, + }, + { + Name: "CONSUL_TLS_SERVER_NAME", + Value: "", + }, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), + "memory": resource.MustParse("128Mi"), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("200m"), + "memory": resource.MustParse("228Mi"), + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "consul-mesh-inject-data", + ReadOnly: false, + MountPath: "/consul/mesh-inject", + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Args: []string{ + "-addresses", + "", + "-grpc-port=0", + "-log-level=debug", + "-log-json=false", + "-envoy-concurrency=1", + "-ca-certs=/consul/mesh-inject/consul-ca.pem", + "-envoy-ready-bind-port=21000", + "-envoy-admin-bind-port=19000", + }, + Ports: []corev1.ContainerPort{ + { + Name: "proxy-health", + ContainerPort: 21000, + }, + { + Name: "wan", + ContainerPort: 8443, + HostPort: 8080, + }, + }, + Env: []corev1.EnvVar{ + { + Name: "DP_PROXY_ID", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: 
&corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "TMPDIR", + Value: "/consul/mesh-inject", + }, + { + Name: "NODE_NAME", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "spec.nodeName", + }, + }, + }, + { + Name: "DP_CREDENTIAL_LOGIN_META", + Value: "pod=$(POD_NAMESPACE)/$(DP_PROXY_ID)", + }, + { + Name: "DP_ENVOY_READY_BIND_ADDRESS", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "status.podIP", + }, + }, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "consul-mesh-inject-data", + MountPath: "/consul/mesh-inject", + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/ready", + Port: intstr.IntOrString{ + Type: 0, + IntVal: 21000, + StrVal: "", + }, + }, + }, + InitialDelaySeconds: 1, + }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{ + "NET_BIND_SERVICE", + }, + Drop: []corev1.Capability{ + "ALL", + }, + }, + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + AllowPrivilegeEscalation: pointer.Bool(false), + ProcMount: nil, + SeccompProfile: nil, + }, + Stdin: false, + StdinOnce: false, + TTY: false, + }, + }, + NodeSelector: map[string]string{"beta.kubernetes.io/arch": "amd64"}, + PriorityClassName: "priorityclassname", + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ + { + MaxSkew: 1, + TopologyKey: "key", + WhenUnsatisfiable: "DoNotSchedule", + }, + }, + Affinity: &corev1.Affinity{ + NodeAffinity: nil, + PodAffinity: nil, + PodAntiAffinity: &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ + { + Weight: 1, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: 
map[string]string{ + labelManagedBy: "consul-k8s", + "app": "consul", + "chart": "consul-helm", + "heritage": "Helm", + "release": "consul", + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + }, + }, + }, + Strategy: appsv1.DeploymentStrategy{}, + MinReadySeconds: 0, + RevisionHistoryLimit: nil, + Paused: false, + ProgressDeadlineSeconds: nil, + }, + Status: appsv1.DeploymentStatus{}, + }, + wantErr: false, + }, + { + name: "nil gatewayclassconfig - (notfound)", + fields: fields{ + gateway: &meshv2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + constants.AnnotationGatewayWANSource: "Service", + constants.AnnotationGatewayWANPort: "443", + constants.AnnotationGatewayWANAddress: "", + }, + }, + Spec: pbmesh.MeshGateway{ + GatewayClassName: "test-gateway-class", + }, + }, + config: GatewayConfig{}, + gcc: nil, + }, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Labels: defaultLabels, + Annotations: map[string]string{}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: pointer.Int32(1), + Selector: &metav1.LabelSelector{ + MatchLabels: defaultLabels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: defaultLabels, + Annotations: map[string]string{ + constants.AnnotationGatewayKind: meshGatewayAnnotationKind, + constants.AnnotationMeshInject: "false", + constants.AnnotationTransparentProxyOverwriteProbes: "false", + constants.AnnotationGatewayWANSource: "Service", + constants.AnnotationGatewayWANPort: "443", + constants.AnnotationGatewayWANAddress: "", + }, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "consul-mesh-inject-data", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: "Memory", + }, + }, + }, + }, + InitContainers: []corev1.Container{ + { + Name: "consul-mesh-init", + Command: []string{ + "/bin/sh", + "-ec", + "consul-k8s-control-plane mesh-init \\\n -proxy-name=${POD_NAME} \\\n 
-namespace=${POD_NAMESPACE} \\\n -log-json=false", + }, + Env: []corev1.EnvVar{ + { + Name: "POD_NAME", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "NODE_NAME", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "spec.nodeName", + }, + }, + }, + { + Name: "CONSUL_ADDRESSES", + Value: "", + }, + { + Name: "CONSUL_GRPC_PORT", + Value: "0", + }, + { + Name: "CONSUL_HTTP_PORT", + Value: "0", + }, + { + Name: "CONSUL_API_TIMEOUT", + Value: "0s", + }, + { + Name: "CONSUL_NODE_NAME", + Value: "$(NODE_NAME)-virtual", + }, + { + Name: "CONSUL_NAMESPACE", + Value: "", + }, + }, + Resources: corev1.ResourceRequirements{}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "consul-mesh-inject-data", + ReadOnly: false, + MountPath: "/consul/mesh-inject", + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Args: []string{ + "-addresses", + "", + "-grpc-port=0", + "-log-level=", + "-log-json=false", + "-envoy-concurrency=1", + "-tls-disabled", + "-envoy-ready-bind-port=21000", + "-envoy-admin-bind-port=19000", + }, + Ports: []corev1.ContainerPort{ + { + Name: "proxy-health", + ContainerPort: 21000, + }, + { + Name: "wan", + ContainerPort: 443, + }, + }, + Env: []corev1.EnvVar{ + { + Name: "DP_PROXY_ID", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "TMPDIR", + Value: "/consul/mesh-inject", + }, + { + Name: 
"NODE_NAME", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "spec.nodeName", + }, + }, + }, + { + Name: "DP_CREDENTIAL_LOGIN_META", + Value: "pod=$(POD_NAMESPACE)/$(DP_PROXY_ID)", + }, + { + Name: "DP_ENVOY_READY_BIND_ADDRESS", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "", + FieldPath: "status.podIP", + }, + }, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "consul-mesh-inject-data", + MountPath: "/consul/mesh-inject", + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/ready", + Port: intstr.IntOrString{ + Type: 0, + IntVal: 21000, + StrVal: "", + }, + }, + }, + InitialDelaySeconds: 1, + }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{ + "NET_BIND_SERVICE", + }, + Drop: []corev1.Capability{ + "ALL", + }, + }, + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + AllowPrivilegeEscalation: pointer.Bool(false), + ProcMount: nil, + SeccompProfile: nil, + }, + Stdin: false, + StdinOnce: false, + TTY: false, + }, + }, + }, + }, + Strategy: appsv1.DeploymentStrategy{}, + MinReadySeconds: 0, + RevisionHistoryLimit: nil, + Paused: false, + ProgressDeadlineSeconds: nil, + }, + Status: appsv1.DeploymentStatus{}, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &meshGatewayBuilder{ + gateway: tt.fields.gateway, + config: tt.fields.config, + gcc: tt.fields.gcc, + } + got, err := b.Deployment() + if !tt.wantErr && (err != nil) { + assert.Errorf(t, err, "Error") + } + assert.Equalf(t, tt.want, got, "Deployment()") + }) + } +} + +func Test_MergeDeployment(t *testing.T) { + testCases := []struct { + name string + a, b *appsv1.Deployment + assertFn func(*testing.T, *appsv1.Deployment) + }{ + { + name: "new deployment 
gets desired annotations + labels + containers", + a: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "deployment"}}, + b: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "deployment", + Annotations: map[string]string{"b": "b"}, + Labels: map[string]string{"b": "b"}, + }}, + assertFn: func(t *testing.T, result *appsv1.Deployment) { + assert.Equal(t, map[string]string{"b": "b"}, result.Annotations) + assert.Equal(t, map[string]string{"b": "b"}, result.Labels) + }, + }, + { + name: "existing deployment keeps existing annotations + labels and gains desired annotations + labels + containers", + a: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "deployment", + CreationTimestamp: metav1.Now(), + Annotations: map[string]string{"a": "a"}, + Labels: map[string]string{"a": "a"}, + }}, + b: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "deployment", + Annotations: map[string]string{"b": "b"}, + Labels: map[string]string{"b": "b"}, + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "b"}}, + }, + }, + }, + }, + assertFn: func(t *testing.T, result *appsv1.Deployment) { + assert.Equal(t, map[string]string{"a": "a", "b": "b"}, result.Annotations) + assert.Equal(t, map[string]string{"a": "a", "b": "b"}, result.Labels) + + require.Len(t, result.Spec.Template.Spec.Containers, 1) + assert.Equal(t, "b", result.Spec.Template.Spec.Containers[0].Name) + }, + }, + { + name: "existing deployment with injected initContainer retains it", + a: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "deployment", + CreationTimestamp: metav1.Now(), + Annotations: map[string]string{"a": "a"}, + Labels: map[string]string{"a": "a"}, + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: 
[]corev1.Container{{Name: "b"}}, + Containers: []corev1.Container{{Name: "b"}}, + }, + }, + }, + }, + b: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "deployment", + Annotations: map[string]string{"b": "b"}, + Labels: map[string]string{"b": "b"}, + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "b"}}, + }, + }, + }, + }, + assertFn: func(t *testing.T, result *appsv1.Deployment) { + assert.Equal(t, map[string]string{"a": "a", "b": "b"}, result.Annotations) + assert.Equal(t, map[string]string{"a": "a", "b": "b"}, result.Labels) + + require.Len(t, result.Spec.Template.Spec.InitContainers, 1) + assert.Equal(t, "b", result.Spec.Template.Spec.InitContainers[0].Name) + + require.Len(t, result.Spec.Template.Spec.Containers, 1) + assert.Equal(t, "b", result.Spec.Template.Spec.Containers[0].Name) + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + MergeDeployment(testCase.a, testCase.b) + testCase.assertFn(t, testCase.a) + }) + } +} diff --git a/control-plane/gateways/gateway_config.go b/control-plane/gateways/gateway_config.go new file mode 100644 index 0000000000..de3202e29e --- /dev/null +++ b/control-plane/gateways/gateway_config.go @@ -0,0 +1,58 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gateways + +import ( + "github.com/hashicorp/consul-k8s/control-plane/api/common" + "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" +) + +// GatewayConfig is a combination of settings relevant to Gateways. +type GatewayConfig struct { + // ImageDataplane is the Consul Dataplane image to use in gateway deployments. + ImageDataplane string + // ImageConsulK8S is the Consul Kubernetes Control Plane image to use in gateway deployments. + ImageConsulK8S string + // AuthMethod method used to authenticate with Consul Server. 
+ AuthMethod string + + // ConsulTenancyConfig is the configuration for the Consul Tenancy feature. + ConsulTenancyConfig common.ConsulTenancyConfig + + // LogLevel is the logging level of the deployed Consul Dataplanes. + LogLevel string + // LogJSON if JSONLogging has been enabled. + LogJSON bool + // TLSEnabled is the value of whether or not TLS has been enabled in Consul. + TLSEnabled bool + // PeeringEnabled toggles whether or not Peering is enabled in Consul. + PeeringEnabled bool + // ConsulTLSServerName the name of the server running TLS. + ConsulTLSServerName string + // ConsulCACert contains the Consul Certificate Authority. + ConsulCACert string + // ConsulConfig configuration for the consul server address. + ConsulConfig common.ConsulConfig + + // EnableOpenShift indicates whether we're deploying into an OpenShift environment + EnableOpenShift bool + + // MapPrivilegedServicePorts is the value which Consul will add to privileged container port values (ports < 1024) + // defined on a Gateway. + MapPrivilegedServicePorts int + + // TODO(nathancoleman) Add doc + SkipServerWatch bool +} + +// GatewayResources is a collection of Kubernetes resources for a Gateway. +type GatewayResources struct { + // GatewayClassConfigs is a list of GatewayClassConfig resources which are + // responsible for defining configuration shared across all gateway kinds. + GatewayClassConfigs []*v2beta1.GatewayClassConfig `json:"gatewayClassConfigs"` + // MeshGateways is a list of MeshGateway resources which are responsible for + // defining the configuration for a specific mesh gateway. + // Deployments of mesh gateways have a one-to-one relationship with MeshGateway resources. + MeshGateways []*v2beta1.MeshGateway `json:"meshGateways"` +} diff --git a/control-plane/gateways/metadata.go b/control-plane/gateways/metadata.go new file mode 100644 index 0000000000..e1479ef3f2 --- /dev/null +++ b/control-plane/gateways/metadata.go @@ -0,0 +1,169 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package gateways + +import ( + "golang.org/x/exp/slices" + + "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" +) + +const labelManagedBy = "mesh.consul.hashicorp.com/managed-by" + +var defaultLabels = map[string]string{labelManagedBy: "consul-k8s"} + +func (b *meshGatewayBuilder) annotationsForDeployment() map[string]string { + if b.gcc == nil { + return map[string]string{} + } + return computeAnnotationsOrLabels(b.gateway.Annotations, b.gcc.Spec.Deployment.Annotations, b.gcc.Spec.Annotations) +} + +func (b *meshGatewayBuilder) annotationsForRole() map[string]string { + if b.gcc == nil { + return map[string]string{} + } + return computeAnnotationsOrLabels(b.gateway.Annotations, b.gcc.Spec.Role.Annotations, b.gcc.Spec.Annotations) +} + +func (b *meshGatewayBuilder) annotationsForRoleBinding() map[string]string { + if b.gcc == nil { + return map[string]string{} + } + return computeAnnotationsOrLabels(b.gateway.Annotations, b.gcc.Spec.RoleBinding.Annotations, b.gcc.Spec.Annotations) +} + +func (b *meshGatewayBuilder) annotationsForService() map[string]string { + if b.gcc == nil { + return map[string]string{} + } + return computeAnnotationsOrLabels(b.gateway.Annotations, b.gcc.Spec.Service.Annotations, b.gcc.Spec.Annotations) +} + +func (b *meshGatewayBuilder) annotationsForServiceAccount() map[string]string { + if b.gcc == nil { + return map[string]string{} + } + return computeAnnotationsOrLabels(b.gateway.Annotations, b.gcc.Spec.ServiceAccount.Annotations, b.gcc.Spec.Annotations) +} + +func (b *meshGatewayBuilder) labelsForDeployment() map[string]string { + if b.gcc == nil { + return defaultLabels + } + + labels := computeAnnotationsOrLabels(b.gateway.Labels, b.gcc.Spec.Deployment.Labels, b.gcc.Spec.Labels) + for k, v := range defaultLabels { + labels[k] = v + } + return labels +} + +func (b *meshGatewayBuilder) logLevelForDataplaneContainer() string { + if b.config.LogLevel != "" { + return b.config.LogLevel + 
} + + if b.gcc == nil || b.gcc.Spec.Deployment.Container == nil { + return "" + } + + return b.gcc.Spec.Deployment.Container.Consul.Logging.Level +} + +func (b *meshGatewayBuilder) logLevelForInitContainer() string { + if b.config.LogLevel != "" { + return b.config.LogLevel + } + + if b.gcc == nil || b.gcc.Spec.Deployment.InitContainer == nil { + return "" + } + + return b.gcc.Spec.Deployment.InitContainer.Consul.Logging.Level +} + +func (b *meshGatewayBuilder) labelsForRole() map[string]string { + if b.gcc == nil { + return defaultLabels + } + + labels := computeAnnotationsOrLabels(b.gateway.Labels, b.gcc.Spec.Role.Labels, b.gcc.Spec.Labels) + for k, v := range defaultLabels { + labels[k] = v + } + return labels +} + +func (b *meshGatewayBuilder) labelsForRoleBinding() map[string]string { + if b.gcc == nil { + return defaultLabels + } + + labels := computeAnnotationsOrLabels(b.gateway.Labels, b.gcc.Spec.RoleBinding.Labels, b.gcc.Spec.Labels) + for k, v := range defaultLabels { + labels[k] = v + } + return labels +} + +func (b *meshGatewayBuilder) labelsForService() map[string]string { + if b.gcc == nil { + return defaultLabels + } + + labels := computeAnnotationsOrLabels(b.gateway.Labels, b.gcc.Spec.Service.Labels, b.gcc.Spec.Labels) + for k, v := range defaultLabels { + labels[k] = v + } + return labels +} + +func (b *meshGatewayBuilder) labelsForServiceAccount() map[string]string { + if b.gcc == nil { + return defaultLabels + } + + labels := computeAnnotationsOrLabels(b.gateway.Labels, b.gcc.Spec.ServiceAccount.Labels, b.gcc.Spec.Labels) + for k, v := range defaultLabels { + labels[k] = v + } + return labels +} + +// computeAnnotationsOrLabels compiles a set of annotations or labels +// using the following priority, highest to lowest: +// 1. inherited keys specified on the primary +// 2. added key-values specified on the primary +// 3. inherited keys specified on the secondary +// 4. 
added key-values specified on the secondary +func computeAnnotationsOrLabels(inheritFrom map[string]string, primary, secondary v2beta1.GatewayClassAnnotationsLabelsConfig) map[string]string { + out := map[string]string{} + + // Add key-values specified on the secondary + for k, v := range secondary.Set { + out[k] = v + } + + // Inherit keys specified on the secondary + for k, v := range inheritFrom { + if slices.Contains(secondary.InheritFromGateway, k) { + out[k] = v + } + } + + // Add key-values specified on the primary + for k, v := range primary.Set { + out[k] = v + } + + // Inherit keys specified on the primary + for k, v := range inheritFrom { + if slices.Contains(primary.InheritFromGateway, k) { + out[k] = v + } + } + + return out +} diff --git a/control-plane/gateways/metadata_test.go b/control-plane/gateways/metadata_test.go new file mode 100644 index 0000000000..7505867992 --- /dev/null +++ b/control-plane/gateways/metadata_test.go @@ -0,0 +1,342 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package gateways + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" +) + +func TestMeshGatewayBuilder_Annotations(t *testing.T) { + gateway := &meshv2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "gateway-annotation": "true", // Will be inherited by all resources + "gateway-deployment-annotation": "true", // Will be inherited by Deployment + "gateway-role-annotation": "true", // Will be inherited by Role + "gateway-role-binding-annotation": "true", // Will be inherited by RoleBinding + "gateway-service-annotation": "true", // Will be inherited by Service + "gateway-service-account-annotation": "true", // Will be inherited by ServiceAccount + }, + }, + } + + gatewayClassConfig := &meshv2beta1.GatewayClassConfig{ + Spec: meshv2beta1.GatewayClassConfigSpec{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Annotations: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{"gateway-annotation"}, + Set: map[string]string{"global-set": "true"}, + }, + }, + Deployment: meshv2beta1.GatewayClassDeploymentConfig{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Annotations: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{"gateway-deployment-annotation"}, + Set: map[string]string{"deployment-set": "true"}, + }, + }, + }, + Role: meshv2beta1.GatewayClassRoleConfig{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Annotations: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{"gateway-role-annotation"}, + Set: map[string]string{"role-set": "true"}, + }, + }, + }, + RoleBinding: meshv2beta1.GatewayClassRoleBindingConfig{ + GatewayClassAnnotationsAndLabels: 
meshv2beta1.GatewayClassAnnotationsAndLabels{ + Annotations: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{"gateway-role-binding-annotation"}, + Set: map[string]string{"role-binding-set": "true"}, + }, + }, + }, + Service: meshv2beta1.GatewayClassServiceConfig{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Annotations: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{"gateway-service-annotation"}, + Set: map[string]string{"service-set": "true"}, + }, + }, + }, + ServiceAccount: meshv2beta1.GatewayClassServiceAccountConfig{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Annotations: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{"gateway-service-account-annotation"}, + Set: map[string]string{"service-account-set": "true"}, + }, + }, + }, + }, + } + + b := NewMeshGatewayBuilder(gateway, GatewayConfig{}, gatewayClassConfig) + + for _, testCase := range []struct { + Actual map[string]string + Expected map[string]string + }{ + { + Actual: b.annotationsForDeployment(), + Expected: map[string]string{ + "gateway-annotation": "true", + "global-set": "true", + "gateway-deployment-annotation": "true", + "deployment-set": "true", + }, + }, + { + Actual: b.annotationsForRole(), + Expected: map[string]string{ + "gateway-annotation": "true", + "global-set": "true", + "gateway-role-annotation": "true", + "role-set": "true", + }, + }, + { + Actual: b.annotationsForRoleBinding(), + Expected: map[string]string{ + "gateway-annotation": "true", + "global-set": "true", + "gateway-role-binding-annotation": "true", + "role-binding-set": "true", + }, + }, + { + Actual: b.annotationsForService(), + Expected: map[string]string{ + "gateway-annotation": "true", + "global-set": "true", + "gateway-service-annotation": "true", + "service-set": "true", + }, + }, + { + Actual: b.annotationsForServiceAccount(), + Expected: 
map[string]string{ + "gateway-annotation": "true", + "global-set": "true", + "gateway-service-account-annotation": "true", + "service-account-set": "true", + }, + }, + } { + assert.Equal(t, testCase.Expected, testCase.Actual) + } +} + +func TestNewMeshGatewayBuilder_Labels(t *testing.T) { + gateway := &meshv2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "gateway-label": "true", // Will be inherited by all resources + "gateway-deployment-label": "true", // Will be inherited by Deployment + "gateway-role-label": "true", // Will be inherited by Role + "gateway-role-binding-label": "true", // Will be inherited by RoleBinding + "gateway-service-label": "true", // Will be inherited by Service + "gateway-service-account-label": "true", // Will be inherited by ServiceAccount + }, + }, + } + + gatewayClassConfig := &meshv2beta1.GatewayClassConfig{ + Spec: meshv2beta1.GatewayClassConfigSpec{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Labels: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{"gateway-label"}, + Set: map[string]string{"global-set": "true"}, + }, + }, + Deployment: meshv2beta1.GatewayClassDeploymentConfig{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Labels: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{"gateway-deployment-label"}, + Set: map[string]string{"deployment-set": "true"}, + }, + }, + }, + Role: meshv2beta1.GatewayClassRoleConfig{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Labels: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{"gateway-role-label"}, + Set: map[string]string{"role-set": "true"}, + }, + }, + }, + RoleBinding: meshv2beta1.GatewayClassRoleBindingConfig{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Labels: 
meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{"gateway-role-binding-label"}, + Set: map[string]string{"role-binding-set": "true"}, + }, + }, + }, + Service: meshv2beta1.GatewayClassServiceConfig{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Labels: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{"gateway-service-label"}, + Set: map[string]string{"service-set": "true"}, + }, + }, + }, + ServiceAccount: meshv2beta1.GatewayClassServiceAccountConfig{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Labels: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{"gateway-service-account-label"}, + Set: map[string]string{"service-account-set": "true"}, + }, + }, + }, + }, + } + + b := NewMeshGatewayBuilder(gateway, GatewayConfig{}, gatewayClassConfig) + + for _, testCase := range []struct { + Actual map[string]string + Expected map[string]string + }{ + { + Actual: b.labelsForDeployment(), + Expected: map[string]string{ + "mesh.consul.hashicorp.com/managed-by": "consul-k8s", + "gateway-label": "true", + "global-set": "true", + "gateway-deployment-label": "true", + "deployment-set": "true", + }, + }, + { + Actual: b.labelsForRole(), + Expected: map[string]string{ + "mesh.consul.hashicorp.com/managed-by": "consul-k8s", + "gateway-label": "true", + "global-set": "true", + "gateway-role-label": "true", + "role-set": "true", + }, + }, + { + Actual: b.labelsForRoleBinding(), + Expected: map[string]string{ + "mesh.consul.hashicorp.com/managed-by": "consul-k8s", + "gateway-label": "true", + "global-set": "true", + "gateway-role-binding-label": "true", + "role-binding-set": "true", + }, + }, + { + Actual: b.labelsForService(), + Expected: map[string]string{ + "mesh.consul.hashicorp.com/managed-by": "consul-k8s", + "gateway-label": "true", + "global-set": "true", + "gateway-service-label": "true", + "service-set": 
"true", + }, + }, + { + Actual: b.labelsForServiceAccount(), + Expected: map[string]string{ + "mesh.consul.hashicorp.com/managed-by": "consul-k8s", + "gateway-label": "true", + "global-set": "true", + "gateway-service-account-label": "true", + "service-account-set": "true", + }, + }, + } { + assert.Equal(t, testCase.Expected, testCase.Actual) + } +} + +// The LogLevel for deployment containers may be set on the Gateway Class Config or the Gateway Config. +// If it is set on both, the Gateway Config takes precedence. +func TestMeshGatewayBuilder_LogLevel(t *testing.T) { + debug := "debug" + info := "info" + + testCases := map[string]struct { + GatewayLogLevel string + GCCLogLevel string + }{ + "Set on Gateway": { + GatewayLogLevel: debug, + GCCLogLevel: "", + }, + "Set on GCC": { + GatewayLogLevel: "", + GCCLogLevel: debug, + }, + "Set on both": { + GatewayLogLevel: debug, + GCCLogLevel: info, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + + gcc := &meshv2beta1.GatewayClassConfig{ + Spec: meshv2beta1.GatewayClassConfigSpec{ + Deployment: meshv2beta1.GatewayClassDeploymentConfig{ + Container: &meshv2beta1.GatewayClassContainerConfig{ + Consul: meshv2beta1.GatewayClassConsulConfig{ + Logging: meshv2beta1.GatewayClassConsulLoggingConfig{ + Level: testCase.GCCLogLevel, + }, + }, + }, + }, + }, + } + b := NewMeshGatewayBuilder(&meshv2beta1.MeshGateway{}, GatewayConfig{LogLevel: testCase.GatewayLogLevel}, gcc) + + assert.Equal(t, debug, b.logLevelForDataplaneContainer()) + }) + } +} + +func Test_computeAnnotationsOrLabels(t *testing.T) { + gatewaySet := map[string]string{ + "service.beta.kubernetes.io/aws-load-balancer-internal": "true", // Will not be inherited + "service.beta.kubernetes.io/aws-load-balancer-name": "my-lb", // Will be inherited + } + + primary := meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{ + "service.beta.kubernetes.io/aws-load-balancer-name", + }, + Set: map[string]string{ + 
"created-by": "nathancoleman", // Only exists in primary + "owning-team": "consul-gateway-management", // Will override secondary + }, + } + + secondary := meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + InheritFromGateway: []string{}, + Set: map[string]string{ + "created-on": "kubernetes", // Only exists in secondary + "owning-team": "consul", // Will be overridden by primary + }, + } + + actual := computeAnnotationsOrLabels(gatewaySet, primary, secondary) + expected := map[string]string{ + "created-by": "nathancoleman", // Set by primary + "created-on": "kubernetes", // Set by secondary + "owning-team": "consul-gateway-management", // Set by primary, overrode secondary + "service.beta.kubernetes.io/aws-load-balancer-name": "my-lb", // Inherited from gateway + } + + assert.Equal(t, expected, actual) +} diff --git a/control-plane/gateways/role.go b/control-plane/gateways/role.go new file mode 100644 index 0000000000..3264bb60b0 --- /dev/null +++ b/control-plane/gateways/role.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package gateways + +import ( + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (b *meshGatewayBuilder) Role() *rbacv1.Role { + return &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.gateway.Name, + Namespace: b.gateway.Namespace, + Labels: b.labelsForRole(), + Annotations: b.annotationsForRole(), + }, + Rules: []rbacv1.PolicyRule{}, + } +} + +func (b *meshGatewayBuilder) RoleBinding() *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.gateway.Name, + Namespace: b.gateway.Namespace, + Labels: b.labelsForRoleBinding(), + Annotations: b.annotationsForRoleBinding(), + }, + Subjects: []rbacv1.Subject{ + { + APIGroup: "", + Kind: rbacv1.ServiceAccountKind, + Name: b.gateway.Name, + Namespace: b.gateway.Namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: b.Role().Name, + }, + } +} diff --git a/control-plane/gateways/service.go b/control-plane/gateways/service.go new file mode 100644 index 0000000000..c7dffdf969 --- /dev/null +++ b/control-plane/gateways/service.go @@ -0,0 +1,99 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package gateways + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" +) + +func (b *meshGatewayBuilder) Service() *corev1.Service { + var ( + containerConfig *meshv2beta1.GatewayClassContainerConfig + portModifier = int32(0) + serviceType = corev1.ServiceType("") + ) + + if b.gcc != nil { + containerConfig = b.gcc.Spec.Deployment.Container + portModifier = containerConfig.PortModifier + serviceType = *b.gcc.Spec.Service.Type + } + + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.gateway.Name, + Namespace: b.gateway.Namespace, + Labels: b.labelsForService(), + Annotations: b.annotationsForService(), + }, + Spec: corev1.ServiceSpec{ + Selector: b.labelsForDeployment(), + Type: serviceType, + Ports: b.Ports(portModifier), + }, + } +} + +// Ports build a list of ports from the listener objects. In theory there should only ever be a WAN port on +// mesh gateway but building the ports from a list of listeners will allow for easier compatability with other +// gateway patterns in the future. +func (b *meshGatewayBuilder) Ports(portModifier int32) []corev1.ServicePort { + + ports := []corev1.ServicePort{} + + if len(b.gateway.Spec.Listeners) == 0 { + //If empty use the default value. This should always be set, but in case it's not, this check + //will prevent a panic. 
+ return []corev1.ServicePort{ + { + Name: "wan", + Port: constants.DefaultWANPort, + TargetPort: intstr.IntOrString{ + IntVal: constants.DefaultWANPort + portModifier, + }, + }, + } + } + for _, listener := range b.gateway.Spec.Listeners { + port := int32(listener.Port) + ports = append(ports, corev1.ServicePort{ + Name: listener.Name, + Port: port, + TargetPort: intstr.IntOrString{ + IntVal: port + portModifier, + }, + Protocol: corev1.Protocol(listener.Protocol), + }) + } + return ports +} + +// MergeService is used to update a corev1.Service without overwriting any +// existing annotations or labels that were placed there by other vendors. +// +// based on https://github.com/kubernetes-sigs/controller-runtime/blob/4000e996a202917ad7d40f02ed8a2079a9ce25e9/pkg/controller/controllerutil/example_test.go +func MergeService(existing, desired *corev1.Service) { + existing.Spec = desired.Spec + + // Only overwrite fields if the Service doesn't exist yet + if existing.ObjectMeta.CreationTimestamp.IsZero() { + existing.ObjectMeta.OwnerReferences = desired.ObjectMeta.OwnerReferences + existing.Annotations = desired.Annotations + existing.Labels = desired.Labels + return + } + + // If the Service already exists, add any desired annotations + labels to existing set + for k, v := range desired.ObjectMeta.Annotations { + existing.ObjectMeta.Annotations[k] = v + } + for k, v := range desired.ObjectMeta.Labels { + existing.ObjectMeta.Labels[k] = v + } +} diff --git a/control-plane/gateways/service_test.go b/control-plane/gateways/service_test.go new file mode 100644 index 0000000000..f5d4beb58c --- /dev/null +++ b/control-plane/gateways/service_test.go @@ -0,0 +1,218 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package gateways + +import ( + "testing" + + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" +) + +func Test_meshGatewayBuilder_Service(t *testing.T) { + lbType := corev1.ServiceTypeLoadBalancer + + type fields struct { + gateway *meshv2beta1.MeshGateway + config GatewayConfig + gcc *meshv2beta1.GatewayClassConfig + } + tests := []struct { + name string + fields fields + want *corev1.Service + }{ + { + name: "service resource crd created - happy path", + fields: fields{ + gateway: &meshv2beta1.MeshGateway{ + Spec: pbmesh.MeshGateway{ + GatewayClassName: "test-gateway-class", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 443, + Protocol: "TCP", + }, + }, + }, + }, + config: GatewayConfig{}, + gcc: &meshv2beta1.GatewayClassConfig{ + Spec: meshv2beta1.GatewayClassConfigSpec{ + GatewayClassAnnotationsAndLabels: meshv2beta1.GatewayClassAnnotationsAndLabels{ + Labels: meshv2beta1.GatewayClassAnnotationsLabelsConfig{ + Set: map[string]string{ + "app": "consul", + "chart": "consul-helm", + "heritage": "Helm", + "release": "consul", + }, + }, + }, + Deployment: meshv2beta1.GatewayClassDeploymentConfig{ + Container: &meshv2beta1.GatewayClassContainerConfig{ + PortModifier: 8000, + }, + }, + Service: meshv2beta1.GatewayClassServiceConfig{ + Type: &lbType, + }, + }, + }, + }, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + labelManagedBy: "consul-k8s", + "app": "consul", + "chart": "consul-helm", + "heritage": "Helm", + "release": "consul", + }, + Annotations: map[string]string{}, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + labelManagedBy: "consul-k8s", + "app": "consul", + "chart": "consul-helm", + 
"heritage": "Helm", + "release": "consul", + }, + Type: corev1.ServiceTypeLoadBalancer, + Ports: []corev1.ServicePort{ + { + Name: "wan", + Port: int32(443), + TargetPort: intstr.IntOrString{ + IntVal: int32(8443), + }, + Protocol: "TCP", + }, + }, + }, + Status: corev1.ServiceStatus{}, + }, + }, + { + name: "create service resource crd - gcc is nil", + fields: fields{ + gateway: &meshv2beta1.MeshGateway{ + Spec: pbmesh.MeshGateway{ + GatewayClassName: "test-gateway-class", + Listeners: []*pbmesh.MeshGatewayListener{ + { + Name: "wan", + Port: 443, + Protocol: "TCP", + }, + }, + }, + }, + config: GatewayConfig{}, + gcc: nil, + }, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: defaultLabels, + Annotations: map[string]string{}, + }, + Spec: corev1.ServiceSpec{ + Selector: defaultLabels, + Type: "", + Ports: []corev1.ServicePort{ + { + Name: "wan", + Port: int32(443), + TargetPort: intstr.IntOrString{ + IntVal: int32(443), + }, + Protocol: "TCP", + }, + }, + }, + Status: corev1.ServiceStatus{}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &meshGatewayBuilder{ + gateway: tt.fields.gateway, + config: tt.fields.config, + gcc: tt.fields.gcc, + } + result := b.Service() + assert.Equalf(t, tt.want, result, "Service()") + }) + } +} + +func Test_MergeService(t *testing.T) { + testCases := []struct { + name string + a, b *corev1.Service + assertFn func(*testing.T, *corev1.Service) + }{ + { + name: "new service gets desired annotations + labels", + a: &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "service"}}, + b: &corev1.Service{ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "service", + Annotations: map[string]string{"b": "b"}, + Labels: map[string]string{"b": "b"}, + }}, + assertFn: func(t *testing.T, result *corev1.Service) { + assert.Equal(t, map[string]string{"b": "b"}, result.Annotations) + assert.Equal(t, map[string]string{"b": "b"}, result.Labels) + }, + }, + { + 
name: "existing service keeps existing annotations + labels and gains desired annotations + labels + type", + a: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "service", + CreationTimestamp: metav1.Now(), + Annotations: map[string]string{"a": "a"}, + Labels: map[string]string{"a": "a"}, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + }, + }, + b: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "service", + Annotations: map[string]string{"b": "b"}, + Labels: map[string]string{"b": "b"}, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + }, + }, + assertFn: func(t *testing.T, result *corev1.Service) { + assert.Equal(t, map[string]string{"a": "a", "b": "b"}, result.Annotations) + assert.Equal(t, map[string]string{"a": "a", "b": "b"}, result.Labels) + + assert.Equal(t, corev1.ServiceTypeLoadBalancer, result.Spec.Type) + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + MergeService(testCase.a, testCase.b) + testCase.assertFn(t, testCase.a) + }) + } +} diff --git a/control-plane/gateways/serviceaccount.go b/control-plane/gateways/serviceaccount.go new file mode 100644 index 0000000000..1a0b32c275 --- /dev/null +++ b/control-plane/gateways/serviceaccount.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package gateways + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (b *meshGatewayBuilder) ServiceAccount() *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.serviceAccountName(), + Namespace: b.gateway.Namespace, + Labels: b.labelsForServiceAccount(), + Annotations: b.annotationsForServiceAccount(), + }, + } +} + +func (b *meshGatewayBuilder) serviceAccountName() string { + return b.gateway.Name +} diff --git a/control-plane/gateways/serviceaccount_test.go b/control-plane/gateways/serviceaccount_test.go new file mode 100644 index 0000000000..ff0fb4e878 --- /dev/null +++ b/control-plane/gateways/serviceaccount_test.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gateways + +import ( + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" +) + +func TestNewMeshGatewayBuilder_ServiceAccount(t *testing.T) { + b := NewMeshGatewayBuilder(&meshv2beta1.MeshGateway{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + }, + }, GatewayConfig{}, nil) + + expected := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "mesh-gateway", + Labels: defaultLabels, + Annotations: map[string]string{}, + }, + } + + assert.Equal(t, expected, b.ServiceAccount()) +} diff --git a/control-plane/go.mod b/control-plane/go.mod index 9edffb3410..b615c3613a 100644 --- a/control-plane/go.mod +++ b/control-plane/go.mod @@ -1,6 +1,9 @@ module github.com/hashicorp/consul-k8s/control-plane -replace github.com/hashicorp/consul-k8s/version => ../version +replace ( + github.com/hashicorp/consul-k8s/version => ../version + github.com/hashicorp/consul/api => github.com/hashicorp/consul/api 
v1.10.1-0.20240312203720-262f4358003f +) require ( github.com/cenkalti/backoff v2.2.1+incompatible @@ -15,8 +18,9 @@ require ( github.com/hashicorp/consul-k8s/control-plane/cni v0.0.0-20240226161840-f3842c41cb2b github.com/hashicorp/consul-k8s/version v0.0.0 github.com/hashicorp/consul-server-connection-manager v0.1.6 - github.com/hashicorp/consul/api v1.29.1 - github.com/hashicorp/consul/sdk v0.16.1 + github.com/hashicorp/consul/api v1.28.2 + github.com/hashicorp/consul/proto-public v0.6.0 + github.com/hashicorp/consul/sdk v0.16.0 github.com/hashicorp/go-bexpr v0.1.11 github.com/hashicorp/go-discover v0.0.0-20230519164032-214571b6a530 github.com/hashicorp/go-hclog v1.6.3 @@ -48,6 +52,7 @@ require ( k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 sigs.k8s.io/controller-runtime v0.16.5 sigs.k8s.io/gateway-api v0.7.1 + sigs.k8s.io/yaml v1.3.0 ) require ( @@ -96,7 +101,6 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.11.0 // indirect github.com/gophercloud/gophercloud v0.1.0 // indirect - github.com/hashicorp/consul/proto-public v0.6.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect @@ -128,7 +132,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/posener/complete v1.2.3 // indirect github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 // indirect @@ -161,7 +165,6 @@ require ( k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // 
indirect - sigs.k8s.io/yaml v1.3.0 // indirect ) go 1.20 diff --git a/control-plane/go.sum b/control-plane/go.sum index 10f913bfc5..c0c96f78d7 100644 --- a/control-plane/go.sum +++ b/control-plane/go.sum @@ -206,12 +206,12 @@ github.com/hashicorp/consul-k8s/control-plane/cni v0.0.0-20240226161840-f3842c41 github.com/hashicorp/consul-k8s/control-plane/cni v0.0.0-20240226161840-f3842c41cb2b/go.mod h1:9NKJHOcgmz/6P2y6MegNIOXhIKE/0ils/mHWd5sZgoU= github.com/hashicorp/consul-server-connection-manager v0.1.6 h1:ktj8Fi+dRXn9hhM+FXsfEJayhzzgTqfH08Ne5M6Fmug= github.com/hashicorp/consul-server-connection-manager v0.1.6/go.mod h1:HngMIv57MT+pqCVeRQMa1eTB5dqnyMm8uxjyv+Hn8cs= -github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= -github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= -github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg= -github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= -github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= -github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= +github.com/hashicorp/consul/api v1.10.1-0.20240312203720-262f4358003f h1:8clIrMnJtO5ab5Kd1qF19s9s581cyGYhQxfPLVRaFZs= +github.com/hashicorp/consul/api v1.10.1-0.20240312203720-262f4358003f/go.mod h1:JnWx0qZd1Ffeoa42yVAxzv7/v7eaZyptkw0dG9F/gF4= +github.com/hashicorp/consul/proto-public v0.6.0 h1:9qrBujmoTB5gQQ84kQO+YWvhjgYoYBNrOoHdo4cpHHM= +github.com/hashicorp/consul/proto-public v0.6.0/go.mod h1:JF6983XNCzvw4wDNOLEwLqOq2IPw7iyT+pkswHSz08U= +github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8= +github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap 
v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -387,8 +387,8 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1: github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= diff --git a/control-plane/helper/test/test_util.go b/control-plane/helper/test/test_util.go index 542700c34f..df51927e4c 100644 --- a/control-plane/helper/test/test_util.go +++ b/control-plane/helper/test/test_util.go @@ -18,9 +18,11 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/consul-server-connection-manager/discovery" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/sdk/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/protobuf/testing/protocmp" @@ -28,6 +30,7 @@ import ( 
"github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/helper/cert" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" ) const ( @@ -37,10 +40,11 @@ const ( ) type TestServerClient struct { - TestServer *testutil.TestServer - APIClient *api.Client - Cfg *consul.Config - Watcher consul.ServerConnectionManager + TestServer *testutil.TestServer + APIClient *api.Client + Cfg *consul.Config + Watcher consul.ServerConnectionManager + ResourceClient pbresource.ResourceServiceClient } func TestServerWithMockConnMgrWatcher(t *testing.T, callback testutil.ServerConfigCallback) *TestServerClient { @@ -72,13 +76,21 @@ func TestServerWithMockConnMgrWatcher(t *testing.T, callback testutil.ServerConf requireACLBootstrapped(t, cfg, client) watcher := MockConnMgrForIPAndPort(t, "127.0.0.1", cfg.Ports.GRPC, true) - requireTenancyBuiltins(t, cfg, client) + // Create a gRPC resource service client when the resource-apis experiment is enabled. + var resourceClient pbresource.ResourceServiceClient + if slices.Contains(cfg.Experiments, "resource-apis") { + resourceClient, err = consul.NewResourceServiceClient(watcher) + require.NoError(t, err) + } + + requireTenancyBuiltins(t, cfg, client, resourceClient) return &TestServerClient{ - TestServer: consulServer, - APIClient: client, - Cfg: consulConfig, - Watcher: watcher, + TestServer: consulServer, + APIClient: client, + Cfg: consulConfig, + Watcher: watcher, + ResourceClient: resourceClient, } } @@ -308,6 +320,27 @@ func SetupK8sAuthMethodWithNamespaces(t *testing.T, consulClient *api.Client, se require.NoError(t, err) } +// ResourceHasPersisted checks that a recently written resource exists in the Consul +// state store with a valid version. This must be true before a resource is overwritten +// or deleted. 
+func ResourceHasPersisted(t *testing.T, ctx context.Context, client pbresource.ResourceServiceClient, id *pbresource.ID) { + req := &pbresource.ReadRequest{Id: id} + + require.Eventually(t, func() bool { + res, err := client.Read(ctx, req) + if err != nil { + return false + } + + if res.GetResource().GetVersion() == "" { + return false + } + + return true + }, 5*time.Second, + time.Second) +} + func TokenReviewsResponse(name, ns string) string { return fmt.Sprintf(`{ "kind": "TokenReview", @@ -385,37 +418,53 @@ Z23jGuk6rn9DUHC2xPj3wCTmd8SGEJoV31noJV5dVeQ90wusXz3vTG7ficKnvHFS xtr5PSwH1DusYfVaGH2O -----END CERTIFICATE-----` -func requireTenancyBuiltins(t *testing.T, cfg *testutil.TestServerConfig, client *api.Client) { +func requireTenancyBuiltins(t *testing.T, cfg *testutil.TestServerConfig, client *api.Client, resourceClient pbresource.ResourceServiceClient) { t.Helper() - // There is a window of time post-leader election on startup where tenancy builtins + // There is a window of time post-leader election on startup where v2 tenancy builtins // (default partition and namespace) have not yet been created. // Wait for them to exist before considering the server "open for business". - // // Only check for default namespace existence since it implies the default partition exists. 
- - require.Eventually(t, - func() bool { - self, err := client.Agent().Self() - if err != nil { - return false - } - if self["DebugConfig"]["VersionMetadata"] != "ent" { - return true - } - - // Check for the default partition instead of the default namespace since this is a thing: - // error="Namespaces are currently disabled until all servers in the datacenter supports the feature" - partition, _, err := client.Partitions().Read( - context.Background(), - constants.DefaultConsulPartition, - nil, - ) - return err == nil && partition != nil + if slices.Contains(cfg.Experiments, "v2tenancy") { + require.EventuallyWithT(t, func(c *assert.CollectT) { + _, err := resourceClient.Read(context.Background(), &pbresource.ReadRequest{ + Id: &pbresource.ID{ + Name: constants.DefaultConsulNS, + Type: pbtenancy.NamespaceType, + Tenancy: &pbresource.Tenancy{Partition: constants.DefaultConsulPartition}, + }, + }) + assert.NoError(c, err) }, - eventuallyWaitFor, - eventuallyTickEvery, - "failed to eventually read builtin default partition") + eventuallyWaitFor, + eventuallyTickEvery, + "failed to eventually read v2 builtin default namespace", + ) + } else { + // Do the same for V1 counterparts in ent only to prevent known test flakes. 
+ require.Eventually(t, + func() bool { + self, err := client.Agent().Self() + if err != nil { + return false + } + if self["DebugConfig"]["VersionMetadata"] != "ent" { + return true + } + + // Check for the default partition instead of the default namespace since this is a thing: + // error="Namespaces are currently disabled until all servers in the datacenter supports the feature" + partition, _, err := client.Partitions().Read( + context.Background(), + constants.DefaultConsulPartition, + nil, + ) + return err == nil && partition != nil + }, + eventuallyWaitFor, + eventuallyTickEvery, + "failed to eventually read v1 builtin default partition") + } } func requireACLBootstrapped(t *testing.T, cfg *testutil.TestServerConfig, client *api.Client) { diff --git a/control-plane/subcommand/gateway-cleanup/command.go b/control-plane/subcommand/gateway-cleanup/command.go index b281efbce7..709f925c66 100644 --- a/control-plane/subcommand/gateway-cleanup/command.go +++ b/control-plane/subcommand/gateway-cleanup/command.go @@ -8,6 +8,8 @@ import ( "errors" "flag" "fmt" + "io" + "os" "sync" "time" @@ -19,8 +21,11 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" + k8syaml "sigs.k8s.io/yaml" + "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" + "github.com/hashicorp/consul-k8s/control-plane/gateways" "github.com/hashicorp/consul-k8s/control-plane/subcommand" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" ) @@ -46,6 +51,8 @@ type Command struct { once sync.Once help string + gatewayConfig gateways.GatewayResources + ctx context.Context } @@ -106,6 +113,11 @@ func (c *Command) Run(args []string) int { return 1 } + if err := v2beta1.AddMeshToScheme(s); err != nil { + c.UI.Error(fmt.Sprintf("Could not add consul-k8s schema: %s", err)) + return 1 + } + c.k8sClient, err = 
client.New(config, client.Options{Scheme: s}) if err != nil { c.UI.Error(fmt.Sprintf("Error initializing Kubernetes client: %s", err)) @@ -115,16 +127,38 @@ func (c *Command) Run(args []string) int { // do the cleanup - err = c.deleteGatewayClassAndGatewayClasConfig() + //V1 Cleanup + err = c.deleteV1GatewayClassAndGatewayClasConfig() + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + //V2 Cleanup + err = c.loadGatewayConfigs() + if err != nil { + + c.UI.Error(err.Error()) + return 1 + } + err = c.deleteV2GatewayClassAndClassConfigs(c.ctx) + if err != nil { + c.UI.Error(err.Error()) + + return 1 + } + + err = c.deleteV2MeshGateways(c.ctx) if err != nil { c.UI.Error(err.Error()) + return 1 } return 0 } -func (c *Command) deleteGatewayClassAndGatewayClasConfig() error { +func (c *Command) deleteV1GatewayClassAndGatewayClasConfig() error { // find the class config and mark it for deletion first so that we // can do an early return if the gateway class isn't found config := &v1alpha1.GatewayClassConfig{} @@ -216,3 +250,106 @@ func exponentialBackoffWithMaxIntervalAndTime() *backoff.ExponentialBackOff { backoff.Reset() return backoff } + +// loadGatewayConfigs reads and loads the configs from `/consul/config/config.yaml`, if this file does not exist nothing is done. 
+func (c *Command) loadGatewayConfigs() error { + file, err := os.Open(c.flagGatewayConfigLocation) + if err != nil { + if os.IsNotExist(err) { + c.UI.Warn(fmt.Sprintf("gateway configuration file not found, skipping gateway configuration, filename: %s", c.flagGatewayConfigLocation)) + return nil + } + c.UI.Error(fmt.Sprintf("Error opening gateway configuration file %s: %s", c.flagGatewayConfigLocation, err)) + return err + } + + config, err := io.ReadAll(file) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading gateway configuration file %s: %s", c.flagGatewayConfigLocation, err)) + return err + } + + err = k8syaml.Unmarshal(config, &c.gatewayConfig) + if err != nil { + c.UI.Error(fmt.Sprintf("Error decoding gateway config file: %s", err)) + return err + } + + if err := file.Close(); err != nil { + return err + } + return nil +} + +func (c *Command) deleteV2GatewayClassAndClassConfigs(ctx context.Context) error { + for _, gcc := range c.gatewayConfig.GatewayClassConfigs { + + // find the class config and mark it for deletion first so that we + // can do an early return if the gateway class isn't found + config := &v2beta1.GatewayClassConfig{} + err := c.k8sClient.Get(ctx, types.NamespacedName{Name: gcc.Name, Namespace: gcc.Namespace}, config) + if err != nil { + if k8serrors.IsNotFound(err) { + // no gateway class config, just ignore and continue + continue + } + return err + } + + // ignore any returned errors + _ = c.k8sClient.Delete(context.Background(), config) + + // find the gateway class + gatewayClass := &v2beta1.GatewayClass{} + //TODO: NET-6838 To pull the GatewayClassName from the Configmap + err = c.k8sClient.Get(ctx, types.NamespacedName{Name: gcc.Name, Namespace: gcc.Namespace}, gatewayClass) + if err != nil { + if k8serrors.IsNotFound(err) { + // no gateway class, just ignore and continue + continue + } + return err + } + + // ignore any returned errors + _ = c.k8sClient.Delete(context.Background(), gatewayClass) + + // make sure they're gone + 
if err := backoff.Retry(func() error { + err = c.k8sClient.Get(context.Background(), types.NamespacedName{Name: gcc.Name, Namespace: gcc.Namespace}, config) + if err == nil || !k8serrors.IsNotFound(err) { + return errors.New("gateway class config still exists") + } + + err = c.k8sClient.Get(context.Background(), types.NamespacedName{Name: gcc.Name, Namespace: gcc.Namespace}, gatewayClass) + if err == nil || !k8serrors.IsNotFound(err) { + return errors.New("gateway class still exists") + } + + return nil + }, exponentialBackoffWithMaxIntervalAndTime()); err != nil { + c.UI.Error(err.Error()) + // if we failed, return 0 anyway after logging the error + // since we don't want to block someone from uninstallation + } + } + + return nil +} + +func (c *Command) deleteV2MeshGateways(ctx context.Context) error { + for _, meshGw := range c.gatewayConfig.MeshGateways { + _ = c.k8sClient.Delete(ctx, meshGw) + + err := c.k8sClient.Get(ctx, types.NamespacedName{Name: meshGw.Name, Namespace: meshGw.Namespace}, meshGw) + if err != nil { + if k8serrors.IsNotFound(err) { + // no gateway, just ignore and continue + continue + } + return err + } + + } + return nil +} diff --git a/control-plane/subcommand/gateway-cleanup/command_test.go b/control-plane/subcommand/gateway-cleanup/command_test.go index 038c5b5667..69c626db6c 100644 --- a/control-plane/subcommand/gateway-cleanup/command_test.go +++ b/control-plane/subcommand/gateway-cleanup/command_test.go @@ -4,6 +4,9 @@ package gatewaycleanup import ( + "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" + corev1 "k8s.io/api/core/v1" + "os" "testing" "github.com/mitchellh/cli" @@ -52,6 +55,7 @@ func TestRun(t *testing.T) { s := runtime.NewScheme() require.NoError(t, gwv1beta1.Install(s)) require.NoError(t, v1alpha1.AddToScheme(s)) + require.NoError(t, v2beta1.AddMeshToScheme(s)) objs := []client.Object{} if tt.gatewayClass != nil { @@ -82,3 +86,165 @@ func TestRun(t *testing.T) { }) } } + +func TestRunV2Resources(t 
*testing.T) { + t.Parallel() + + for name, tt := range map[string]struct { + gatewayClassConfig []*v2beta1.GatewayClassConfig + gatewayClass []*v2beta1.GatewayClass + configMapData string + }{ + + "v2 resources exists": { + gatewayClassConfig: []*v2beta1.GatewayClassConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-gateway", + }, + }, + }, + gatewayClass: []*v2beta1.GatewayClass{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-gateway", + }, + }, + }, + configMapData: `gatewayClassConfigs: +- apiVersion: mesh.consul.hashicorp.com/v2beta1 + kind: GatewayClassConfig + metadata: + name: test-gateway + spec: + deployment: + container: + resources: + requests: + cpu: 200m + memory: 200Mi + limits: + cpu: 200m + memory: 200Mi +`, + }, + "multiple v2 resources exists": { + gatewayClassConfig: []*v2beta1.GatewayClassConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-gateway", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-gateway2", + }, + }, + }, + gatewayClass: []*v2beta1.GatewayClass{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-gateway", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-gateway2", + }, + }, + }, + configMapData: `gatewayClassConfigs: +- apiVersion: mesh.consul.hashicorp.com/v2beta1 + kind: GatewayClassConfig + metadata: + name: test-gateway + spec: + deployment: + container: + resources: + requests: + cpu: 200m + memory: 200Mi + limits: + cpu: 200m + memory: 200Mi +- apiVersion: mesh.consul.hashicorp.com/v2beta1 + kind: GatewayClassConfig + metadata: + name: test-gateway2 + spec: + deployment: + container: + resources: + requests: + cpu: 200m + memory: 200Mi + limits: + cpu: 200m + memory: 200Mi +`, + }, + "v2 emptyconfigmap": { + configMapData: "", + }, + } { + t.Run(name, func(t *testing.T) { + tt := tt + + t.Parallel() + + s := runtime.NewScheme() + require.NoError(t, gwv1beta1.Install(s)) + require.NoError(t, v2beta1.AddMeshToScheme(s)) + require.NoError(t, corev1.AddToScheme(s)) + 
require.NoError(t, v1alpha1.AddToScheme(s)) + + objs := []client.Object{} + for _, gatewayClass := range tt.gatewayClass { + objs = append(objs, gatewayClass) + } + for _, gatewayClassConfig := range tt.gatewayClassConfig { + objs = append(objs, gatewayClassConfig) + } + + path := createGatewayConfigFile(t, tt.configMapData, "config.yaml") + + client := fake.NewClientBuilder().WithScheme(s).WithObjects(objs...).Build() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + k8sClient: client, + flagGatewayClassName: "gateway-class", + flagGatewayClassConfigName: "gateway-class-config", + flagGatewayConfigLocation: path, + } + + code := cmd.Run([]string{ + "-gateway-class-config-name", "gateway-class-config", + "-gateway-class-name", "gateway-class", + "-gateway-config-file-location", path, + }) + + require.Equal(t, 0, code) + }) + } +} + +func createGatewayConfigFile(t *testing.T, fileContent, filename string) string { + t.Helper() + + // create a temp file to store configuration yaml + tmpdir := t.TempDir() + file, err := os.CreateTemp(tmpdir, filename) + if err != nil { + t.Fatal(err) + } + defer file.Close() + + _, err = file.WriteString(fileContent) + if err != nil { + t.Fatal(err) + } + return file.Name() +} diff --git a/control-plane/subcommand/gateway-resources/command.go b/control-plane/subcommand/gateway-resources/command.go index a10473239a..946e2d2703 100644 --- a/control-plane/subcommand/gateway-resources/command.go +++ b/control-plane/subcommand/gateway-resources/command.go @@ -26,9 +26,14 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" + k8syaml "sigs.k8s.io/yaml" + + authv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/auth/v2beta1" "github.com/hashicorp/consul-k8s/control-plane/api-gateway/common" + "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" + 
"github.com/hashicorp/consul-k8s/control-plane/gateways" "github.com/hashicorp/consul-k8s/control-plane/subcommand" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" ) @@ -103,6 +108,7 @@ type Command struct { tolerations []corev1.Toleration serviceAnnotations []string resources corev1.ResourceRequirements + gatewayConfig gateways.GatewayResources ctx context.Context } @@ -188,6 +194,12 @@ func (c *Command) Run(args []string) int { return 1 } + // Load gateway config from the configmap. + if err := c.loadGatewayConfigs(); err != nil { + c.UI.Error(fmt.Sprintf("Error loading gateway config: %s", err)) + return 1 + } + if c.ctx == nil { c.ctx = context.Background() } @@ -214,6 +226,16 @@ func (c *Command) Run(args []string) int { return 1 } + if err := authv2beta1.AddAuthToScheme(s); err != nil { + c.UI.Error(fmt.Sprintf("Could not add authv2beta schema: %s", err)) + return 1 + } + + if err := v2beta1.AddMeshToScheme(s); err != nil { + c.UI.Error(fmt.Sprintf("Could not add meshv2 schema: %s", err)) + return 1 + } + c.k8sClient, err = client.New(config, client.Options{Scheme: s}) if err != nil { c.UI.Error(fmt.Sprintf("Error initializing Kubernetes client: %s", err)) @@ -272,15 +294,31 @@ func (c *Command) Run(args []string) int { }, } - if err := forceClassConfig(context.Background(), c.k8sClient, classConfig); err != nil { + if err := forceV1ClassConfig(context.Background(), c.k8sClient, classConfig); err != nil { c.UI.Error(err.Error()) return 1 } - if err := forceClass(context.Background(), c.k8sClient, class); err != nil { + if err := forceV1Class(context.Background(), c.k8sClient, class); err != nil { c.UI.Error(err.Error()) return 1 } + if len(c.gatewayConfig.GatewayClassConfigs) > 0 { + err = c.createV2GatewayClassAndClassConfigs(context.Background(), meshGatewayComponent, "consul-mesh-gateway-controller") + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + } + + if len(c.gatewayConfig.MeshGateways) > 0 { + err = 
c.createV2MeshGateways(context.Background(), meshGatewayComponent) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + } + return 0 } @@ -371,6 +409,101 @@ func (c *Command) loadResourceConfig(filename string) (corev1.ResourceRequiremen return reqs, nil } +// loadGatewayConfigs reads and loads the configs from `/consul/config/config.yaml`, if this file does not exist nothing is done. +func (c *Command) loadGatewayConfigs() error { + file, err := os.Open(c.flagGatewayConfigLocation) + if err != nil { + if os.IsNotExist(err) { + c.UI.Warn(fmt.Sprintf("gateway configuration file not found, skipping gateway configuration, filename: %s", c.flagGatewayConfigLocation)) + return nil + } + c.UI.Error(fmt.Sprintf("Error opening gateway configuration file %s: %s", c.flagGatewayConfigLocation, err)) + return err + } + + config, err := io.ReadAll(file) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading gateway configuration file %s: %s", c.flagGatewayConfigLocation, err)) + return err + } + + err = k8syaml.Unmarshal(config, &c.gatewayConfig) + if err != nil { + c.UI.Error(fmt.Sprintf("Error decoding gateway config file: %s", err)) + return err + } + + // ensure default resources requirements are set + for idx := range c.gatewayConfig.MeshGateways { + if c.gatewayConfig.GatewayClassConfigs[idx].Spec.Deployment.Container == nil { + c.gatewayConfig.GatewayClassConfigs[idx].Spec.Deployment.Container = &v2beta1.GatewayClassContainerConfig{Resources: &defaultResourceRequirements} + } + } + if err := file.Close(); err != nil { + return err + } + return nil +} + +// createV2GatewayClassAndClassConfigs utilizes the configuration loaded from the gateway config file to +// create the GatewayClassConfig and GatewayClass for the gateway. 
+func (c *Command) createV2GatewayClassAndClassConfigs(ctx context.Context, component, controllerName string) error { + labels := map[string]string{ + "app": c.flagApp, + "chart": c.flagChart, + "heritage": c.flagHeritage, + "release": c.flagRelease, + "component": component, + } + + for _, cfg := range c.gatewayConfig.GatewayClassConfigs { + err := forceV2ClassConfig(ctx, c.k8sClient, cfg) + if err != nil { + return err + } + + class := &v2beta1.GatewayClass{ + ObjectMeta: metav1.ObjectMeta{Name: cfg.Name, Labels: labels}, + TypeMeta: metav1.TypeMeta{Kind: v2beta1.KindGatewayClass}, + Spec: v2beta1.GatewayClassSpec{ + ControllerName: controllerName, + ParametersRef: &v2beta1.ParametersReference{ + Group: v2beta1.MeshGroup, + Kind: v2beta1.KindGatewayClassConfig, + Namespace: &cfg.Namespace, + Name: cfg.Name, + }, + }, + } + + err = forceV2Class(ctx, c.k8sClient, class) + if err != nil { + return err + } + } + + return nil +} + +func (c *Command) createV2MeshGateways(ctx context.Context, component string) error { + labels := map[string]string{ + "app": c.flagApp, + "chart": c.flagChart, + "heritage": c.flagHeritage, + "release": c.flagRelease, + "component": component, + } + for _, meshGw := range c.gatewayConfig.MeshGateways { + meshGw.Labels = labels + err := forceV2MeshGateway(ctx, c.k8sClient, meshGw) + if err != nil { + return err + } + + } + return nil +} + func (c *Command) Synopsis() string { return synopsis } func (c *Command) Help() string { c.once.Do(c.init) @@ -400,7 +533,7 @@ var defaultResourceRequirements = corev1.ResourceRequirements{ }, } -func forceClassConfig(ctx context.Context, k8sClient client.Client, o *v1alpha1.GatewayClassConfig) error { +func forceV1ClassConfig(ctx context.Context, k8sClient client.Client, o *v1alpha1.GatewayClassConfig) error { return backoff.Retry(func() error { var existing v1alpha1.GatewayClassConfig err := k8sClient.Get(ctx, client.ObjectKeyFromObject(o), &existing) @@ -419,7 +552,7 @@ func forceClassConfig(ctx 
context.Context, k8sClient client.Client, o *v1alpha1. }, exponentialBackoffWithMaxIntervalAndTime()) } -func forceClass(ctx context.Context, k8sClient client.Client, o *gwv1beta1.GatewayClass) error { +func forceV1Class(ctx context.Context, k8sClient client.Client, o *gwv1beta1.GatewayClass) error { return backoff.Retry(func() error { var existing gwv1beta1.GatewayClass err := k8sClient.Get(ctx, client.ObjectKeyFromObject(o), &existing) @@ -438,6 +571,63 @@ func forceClass(ctx context.Context, k8sClient client.Client, o *gwv1beta1.Gatew }, exponentialBackoffWithMaxIntervalAndTime()) } +func forceV2ClassConfig(ctx context.Context, k8sClient client.Client, o *v2beta1.GatewayClassConfig) error { + return backoff.Retry(func() error { + var existing v2beta1.GatewayClassConfig + err := k8sClient.Get(ctx, client.ObjectKeyFromObject(o), &existing) + if err != nil && !k8serrors.IsNotFound(err) { + return err + } + + if k8serrors.IsNotFound(err) { + return k8sClient.Create(ctx, o) + } + + existing.Spec = *o.Spec.DeepCopy() + existing.Labels = o.Labels + + return k8sClient.Update(ctx, &existing) + }, exponentialBackoffWithMaxIntervalAndTime()) +} + +func forceV2Class(ctx context.Context, k8sClient client.Client, o *v2beta1.GatewayClass) error { + return backoff.Retry(func() error { + var existing v2beta1.GatewayClass + err := k8sClient.Get(ctx, client.ObjectKeyFromObject(o), &existing) + if err != nil && !k8serrors.IsNotFound(err) { + return err + } + + if k8serrors.IsNotFound(err) { + return k8sClient.Create(ctx, o) + } + + existing.Spec = *o.Spec.DeepCopy() + existing.Labels = o.Labels + + return k8sClient.Update(ctx, &existing) + }, exponentialBackoffWithMaxIntervalAndTime()) +} + +func forceV2MeshGateway(ctx context.Context, k8sClient client.Client, o *v2beta1.MeshGateway) error { + return backoff.Retry(func() error { + var existing v2beta1.MeshGateway + err := k8sClient.Get(ctx, client.ObjectKeyFromObject(o), &existing) + if err != nil && !k8serrors.IsNotFound(err) { + 
return err + } + + if k8serrors.IsNotFound(err) { + return k8sClient.Create(ctx, o) + } + + existing.Spec = *o.Spec.DeepCopy() + existing.Labels = o.Labels + + return k8sClient.Update(ctx, &existing) + }, exponentialBackoffWithMaxIntervalAndTime()) +} + func exponentialBackoffWithMaxIntervalAndTime() *backoff.ExponentialBackOff { backoff := backoff.NewExponentialBackOff() backoff.MaxElapsedTime = 10 * time.Second diff --git a/control-plane/subcommand/gateway-resources/command_test.go b/control-plane/subcommand/gateway-resources/command_test.go index 6e8230102c..70eb1e3d90 100644 --- a/control-plane/subcommand/gateway-resources/command_test.go +++ b/control-plane/subcommand/gateway-resources/command_test.go @@ -17,6 +17,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" + meshv2beta1 "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + + "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" ) @@ -196,6 +199,7 @@ func TestRun(t *testing.T) { for name, tt := range map[string]struct { existingGatewayClass bool existingGatewayClassConfig bool + meshGWConfigFileExists bool }{ "both exist": { existingGatewayClass: true, @@ -208,6 +212,9 @@ func TestRun(t *testing.T) { existingGatewayClassConfig: true, }, "neither exist": {}, + "mesh gw config file exists": { + meshGWConfigFileExists: true, + }, } { t.Run(name, func(t *testing.T) { tt := tt @@ -226,6 +233,9 @@ func TestRun(t *testing.T) { require.NoError(t, v1alpha1.AddToScheme(s)) configFileName := gatewayConfigFilename + if tt.meshGWConfigFileExists { + configFileName = createGatewayConfigFile(t, validGWConfigurationKitchenSink, "config.yaml") + } objs := []client.Object{} if tt.existingGatewayClass { @@ -355,6 +365,269 @@ func TestRun_loadResourceConfigFileWhenConfigFileDoesNotExist(t *testing.T) { require.Contains(t, string(ui.OutputWriter.Bytes()), "No resources.json found, using 
defaults") } +var validGWConfigurationKitchenSink = `gatewayClassConfigs: +- apiVersion: mesh.consul.hashicorp.com/v2beta1 + kind: GatewayClassConfig + metadata: + name: consul-mesh-gateway + spec: + deployment: + hostNetwork: true + dnsPolicy: ClusterFirst + replicas: + min: 3 + default: 3 + max: 3 + nodeSelector: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: consul + release: consul-helm + component: mesh-gateway + topologyKey: kubernetes.io/hostname + tolerations: + - key: "key1" + operator: "Equal" + value: "value1" + effect: "NoSchedule" + container: + portModifier: 8000 + resources: + requests: + cpu: 200m + memory: 200Mi + limits: + cpu: 200m + memory: 200Mi +meshGateways: +- apiVersion: mesh.consul.hashicorp.com/v2beta1 + kind: MeshGateway + metadata: + name: mesh-gateway + namespace: consul + spec: + gatewayClassName: consul-mesh-gateway +` + +var validGWConfigurationMinimal = `gatewayClassConfigs: +- apiVersion: mesh.consul.hashicorp.com/v2beta1 + kind: GatewayClassConfig + metadata: + name: consul-mesh-gateway + spec: + deployment: +meshGateways: +- apiVersion: mesh.consul.hashicorp.com/v2beta1 + kind: MeshGateway + metadata: + name: mesh-gateway + namespace: consul + spec: + gatewayClassName: consul-mesh-gateway +` + +var invalidGWConfiguration = ` +gatewayClassConfigs: +iVersion= mesh.consul.hashicorp.com/v2beta1 + kind: gatewayClassConfig + metadata: + name: consul-mesh-gateway + namespace: namespace + spec: + deployment: + resources: + requests: + cpu: 100m +meshGateways: +- name: mesh-gateway + spec: + gatewayClassName: consul-mesh-gateway +` + +func TestRun_loadGatewayConfigs(t *testing.T) { + var replicasCount int32 = 3 + testCases := map[string]struct { + config string + filename string + expectedDeployment v2beta1.GatewayClassDeploymentConfig + }{ + "kitchen sink": { + config: 
validGWConfigurationKitchenSink, + filename: "kitchenSinkConfig.yaml", + expectedDeployment: v2beta1.GatewayClassDeploymentConfig{ + HostNetwork: true, + DNSPolicy: "ClusterFirst", + NodeSelector: map[string]string{ + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + }, + Replicas: &v2beta1.GatewayClassReplicasConfig{ + Default: &replicasCount, + Min: &replicasCount, + Max: &replicasCount, + }, + Tolerations: []corev1.Toleration{ + { + Key: "key1", + Operator: "Equal", + Value: "value1", + Effect: "NoSchedule", + }, + }, + + Affinity: &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "consul", + "release": "consul-helm", + "component": "mesh-gateway", + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + Container: &v2beta1.GatewayClassContainerConfig{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("200Mi"), + corev1.ResourceCPU: resource.MustParse("200m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("200Mi"), + corev1.ResourceCPU: resource.MustParse("200m"), + }, + }, + PortModifier: 8000, + }, + }, + }, + "minimal configuration": { + config: validGWConfigurationMinimal, + filename: "minimalConfig.yaml", + expectedDeployment: v2beta1.GatewayClassDeploymentConfig{ + Container: &v2beta1.GatewayClassContainerConfig{ + Resources: &defaultResourceRequirements, + }, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + filename := createGatewayConfigFile(t, tc.config, tc.filename) + // setup k8s client + s := runtime.NewScheme() + require.NoError(t, gwv1beta1.Install(s)) + require.NoError(t, v1alpha1.AddToScheme(s)) + + client := fake.NewClientBuilder().WithScheme(s).Build() + + ui := cli.NewMockUi() + cmd := 
Command{ + UI: ui, + k8sClient: client, + flagGatewayConfigLocation: filename, + } + + err := cmd.loadGatewayConfigs() + require.NoError(t, err) + require.NotEmpty(t, cmd.gatewayConfig.GatewayClassConfigs) + require.NotEmpty(t, cmd.gatewayConfig.MeshGateways) + + // we only created one class config + classConfig := cmd.gatewayConfig.GatewayClassConfigs[0].DeepCopy() + + expectedClassConfig := v2beta1.GatewayClassConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v2beta1.MeshGroupVersion.String(), + Kind: v2beta1.KindGatewayClassConfig, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-mesh-gateway", + }, + Spec: v2beta1.GatewayClassConfigSpec{ + Deployment: tc.expectedDeployment, + }, + Status: v2beta1.Status{}, + } + require.Equal(t, expectedClassConfig.DeepCopy(), classConfig) + + // check mesh gateway, we only created one of these + actualMeshGateway := cmd.gatewayConfig.MeshGateways[0] + + expectedMeshGateway := &v2beta1.MeshGateway{ + TypeMeta: metav1.TypeMeta{ + Kind: "MeshGateway", + APIVersion: v2beta1.MeshGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "mesh-gateway", + Namespace: "consul", + }, + Spec: meshv2beta1.MeshGateway{ + GatewayClassName: "consul-mesh-gateway", + }, + } + + require.Equal(t, expectedMeshGateway.DeepCopy(), actualMeshGateway) + }) + } +} + +func TestRun_loadGatewayConfigsWithInvalidFile(t *testing.T) { + filename := createGatewayConfigFile(t, invalidGWConfiguration, "config.yaml") + // setup k8s client + s := runtime.NewScheme() + require.NoError(t, gwv1beta1.Install(s)) + require.NoError(t, v1alpha1.AddToScheme(s)) + + client := fake.NewClientBuilder().WithScheme(s).Build() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + k8sClient: client, + flagGatewayConfigLocation: filename, + } + + err := cmd.loadGatewayConfigs() + require.Error(t, err) + require.Empty(t, cmd.gatewayConfig.GatewayClassConfigs) + require.Empty(t, cmd.gatewayConfig.MeshGateways) +} + +func 
TestRun_loadGatewayConfigsWhenConfigFileDoesNotExist(t *testing.T) { + filename := "./consul/config/config.yaml" + s := runtime.NewScheme() + require.NoError(t, gwv1beta1.Install(s)) + require.NoError(t, v1alpha1.AddToScheme(s)) + + client := fake.NewClientBuilder().WithScheme(s).Build() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + k8sClient: client, + flagGatewayConfigLocation: filename, + } + + err := cmd.loadGatewayConfigs() + require.NoError(t, err) + require.Empty(t, cmd.gatewayConfig.GatewayClassConfigs) + require.Empty(t, cmd.gatewayConfig.MeshGateways) + require.Contains(t, string(ui.ErrorWriter.Bytes()), "gateway configuration file not found, skipping gateway configuration") +} + func createGatewayConfigFile(t *testing.T, fileContent, filename string) string { t.Helper() diff --git a/control-plane/subcommand/inject-connect/command.go b/control-plane/subcommand/inject-connect/command.go index 68addfc7b2..db59a98d68 100644 --- a/control-plane/subcommand/inject-connect/command.go +++ b/control-plane/subcommand/inject-connect/command.go @@ -32,6 +32,9 @@ import ( gwv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2" gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" + authv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/auth/v2beta1" + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" + multiclusterv2 "github.com/hashicorp/consul-k8s/control-plane/api/multicluster/v2" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" @@ -51,12 +54,13 @@ type Command struct { flagConsulImage string // Docker image for Consul flagConsulDataplaneImage string // Docker image for Envoy flagConsulK8sImage string // Docker image for consul-k8s - flagGlobalImagePullPolicy string // Pull policy for all Consul images (consul, consul-dataplane, consul-k8s) flagACLAuthMethod string // Auth Method to 
use for ACLs, if enabled flagEnvoyExtraArgs string // Extra envoy args when starting envoy flagEnableWebhookCAUpdate bool flagLogLevel string flagLogJSON bool + flagResourceAPIs bool // Use V2 APIs + flagV2Tenancy bool // Use V2 partitions (ent only) and namespaces instead of V1 counterparts flagAllowK8sNamespacesList []string // K8s namespaces to explicitly inject flagDenyK8sNamespacesList []string // K8s namespaces to deny injection (has precedence) @@ -168,6 +172,11 @@ func init() { utilruntime.Must(gwv1beta1.AddToScheme(scheme)) utilruntime.Must(gwv1alpha2.AddToScheme(scheme)) + // V2 resources + utilruntime.Must(authv2beta1.AddAuthToScheme(scheme)) + utilruntime.Must(meshv2beta1.AddMeshToScheme(scheme)) + utilruntime.Must(multiclusterv2.AddMultiClusterToScheme(scheme)) + // +kubebuilder:scaffold:scheme } @@ -185,8 +194,6 @@ func (c *Command) init() { "Docker image for Consul Dataplane.") c.flagSet.StringVar(&c.flagConsulK8sImage, "consul-k8s-image", "", "Docker image for consul-k8s. 
Used for the connect sidecar.") - c.flagSet.StringVar(&c.flagGlobalImagePullPolicy, "global-image-pull-policy", "", - "ImagePullPolicy for all images used by Consul (consul, consul-dataplane, consul-k8s).") c.flagSet.BoolVar(&c.flagEnablePeering, "enable-peering", false, "Enable cluster peering controllers.") c.flagSet.BoolVar(&c.flagEnableFederation, "enable-federation", false, "Enable Consul WAN Federation.") c.flagSet.StringVar(&c.flagEnvoyExtraArgs, "envoy-extra-args", "", @@ -236,6 +243,10 @@ func (c *Command) init() { "%q, %q, %q, and %q.", zapcore.DebugLevel.String(), zapcore.InfoLevel.String(), zapcore.WarnLevel.String(), zapcore.ErrorLevel.String())) c.flagSet.BoolVar(&c.flagLogJSON, "log-json", false, "Enable or disable JSON output format for logging.") + c.flagSet.BoolVar(&c.flagResourceAPIs, "enable-resource-apis", false, + "Enable or disable Consul V2 Resource APIs.") + c.flagSet.BoolVar(&c.flagV2Tenancy, "enable-v2tenancy", false, + "Enable or disable Consul V2 tenancy.") // Proxy sidecar resource setting flags. c.flagSet.StringVar(&c.flagDefaultSidecarProxyCPURequest, "default-sidecar-proxy-cpu-request", "", "Default sidecar proxy CPU request.") @@ -400,7 +411,13 @@ func (c *Command) Run(args []string) int { return 1 } - err = c.configureControllers(ctx, mgr, watcher) + // Right now we exclusively start controllers for V1 or V2. + // In the future we might add a flag to pick and choose from both. 
+ if c.flagResourceAPIs { + err = c.configureV2Controllers(ctx, mgr, watcher) + } else { + err = c.configureV1Controllers(ctx, mgr, watcher) + } if err != nil { setupLog.Error(err, fmt.Sprintf("could not configure controllers: %s", err.Error())) return 1 @@ -425,14 +442,17 @@ func (c *Command) validateFlags() error { return errors.New("-consul-dataplane-image must be set") } - switch corev1.PullPolicy(c.flagGlobalImagePullPolicy) { - case corev1.PullAlways: - case corev1.PullNever: - case corev1.PullIfNotPresent: - case "": - break - default: - return errors.New("-global-image-pull-policy must be `IfNotPresent`, `Always`, `Never`, or `` ") + // In Consul 1.17, multiport beta shipped with v2 catalog + mesh resources backed by v1 tenancy + // and acls (experiments=[resource-apis]). + // + // With Consul 1.18, we built out v2 tenancy with no support for acls, hence need to be explicit + // about which combination of v1 + v2 features are enabled. + // + // To summarize: + // - experiments=[resource-apis] => v2 catalog and mesh + v1 tenancy and acls + // - experiments=[resource-apis, v2tenancy] => v2 catalog and mesh + v2 tenancy + acls disabled + if c.flagV2Tenancy && !c.flagResourceAPIs { + return errors.New("-enable-resource-apis must be set to 'true' if -enable-v2tenancy is set") } if c.flagEnablePartitions && c.consul.Partition == "" { diff --git a/control-plane/subcommand/inject-connect/command_test.go b/control-plane/subcommand/inject-connect/command_test.go index 7f4b93f89e..e7ca3f12cd 100644 --- a/control-plane/subcommand/inject-connect/command_test.go +++ b/control-plane/subcommand/inject-connect/command_test.go @@ -133,10 +133,13 @@ func TestRun_FlagValidation(t *testing.T) { expErr: "-default-envoy-proxy-concurrency must be >= 0 if set", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-global-image-pull-policy", "garbage", + flags: []string{ + "-consul-k8s-image", 
"hashicorp/consul-k8s", + "-consul-image", "hashicorp/consul", + "-consul-dataplane-image", "hashicorp/consul-dataplane", + "-enable-v2tenancy", "true", }, - expErr: "-global-image-pull-policy must be `IfNotPresent`, `Always`, `Never`, or `` ", + expErr: "-enable-resource-apis must be set to 'true' if -enable-v2tenancy is set", }, } diff --git a/control-plane/subcommand/inject-connect/v1controllers.go b/control-plane/subcommand/inject-connect/v1controllers.go index 52d6f66259..2b1b1fab39 100644 --- a/control-plane/subcommand/inject-connect/v1controllers.go +++ b/control-plane/subcommand/inject-connect/v1controllers.go @@ -16,7 +16,6 @@ import ( gatewaycontrollers "github.com/hashicorp/consul-k8s/control-plane/api-gateway/controllers" apicommon "github.com/hashicorp/consul-k8s/control-plane/api/common" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/catalog/registration" "github.com/hashicorp/consul-k8s/control-plane/connect-inject/controllers/endpoints" "github.com/hashicorp/consul-k8s/control-plane/connect-inject/controllers/peering" "github.com/hashicorp/consul-k8s/control-plane/connect-inject/lifecycle" @@ -27,7 +26,7 @@ import ( "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" ) -func (c *Command) configureControllers(ctx context.Context, mgr manager.Manager, watcher *discovery.Watcher) error { +func (c *Command) configureV1Controllers(ctx context.Context, mgr manager.Manager, watcher *discovery.Watcher) error { // Create Consul API config object. 
consulConfig := c.consul.ConsulClientConfig() @@ -118,7 +117,6 @@ func (c *Command) configureControllers(ctx context.Context, mgr manager.Manager, }, ImageDataplane: c.flagConsulDataplaneImage, ImageConsulK8S: c.flagConsulK8sImage, - GlobalImagePullPolicy: c.flagGlobalImagePullPolicy, ConsulDestinationNamespace: c.flagConsulDestinationNamespace, NamespaceMirroringPrefix: c.flagK8SNSMirroringPrefix, EnableNamespaces: c.flagEnableNamespaces, @@ -287,16 +285,6 @@ func (c *Command) configureControllers(ctx context.Context, mgr manager.Manager, return err } - if err := (®istration.RegistrationsController{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Cache: registration.NewRegistrationCache(ctx, consulConfig, watcher, mgr.GetClient(), c.flagEnableNamespaces, c.flagEnablePartitions), - Log: ctrl.Log.WithName("controller").WithName(apicommon.Registration), - }).SetupWithManager(ctx, mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", apicommon.Registration) - return err - } - if err := mgr.AddReadyzCheck("ready", webhook.ReadinessCheck{CertDir: c.flagCertDir}.Ready); err != nil { setupLog.Error(err, "unable to create readiness check", "controller", endpoints.Controller{}) return err @@ -340,48 +328,47 @@ func (c *Command) configureControllers(ctx context.Context, mgr manager.Manager, } (&webhook.MeshWebhook{ - Clientset: c.clientset, - ReleaseNamespace: c.flagReleaseNamespace, - ConsulConfig: consulConfig, - ConsulServerConnMgr: watcher, - ImageConsul: c.flagConsulImage, - ImageConsulDataplane: c.flagConsulDataplaneImage, - EnvoyExtraArgs: c.flagEnvoyExtraArgs, - ImageConsulK8S: c.flagConsulK8sImage, - GlobalImagePullPolicy: c.flagGlobalImagePullPolicy, - RequireAnnotation: !c.flagDefaultInject, - AuthMethod: c.flagACLAuthMethod, - ConsulCACert: string(c.caCertPem), - TLSEnabled: c.consul.UseTLS, - ConsulAddress: c.consul.Addresses, - SkipServerWatch: c.consul.SkipServerWatch, - ConsulTLSServerName: c.consul.TLSServerName, - 
DefaultProxyCPURequest: c.sidecarProxyCPURequest, - DefaultProxyCPULimit: c.sidecarProxyCPULimit, - DefaultProxyMemoryRequest: c.sidecarProxyMemoryRequest, - DefaultProxyMemoryLimit: c.sidecarProxyMemoryLimit, - DefaultEnvoyProxyConcurrency: c.flagDefaultEnvoyProxyConcurrency, - DefaultSidecarProxyStartupFailureSeconds: c.flagDefaultSidecarProxyStartupFailureSeconds, + Clientset: c.clientset, + ReleaseNamespace: c.flagReleaseNamespace, + ConsulConfig: consulConfig, + ConsulServerConnMgr: watcher, + ImageConsul: c.flagConsulImage, + ImageConsulDataplane: c.flagConsulDataplaneImage, + EnvoyExtraArgs: c.flagEnvoyExtraArgs, + ImageConsulK8S: c.flagConsulK8sImage, + RequireAnnotation: !c.flagDefaultInject, + AuthMethod: c.flagACLAuthMethod, + ConsulCACert: string(c.caCertPem), + TLSEnabled: c.consul.UseTLS, + ConsulAddress: c.consul.Addresses, + SkipServerWatch: c.consul.SkipServerWatch, + ConsulTLSServerName: c.consul.TLSServerName, + DefaultProxyCPURequest: c.sidecarProxyCPURequest, + DefaultProxyCPULimit: c.sidecarProxyCPULimit, + DefaultProxyMemoryRequest: c.sidecarProxyMemoryRequest, + DefaultProxyMemoryLimit: c.sidecarProxyMemoryLimit, + DefaultEnvoyProxyConcurrency: c.flagDefaultEnvoyProxyConcurrency, + DefaultSidecarProxyStartupFailureSeconds: c.flagDefaultSidecarProxyStartupFailureSeconds, DefaultSidecarProxyLivenessFailureSeconds: c.flagDefaultSidecarProxyLivenessFailureSeconds, - LifecycleConfig: lifecycleConfig, - MetricsConfig: metricsConfig, - InitContainerResources: c.initContainerResources, - ConsulPartition: c.consul.Partition, - AllowK8sNamespacesSet: allowK8sNamespaces, - DenyK8sNamespacesSet: denyK8sNamespaces, - EnableNamespaces: c.flagEnableNamespaces, - ConsulDestinationNamespace: c.flagConsulDestinationNamespace, - EnableK8SNSMirroring: c.flagEnableK8SNSMirroring, - K8SNSMirroringPrefix: c.flagK8SNSMirroringPrefix, - CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, - EnableTransparentProxy: c.flagDefaultEnableTransparentProxy, - EnableCNI: 
c.flagEnableCNI, - TProxyOverwriteProbes: c.flagTransparentProxyDefaultOverwriteProbes, - EnableConsulDNS: c.flagEnableConsulDNS, - EnableOpenShift: c.flagEnableOpenShift, - Log: ctrl.Log.WithName("handler").WithName("connect"), - LogLevel: c.flagLogLevel, - LogJSON: c.flagLogJSON, + LifecycleConfig: lifecycleConfig, + MetricsConfig: metricsConfig, + InitContainerResources: c.initContainerResources, + ConsulPartition: c.consul.Partition, + AllowK8sNamespacesSet: allowK8sNamespaces, + DenyK8sNamespacesSet: denyK8sNamespaces, + EnableNamespaces: c.flagEnableNamespaces, + ConsulDestinationNamespace: c.flagConsulDestinationNamespace, + EnableK8SNSMirroring: c.flagEnableK8SNSMirroring, + K8SNSMirroringPrefix: c.flagK8SNSMirroringPrefix, + CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, + EnableTransparentProxy: c.flagDefaultEnableTransparentProxy, + EnableCNI: c.flagEnableCNI, + TProxyOverwriteProbes: c.flagTransparentProxyDefaultOverwriteProbes, + EnableConsulDNS: c.flagEnableConsulDNS, + EnableOpenShift: c.flagEnableOpenShift, + Log: ctrl.Log.WithName("handler").WithName("connect"), + LogLevel: c.flagLogLevel, + LogJSON: c.flagLogJSON, }).SetupWithManager(mgr) consulMeta := apicommon.ConsulMeta{ @@ -479,12 +466,6 @@ func (c *Command) configureControllers(ctx context.Context, mgr manager.Manager, ConsulMeta: consulMeta, }).SetupWithManager(mgr) - (&v1alpha1.RegistrationWebhook{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName(apicommon.Registration), - ConsulMeta: consulMeta, - }).SetupWithManager(mgr) - if c.flagEnableWebhookCAUpdate { err = c.updateWebhookCABundle(ctx) if err != nil { diff --git a/control-plane/subcommand/inject-connect/v2controllers.go b/control-plane/subcommand/inject-connect/v2controllers.go new file mode 100644 index 0000000000..480bd28cf3 --- /dev/null +++ b/control-plane/subcommand/inject-connect/v2controllers.go @@ -0,0 +1,359 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package connectinject + +import ( + "context" + + "github.com/hashicorp/consul-server-connection-manager/discovery" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/manager" + + authv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/auth/v2beta1" + "github.com/hashicorp/consul-k8s/control-plane/api/common" + meshv2beta1 "github.com/hashicorp/consul-k8s/control-plane/api/mesh/v2beta1" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/controllers/endpointsv2" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/controllers/pod" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/controllers/serviceaccount" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/lifecycle" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/metrics" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/namespace" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/webhook" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/webhookv2" + resourceControllers "github.com/hashicorp/consul-k8s/control-plane/controllers/resources" + "github.com/hashicorp/consul-k8s/control-plane/gateways" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + namespacev2 "github.com/hashicorp/consul-k8s/control-plane/tenancy/namespace" +) + +func (c *Command) configureV2Controllers(ctx context.Context, mgr manager.Manager, watcher *discovery.Watcher) error { + // Create Consul API config object. + consulConfig := c.consul.ConsulClientConfig() + + // Convert allow/deny lists to sets. 
+ allowK8sNamespaces := flags.ToSet(c.flagAllowK8sNamespacesList) + denyK8sNamespaces := flags.ToSet(c.flagDenyK8sNamespacesList) + k8sNsConfig := common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: allowK8sNamespaces, + DenyK8sNamespacesSet: denyK8sNamespaces, + } + consulTenancyConfig := common.ConsulTenancyConfig{ + EnableConsulPartitions: c.flagEnablePartitions, + EnableConsulNamespaces: c.flagEnableNamespaces, + ConsulDestinationNamespace: c.flagConsulDestinationNamespace, + EnableNSMirroring: c.flagEnableK8SNSMirroring, + NSMirroringPrefix: c.flagK8SNSMirroringPrefix, + ConsulPartition: c.consul.Partition, + } + + lifecycleConfig := lifecycle.Config{ + DefaultEnableProxyLifecycle: c.flagDefaultEnableSidecarProxyLifecycle, + DefaultEnableShutdownDrainListeners: c.flagDefaultEnableSidecarProxyLifecycleShutdownDrainListeners, + DefaultShutdownGracePeriodSeconds: c.flagDefaultSidecarProxyLifecycleShutdownGracePeriodSeconds, + DefaultStartupGracePeriodSeconds: c.flagDefaultSidecarProxyLifecycleStartupGracePeriodSeconds, + DefaultGracefulPort: c.flagDefaultSidecarProxyLifecycleGracefulPort, + DefaultGracefulShutdownPath: c.flagDefaultSidecarProxyLifecycleGracefulShutdownPath, + DefaultGracefulStartupPath: c.flagDefaultSidecarProxyLifecycleGracefulStartupPath, + } + + metricsConfig := metrics.Config{ + DefaultEnableMetrics: c.flagDefaultEnableMetrics, + EnableGatewayMetrics: c.flagEnableGatewayMetrics, + DefaultEnableMetricsMerging: c.flagDefaultEnableMetricsMerging, + DefaultMergedMetricsPort: c.flagDefaultMergedMetricsPort, + DefaultPrometheusScrapePort: c.flagDefaultPrometheusScrapePort, + DefaultPrometheusScrapePath: c.flagDefaultPrometheusScrapePath, + } + + if err := (&pod.Controller{ + Client: mgr.GetClient(), + ConsulClientConfig: consulConfig, + ConsulServerConnMgr: watcher, + K8sNamespaceConfig: k8sNsConfig, + ConsulTenancyConfig: consulTenancyConfig, + EnableTransparentProxy: c.flagDefaultEnableTransparentProxy, + TProxyOverwriteProbes: 
c.flagTransparentProxyDefaultOverwriteProbes, + AuthMethod: c.flagACLAuthMethod, + MetricsConfig: metricsConfig, + EnableTelemetryCollector: c.flagEnableTelemetryCollector, + Log: ctrl.Log.WithName("controller").WithName("pod"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", pod.Controller{}) + return err + } + + endpointsLogger := ctrl.Log.WithName("controller").WithName("endpoints") + if err := (&endpointsv2.Controller{ + Client: mgr.GetClient(), + ConsulServerConnMgr: watcher, + K8sNamespaceConfig: k8sNsConfig, + ConsulTenancyConfig: consulTenancyConfig, + WriteCache: endpointsv2.NewWriteCache(endpointsLogger), + Log: endpointsLogger, + Scheme: mgr.GetScheme(), + Context: ctx, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", endpointsv2.Controller{}) + return err + } + + if err := (&serviceaccount.Controller{ + Client: mgr.GetClient(), + ConsulServerConnMgr: watcher, + K8sNamespaceConfig: k8sNsConfig, + ConsulTenancyConfig: consulTenancyConfig, + Log: ctrl.Log.WithName("controller").WithName("serviceaccount"), + Scheme: mgr.GetScheme(), + Context: ctx, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", serviceaccount.Controller{}) + return err + } + + if c.flagV2Tenancy { + // V2 tenancy implies non-default namespaces in CE, so we don't observe flagEnableNamespaces + err := (&namespacev2.Controller{ + Client: mgr.GetClient(), + ConsulServerConnMgr: watcher, + K8sNamespaceConfig: k8sNsConfig, + ConsulTenancyConfig: consulTenancyConfig, + Log: ctrl.Log.WithName("controller").WithName("namespacev2"), + }).SetupWithManager(mgr) + if err != nil { + setupLog.Error(err, "unable to create controller", "controller", "namespacev2") + return err + } + } else { + if c.flagEnableNamespaces { + err := (&namespace.Controller{ + Client: mgr.GetClient(), + ConsulClientConfig: consulConfig, + 
ConsulServerConnMgr: watcher, + AllowK8sNamespacesSet: allowK8sNamespaces, + DenyK8sNamespacesSet: denyK8sNamespaces, + ConsulDestinationNamespace: c.flagConsulDestinationNamespace, + EnableNSMirroring: c.flagEnableK8SNSMirroring, + NSMirroringPrefix: c.flagK8SNSMirroringPrefix, + CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, + Log: ctrl.Log.WithName("controller").WithName("namespace"), + }).SetupWithManager(mgr) + if err != nil { + setupLog.Error(err, "unable to create controller", "controller", namespace.Controller{}) + return err + } + } + } + + consulResourceController := &resourceControllers.ConsulResourceController{ + ConsulClientConfig: consulConfig, + ConsulServerConnMgr: watcher, + ConsulTenancyConfig: consulTenancyConfig, + } + + if err := (&resourceControllers.TrafficPermissionsController{ + Controller: consulResourceController, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.TrafficPermissions), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.TrafficPermissions) + return err + } + + if err := (&resourceControllers.GRPCRouteController{ + Controller: consulResourceController, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.GRPCRoute), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.GRPCRoute) + return err + } + + if err := (&resourceControllers.HTTPRouteController{ + Controller: consulResourceController, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.HTTPRoute), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.HTTPRoute) + return err + } + + if err := (&resourceControllers.TCPRouteController{ + Controller: consulResourceController, + Client: mgr.GetClient(), + Log: 
ctrl.Log.WithName("controller").WithName(common.TCPRoute), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.TCPRoute) + return err + } + + if err := (&resourceControllers.ProxyConfigurationController{ + Controller: consulResourceController, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.ProxyConfiguration), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.ProxyConfiguration) + return err + } + + if err := (&resourceControllers.MeshConfigurationController{ + Controller: consulResourceController, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.MeshConfiguration), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.MeshConfiguration) + return err + } + + if err := (&resourceControllers.MeshGatewayController{ + Controller: consulResourceController, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.MeshGateway), + Scheme: mgr.GetScheme(), + GatewayConfig: gateways.GatewayConfig{ + ConsulConfig: common.ConsulConfig{ + Address: c.consul.Addresses, + GRPCPort: consulConfig.GRPCPort, + HTTPPort: consulConfig.HTTPPort, + APITimeout: consulConfig.APITimeout, + }, + ImageDataplane: c.flagConsulDataplaneImage, + ImageConsulK8S: c.flagConsulK8sImage, + ConsulTenancyConfig: consulTenancyConfig, + PeeringEnabled: c.flagEnablePeering, + EnableOpenShift: c.flagEnableOpenShift, + AuthMethod: c.consul.ConsulLogin.AuthMethod, + LogLevel: c.flagLogLevel, + LogJSON: c.flagLogJSON, + TLSEnabled: c.consul.UseTLS, + ConsulTLSServerName: c.consul.TLSServerName, + ConsulCACert: string(c.caCertPem), + SkipServerWatch: c.consul.SkipServerWatch, + }, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to 
create controller", "controller", common.MeshGateway) + return err + } + + if err := (&resourceControllers.GatewayClassConfigController{ + Controller: consulResourceController, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.GatewayClassConfig), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.GatewayClassConfig) + return err + } + + if err := (&resourceControllers.GatewayClassController{ + Controller: consulResourceController, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.GatewayClass), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.GatewayClass) + return err + } + + if err := (&resourceControllers.ExportedServicesController{ + Controller: consulResourceController, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.ExportedServices), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.ExportedServices) + return err + } + + (&webhookv2.MeshWebhook{ + Clientset: c.clientset, + ReleaseNamespace: c.flagReleaseNamespace, + ConsulConfig: consulConfig, + ConsulServerConnMgr: watcher, + ImageConsul: c.flagConsulImage, + ImageConsulDataplane: c.flagConsulDataplaneImage, + EnvoyExtraArgs: c.flagEnvoyExtraArgs, + ImageConsulK8S: c.flagConsulK8sImage, + RequireAnnotation: !c.flagDefaultInject, + AuthMethod: c.flagACLAuthMethod, + ConsulCACert: string(c.caCertPem), + TLSEnabled: c.consul.UseTLS, + ConsulAddress: c.consul.Addresses, + SkipServerWatch: c.consul.SkipServerWatch, + ConsulTLSServerName: c.consul.TLSServerName, + DefaultProxyCPURequest: c.sidecarProxyCPURequest, + DefaultProxyCPULimit: c.sidecarProxyCPULimit, + DefaultProxyMemoryRequest: c.sidecarProxyMemoryRequest, + DefaultProxyMemoryLimit: c.sidecarProxyMemoryLimit, + 
DefaultEnvoyProxyConcurrency: c.flagDefaultEnvoyProxyConcurrency, + LifecycleConfig: lifecycleConfig, + MetricsConfig: metricsConfig, + InitContainerResources: c.initContainerResources, + ConsulPartition: c.consul.Partition, + AllowK8sNamespacesSet: allowK8sNamespaces, + DenyK8sNamespacesSet: denyK8sNamespaces, + EnableNamespaces: c.flagEnableNamespaces, + ConsulDestinationNamespace: c.flagConsulDestinationNamespace, + EnableK8SNSMirroring: c.flagEnableK8SNSMirroring, + K8SNSMirroringPrefix: c.flagK8SNSMirroringPrefix, + CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, + EnableTransparentProxy: c.flagDefaultEnableTransparentProxy, + EnableCNI: c.flagEnableCNI, + TProxyOverwriteProbes: c.flagTransparentProxyDefaultOverwriteProbes, + EnableConsulDNS: c.flagEnableConsulDNS, + EnableOpenShift: c.flagEnableOpenShift, + Log: ctrl.Log.WithName("handler").WithName("consul-mesh"), + LogLevel: c.flagLogLevel, + LogJSON: c.flagLogJSON, + }).SetupWithManager(mgr) + + (&authv2beta1.TrafficPermissionsWebhook{ + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.TrafficPermissions), + ConsulTenancyConfig: consulTenancyConfig, + }).SetupWithManager(mgr) + + (&meshv2beta1.ProxyConfigurationWebhook{ + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.ProxyConfiguration), + ConsulTenancyConfig: consulTenancyConfig, + }).SetupWithManager(mgr) + + (&meshv2beta1.HTTPRouteWebhook{ + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.HTTPRoute), + ConsulTenancyConfig: consulTenancyConfig, + }).SetupWithManager(mgr) + + (&meshv2beta1.GRPCRouteWebhook{ + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.GRPCRoute), + ConsulTenancyConfig: consulTenancyConfig, + }).SetupWithManager(mgr) + + (&meshv2beta1.TCPRouteWebhook{ + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.TCPRoute), + ConsulTenancyConfig: consulTenancyConfig, + 
}).SetupWithManager(mgr) + + if err := mgr.AddReadyzCheck("ready", webhook.ReadinessCheck{CertDir: c.flagCertDir}.Ready); err != nil { + setupLog.Error(err, "unable to create readiness check") + return err + } + + if c.flagEnableWebhookCAUpdate { + err := c.updateWebhookCABundle(ctx) + if err != nil { + setupLog.Error(err, "problem getting CA Cert") + return err + } + } + + return nil +} diff --git a/control-plane/subcommand/mesh-init/command.go b/control-plane/subcommand/mesh-init/command.go new file mode 100644 index 0000000000..5b74607a65 --- /dev/null +++ b/control-plane/subcommand/mesh-init/command.go @@ -0,0 +1,287 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package meshinit + +import ( + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "net" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/cenkalti/backoff" + "github.com/hashicorp/consul-server-connection-manager/discovery" + "github.com/hashicorp/consul/proto-public/pbdataplane" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/sdk/iptables" + "github.com/hashicorp/go-hclog" + "github.com/mitchellh/cli" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + "github.com/hashicorp/consul-k8s/version" +) + +const ( + // The number of times to attempt to read this proxy registration (120s). + defaultMaxPollingRetries = 120 + defaultProxyIDFile = "/consul/mesh-inject/proxyid" +) + +type Command struct { + UI cli.Ui + + flagProxyName string + + maxPollingAttempts uint64 // Number of times to poll Consul for proxy registrations. 
+ + flagRedirectTrafficConfig string + flagLogLevel string + flagLogJSON bool + + flagSet *flag.FlagSet + consul *flags.ConsulFlags + + once sync.Once + help string + logger hclog.Logger + + watcher *discovery.Watcher + + // Only used in tests. + iptablesProvider iptables.Provider + iptablesConfig iptables.Config +} + +func (c *Command) init() { + c.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + + // V2 Flags + c.flagSet.StringVar(&c.flagProxyName, "proxy-name", os.Getenv("PROXY_NAME"), "The Consul proxy name. This is the K8s Pod name, which is also the name of the Workload in Consul. (Required)") + + // Universal flags + c.flagSet.StringVar(&c.flagRedirectTrafficConfig, "redirect-traffic-config", os.Getenv("CONSUL_REDIRECT_TRAFFIC_CONFIG"), "Config (in JSON format) to configure iptables for this pod.") + c.flagSet.StringVar(&c.flagLogLevel, "log-level", "info", + "Log verbosity level. Supported values (in order of detail) are \"trace\", "+ + "\"debug\", \"info\", \"warn\", and \"error\".") + + c.flagSet.BoolVar(&c.flagLogJSON, "log-json", false, + "Enable or disable JSON output format for logging.") + + if c.maxPollingAttempts == 0 { + c.maxPollingAttempts = defaultMaxPollingRetries + } + + c.consul = &flags.ConsulFlags{} + flags.Merge(c.flagSet, c.consul.Flags()) + c.help = flags.Usage(help, c.flagSet) +} + +func (c *Command) Run(args []string) int { + c.once.Do(c.init) + + if err := c.flagSet.Parse(args); err != nil { + return 1 + } + // Validate flags + if err := c.validateFlags(); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + if c.consul.Namespace == "" { + c.consul.Namespace = constants.DefaultConsulNS + } + if c.consul.Partition == "" { + c.consul.Partition = constants.DefaultConsulPartition + } + + // Set up logging. + if c.logger == nil { + var err error + c.logger, err = common.Logger(c.flagLogLevel, c.flagLogJSON) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + } + + // Create Consul API config object. 
+ consulConfig := c.consul.ConsulClientConfig() + + // Create a context to be used by the processes started in this command. + ctx, cancelFunc := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancelFunc() + + // Start Consul server Connection manager. + serverConnMgrCfg, err := c.consul.ConsulServerConnMgrConfig() + // Disable server watch because we only need to get server IPs once. + serverConnMgrCfg.ServerWatchDisabled = true + if err != nil { + c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) + return 1 + } + if c.watcher == nil { + c.watcher, err = discovery.NewWatcher(ctx, serverConnMgrCfg, c.logger.Named("consul-server-connection-manager")) + if err != nil { + c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) + return 1 + } + go c.watcher.Run() // The actual ACL login happens here + defer c.watcher.Stop() + } + + state, err := c.watcher.State() + if err != nil { + c.logger.Error("Unable to get state from consul-server-connection-manager", "error", err) + return 1 + } + + consulClient, err := consul.NewClientFromConnMgrState(consulConfig, state) + if err != nil { + c.logger.Error("Unable to get client connection", "error", err) + return 1 + } + + if version.IsFIPS() { + // make sure we are also using FIPS Consul + var versionInfo map[string]interface{} + _, err := consulClient.Raw().Query("/v1/agent/version", versionInfo, nil) + if err != nil { + c.logger.Warn("This is a FIPS build of consul-k8s, which should be used with FIPS Consul. Unable to verify FIPS Consul while setting up Consul API client.") + } + if val, ok := versionInfo["FIPS"]; !ok || val == "" { + c.logger.Warn("This is a FIPS build of consul-k8s, which should be used with FIPS Consul. A non-FIPS version of Consul was detected.") + } + } + + // todo (agentless): this should eventually be passed to consul-dataplane as a string so we don't need to write it to file. 
+ if c.consul.UseTLS && c.consul.CACertPEM != "" { + if err = common.WriteFileWithPerms(constants.ConsulCAFile, c.consul.CACertPEM, 0444); err != nil { + c.logger.Error("error writing CA cert file", "error", err) + return 1 + } + } + + dc, err := consul.NewDataplaneServiceClient(c.watcher) + if err != nil { + c.logger.Error("failed to create resource client", "error", err) + return 1 + } + + var bootstrapConfig pbmesh.BootstrapConfig + if err := backoff.Retry(c.getBootstrapParams(dc, &bootstrapConfig), backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), c.maxPollingAttempts)); err != nil { + c.logger.Error("Timed out waiting for bootstrap parameters", "error", err) + return 1 + } + + if c.flagRedirectTrafficConfig != "" { + c.watcher.Stop() // Explicitly stop the watcher so that ACLs are cleaned up before we apply re-direction. + err := c.applyTrafficRedirectionRules(&bootstrapConfig) // BootstrapConfig is always populated non-nil from the RPC + if err != nil { + c.logger.Error("error applying traffic redirection rules", "err", err) + return 1 + } + } + + c.logger.Info("Proxy initialization completed") + return 0 +} + +func (c *Command) validateFlags() error { + if c.flagProxyName == "" { + return errors.New("-proxy-name must be set") + } + return nil +} + +func (c *Command) Synopsis() string { return synopsis } +func (c *Command) Help() string { + c.once.Do(c.init) + return c.help +} + +func (c *Command) getBootstrapParams( + client pbdataplane.DataplaneServiceClient, + bootstrapConfig *pbmesh.BootstrapConfig, +) backoff.Operation { + return func() error { + req := &pbdataplane.GetEnvoyBootstrapParamsRequest{ + ProxyId: c.flagProxyName, + Namespace: c.consul.Namespace, + Partition: c.consul.Partition, + } + res, err := client.GetEnvoyBootstrapParams(context.Background(), req) + if err != nil { + c.logger.Error("Unable to get bootstrap parameters", "error", err) + return err + } + if res.GetBootstrapConfig() != nil { + *bootstrapConfig = 
*res.GetBootstrapConfig() + } + return nil + } +} + +// This below implementation is loosely based on +// https://github.com/hashicorp/consul/blob/fe2d41ddad9ba2b8ff86cbdebbd8f05855b1523c/command/connect/redirecttraffic/redirect_traffic.go#L136. + +func (c *Command) applyTrafficRedirectionRules(config *pbmesh.BootstrapConfig) error { + err := json.Unmarshal([]byte(c.flagRedirectTrafficConfig), &c.iptablesConfig) + if err != nil { + return err + } + if c.iptablesProvider != nil { + c.iptablesConfig.IptablesProvider = c.iptablesProvider + } + + // TODO: provide dynamic updates to the c.iptablesConfig.ProxyOutboundPort + // We currently don't have a V2 endpoint that can gather the fully synthesized ProxyConfiguration. + // We need this to dynamically set c.iptablesConfig.ProxyOutboundPort with the outbound port configuration from + // pbmesh.DynamicConfiguration.TransparentProxy.OutboundListenerPort. + // We would either need to grab another resource that has this information rendered in it, or add + // pbmesh.DynamicConfiguration to the GetBootstrapParameters rpc. + // Right now this is an edge case because the mesh webhook configured the flagRedirectTrafficConfig with the default + // 15001 port. + + // TODO: provide dyanmic updates to the c.iptablesConfig.ProxyInboundPort + // This is the `mesh` port in the workload resource. + // Right now this will always be the default port (20000) + + if config.StatsBindAddr != "" { + _, port, err := net.SplitHostPort(config.StatsBindAddr) + if err != nil { + return fmt.Errorf("failed parsing host and port from StatsBindAddr: %s", err) + } + + c.iptablesConfig.ExcludeInboundPorts = append(c.iptablesConfig.ExcludeInboundPorts, port) + } + + // Configure any relevant information from the proxy service + err = iptables.Setup(c.iptablesConfig) + if err != nil { + return err + } + c.logger.Info("Successfully applied traffic redirection rules") + return nil +} + +const ( + synopsis = "Inject mesh init command." 
+ help = ` +Usage: consul-k8s-control-plane mesh-init [options] + + Bootstraps mesh-injected pod components. + Uses V2 Consul Catalog APIs. + Not intended for stand-alone use. +` +) diff --git a/control-plane/subcommand/mesh-init/command_ent_test.go b/control-plane/subcommand/mesh-init/command_ent_test.go new file mode 100644 index 0000000000..59c710f6eb --- /dev/null +++ b/control-plane/subcommand/mesh-init/command_ent_test.go @@ -0,0 +1,115 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build enterprise + +package meshinit + +import ( + "context" + "strconv" + "testing" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" +) + +func TestRun_WithNamespaces(t *testing.T) { + t.Parallel() + cases := []struct { + name string + consulNamespace string + consulPartition string + }{ + { + name: "default ns, default partition", + consulNamespace: constants.DefaultConsulNS, + consulPartition: constants.DefaultConsulPartition, + }, + { + name: "non-default ns, default partition", + consulNamespace: "bar", + consulPartition: constants.DefaultConsulPartition, + }, + { + name: "non-default ns, non-default partition", + consulNamespace: "bar", + consulPartition: "baz", + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + + var serverCfg *testutil.TestServerConfig + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + serverCfg = c + }) + + _, err := EnsurePartitionExists(testClient.APIClient, c.consulPartition) + require.NoError(t, err) + + partitionedCfg := testClient.Cfg.APIClientConfig + partitionedCfg.Partition = c.consulPartition + + 
partitionedClient, err := api.NewClient(partitionedCfg) + require.NoError(t, err) + + _, err = namespaces.EnsureExists(partitionedClient, c.consulNamespace, "") + require.NoError(t, err) + + // Register Consul workload. + loadResource(t, testClient.ResourceClient, getWorkloadID(testPodName, c.consulNamespace, c.consulPartition), getWorkload(), nil) + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + maxPollingAttempts: 5, + } + // We build the consul-addr because normally it's defined by the init container setting + // CONSUL_HTTP_ADDR when it processes the command template. + flags := []string{"-proxy-name", testPodName, + "-addresses", "127.0.0.1", + "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), + "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + "-namespace", c.consulNamespace, + "-partition", c.consulPartition, + } + + // Run the command. + code := cmd.Run(flags) + require.Equal(t, 0, code, ui.ErrorWriter.String()) + }) + } +} + +// EnsurePartitionExists ensures a Consul partition exists. +// Boolean return value indicates if the partition was created by this call. +// This is borrowed from namespaces.EnsureExists +func EnsurePartitionExists(client *api.Client, name string) (bool, error) { + if name == constants.DefaultConsulPartition { + return false, nil + } + // Check if the Consul namespace exists. 
+ partitionInfo, _, err := client.Partitions().Read(context.Background(), name, nil) + if err != nil { + return false, err + } + if partitionInfo != nil { + return false, nil + } + + consulPartition := api.Partition{ + Name: name, + Description: "Auto-generated by consul-k8s", + } + + _, _, err = client.Partitions().Create(context.Background(), &consulPartition, nil) + return true, err +} diff --git a/control-plane/subcommand/mesh-init/command_test.go b/control-plane/subcommand/mesh-init/command_test.go new file mode 100644 index 0000000000..22184f5969 --- /dev/null +++ b/control-plane/subcommand/mesh-init/command_test.go @@ -0,0 +1,404 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package meshinit + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + "sync" + "testing" + "time" + + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/sdk/iptables" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +func TestRun_FlagValidation(t *testing.T) { + t.Parallel() + cases := []struct { + flags []string + env string + expErr string + }{ + { + flags: []string{}, + expErr: "-proxy-name must be set", + }, + { + flags: []string{ + "-proxy-name", testPodName, + "-log-level", "invalid", + }, + expErr: "unknown log level: invalid", + }, + } + for _, c := range cases { + t.Run(c.expErr, func(t *testing.T) { + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + code := cmd.Run(c.flags) + require.Equal(t, 1, code) + require.Contains(t, ui.ErrorWriter.String(), c.expErr) 
+ }) + } +} + +// TestRun_MeshServices tests that the command can log in to Consul (if ACLs are enabled) using a kubernetes +// auth method and, using the obtained token, make call to the dataplane GetBootstrapParams() RPC. +func TestRun_MeshServices(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + workload *pbcatalog.Workload + proxyConfiguration *pbmesh.ProxyConfiguration + aclsEnabled bool + expFail bool + }{ + { + name: "basic workload bootstrap", + workload: getWorkload(), + }, + { + name: "workload and proxyconfiguration bootstrap", + workload: getWorkload(), + proxyConfiguration: getProxyConfiguration(), + }, + { + name: "missing workload", + expFail: true, + }, + // TODO: acls enabled + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + // tokenFile := fmt.Sprintf("/tmp/%d1", rand.Int()) + // t.Cleanup(func() { + // _ = os.RemoveAll(tokenFile) + // }) + + // Create test consulServer server. + var serverCfg *testutil.TestServerConfig + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + serverCfg = c + }) + + loadResource(t, testClient.ResourceClient, getWorkloadID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), tt.workload, nil) + loadResource(t, testClient.ResourceClient, getProxyConfigurationID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), tt.proxyConfiguration, nil) + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + maxPollingAttempts: 3, + } + + // We build the consul-addr because normally it's defined by the init container setting + // CONSUL_HTTP_ADDR when it processes the command template. 
+ flags := []string{ + "-proxy-name", testPodName, + "-addresses", "127.0.0.1", + "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), + "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + } + // if tt.aclsEnabled { + // flags = append(flags, "-auth-method-name", test.AuthMethod, + // "-service-account-name", tt.serviceAccountName, + // "-acl-token-sink", tokenFile) //TODO: what happens if this is unspecified? We don't need this file + // } + + // Run the command. + code := cmd.Run(flags) + if tt.expFail { + require.Equal(t, 1, code) + return + } + require.Equal(t, 0, code, ui.ErrorWriter.String()) + + // TODO: Can we remove the tokenFile from this workflow? + // consul-dataplane performs it's own login using the Serviceaccount bearer token + // if tt.aclsEnabled { + // // Validate the ACL token was written. + // tokenData, err := os.ReadFile(tokenFile) + // require.NoError(t, err) + // require.NotEmpty(t, tokenData) + // + // // Check that the token has the metadata with pod name and pod namespace. + // consulClient, err = api.NewClient(&api.Config{Address: server.HTTPAddr, Token: string(tokenData)}) + // require.NoError(t, err) + // token, _, err := consulClient.ACL().TokenReadSelf(nil) + // require.NoError(t, err) + // require.Equal(t, "token created via login: {\"pod\":\"default-ns/counting-pod\"}", token.Description) + // } + }) + } +} + +// TestRun_RetryServicePolling runs the command but does not register the consul service +// for 2 seconds and then asserts the command exits successfully. +func TestRun_RetryServicePolling(t *testing.T) { + t.Parallel() + + // Start Consul server. + var serverCfg *testutil.TestServerConfig + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + serverCfg = c + }) + + // Start the consul service registration in a go func and delay it so that it runs + // after the cmd.Run() starts. 
+ var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + // Wait a moment, this ensures that we are already in the retry logic. + time.Sleep(time.Second * 2) + // Register counting service. + loadResource(t, testClient.ResourceClient, getWorkloadID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), getWorkload(), nil) + }() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + maxPollingAttempts: 10, + } + + flags := []string{ + "-proxy-name", testPodName, + "-addresses", "127.0.0.1", + "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), + "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + } + code := cmd.Run(flags) + wg.Wait() + require.Equal(t, 0, code) +} + +func TestRun_TrafficRedirection(t *testing.T) { + cases := map[string]struct { + registerProxyConfiguration bool + expIptablesParamsFunc func(actual iptables.Config) error + }{ + "no proxyConfiguration provided": { + expIptablesParamsFunc: func(actual iptables.Config) error { + if len(actual.ExcludeInboundPorts) != 0 { + return fmt.Errorf("ExcludeInboundPorts in iptables.Config was %v, but should be empty", actual.ExcludeInboundPorts) + } + if actual.ProxyInboundPort != 20000 { + return fmt.Errorf("ProxyInboundPort in iptables.Config was %d, but should be [20000]", actual.ProxyOutboundPort) + } + if actual.ProxyOutboundPort != 15001 { + return fmt.Errorf("ProxyOutboundPort in iptables.Config was %d, but should be [15001]", actual.ProxyOutboundPort) + } + return nil + }, + }, + "stats bind port is provided in proxyConfiguration": { + registerProxyConfiguration: true, + expIptablesParamsFunc: func(actual iptables.Config) error { + if len(actual.ExcludeInboundPorts) != 1 || actual.ExcludeInboundPorts[0] != "9090" { + return fmt.Errorf("ExcludeInboundPorts in iptables.Config was %v, but should be [9090, 1234]", actual.ExcludeInboundPorts) + } + if actual.ProxyInboundPort != 20000 { + return fmt.Errorf("ProxyInboundPort in iptables.Config was %d, but should be [20000]", 
actual.ProxyOutboundPort) + } + if actual.ProxyOutboundPort != 15001 { + return fmt.Errorf("ProxyOutboundPort in iptables.Config was %d, but should be [15001]", actual.ProxyOutboundPort) + } + return nil + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + // Start Consul server. + var serverCfg *testutil.TestServerConfig + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + serverCfg = c + }) + + // Add additional proxy configuration either to a config entry or to the service itself. + if c.registerProxyConfiguration { + loadResource(t, testClient.ResourceClient, getProxyConfigurationID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), getProxyConfiguration(), nil) + } + + // Register Consul workload. + loadResource(t, testClient.ResourceClient, getWorkloadID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), getWorkload(), nil) + + iptablesProvider := &fakeIptablesProvider{} + iptablesCfg := iptables.Config{ + ProxyUserID: "5995", + ProxyInboundPort: 20000, + ProxyOutboundPort: 15001, + } + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + maxPollingAttempts: 3, + iptablesProvider: iptablesProvider, + } + iptablesCfgJSON, err := json.Marshal(iptablesCfg) + require.NoError(t, err) + + flags := []string{ + "-proxy-name", testPodName, + "-addresses", "127.0.0.1", + "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), + "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + "-redirect-traffic-config", string(iptablesCfgJSON), + } + code := cmd.Run(flags) + require.Equal(t, 0, code, ui.ErrorWriter.String()) + require.Truef(t, iptablesProvider.applyCalled, "redirect traffic rules were not applied") + if c.expIptablesParamsFunc != nil { + errMsg := c.expIptablesParamsFunc(cmd.iptablesConfig) + require.NoError(t, errMsg) + } + }) + } +} + +const ( + testPodName = "foo" +) + +type fakeIptablesProvider struct { 
+ applyCalled bool + rules []string +} + +func loadResource(t *testing.T, client pbresource.ResourceServiceClient, id *pbresource.ID, proto proto.Message, owner *pbresource.ID) { + if id == nil || !proto.ProtoReflect().IsValid() { + return + } + + data, err := anypb.New(proto) + require.NoError(t, err) + + resource := &pbresource.Resource{ + Id: id, + Data: data, + Owner: owner, + } + + req := &pbresource.WriteRequest{Resource: resource} + _, err = client.Write(context.Background(), req) + require.NoError(t, err) + test.ResourceHasPersisted(t, context.Background(), client, id) +} + +func getWorkloadID(name, namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: name, + Type: pbcatalog.WorkloadType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + } +} + +// getWorkload creates a proxyConfiguration that matches the pod from createPod, +// assuming that metrics, telemetry, and overwrite probes are enabled separately. +func getWorkload() *pbcatalog.Workload { + return &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "10.0.0.1", Ports: []string{"public", "admin", "mesh"}}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "public": { + Port: 80, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "admin": { + Port: 8080, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "mesh": { + Port: constants.ProxyDefaultInboundPort, + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + NodeName: "k8s-node-0", + Identity: testPodName, + } +} + +func getProxyConfigurationID(name, namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: name, + Type: pbmesh.ProxyConfigurationType, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + }, + } +} + +// getProxyConfiguration creates a proxyConfiguration that matches the pod from createWorkload. 
+func getProxyConfiguration() *pbmesh.ProxyConfiguration { + return &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{testPodName}, + }, + DynamicConfig: &pbmesh.DynamicConfig{ + Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT, + ExposeConfig: &pbmesh.ExposeConfig{ + ExposePaths: []*pbmesh.ExposePath{ + { + ListenerPort: 20400, + LocalPathPort: 2001, + Path: "/livez", + }, + { + ListenerPort: 20300, + LocalPathPort: 2000, + Path: "/readyz", + }, + { + ListenerPort: 20500, + LocalPathPort: 2002, + Path: "/startupz", + }, + }, + }, + }, + BootstrapConfig: &pbmesh.BootstrapConfig{ + StatsBindAddr: "0.0.0.0:9090", + PrometheusBindAddr: "0.0.0.0:21234", // This gets added to the iptables exclude directly in the webhook + }, + } +} + +func (f *fakeIptablesProvider) AddRule(_ string, args ...string) { + f.rules = append(f.rules, strings.Join(args, " ")) +} + +func (f *fakeIptablesProvider) ApplyRules() error { + f.applyCalled = true + return nil +} + +func (f *fakeIptablesProvider) Rules() []string { + return f.rules +} diff --git a/control-plane/subcommand/partition-init/command.go b/control-plane/subcommand/partition-init/command.go index beb3a97d2c..19bb1bc6f5 100644 --- a/control-plane/subcommand/partition-init/command.go +++ b/control-plane/subcommand/partition-init/command.go @@ -13,9 +13,14 @@ import ( "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/consul-server-connection-manager/discovery" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/proto-public/pbresource" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" @@ -28,9 +33,10 @@ type Command struct { flags *flag.FlagSet consul *flags.ConsulFlags - flagLogLevel string - 
flagLogJSON bool - flagTimeout time.Duration + flagLogLevel string + flagLogJSON bool + flagTimeout time.Duration + flagV2Tenancy bool // ctx is cancelled when the command timeout is reached. ctx context.Context @@ -53,6 +59,8 @@ func (c *Command) init() { "\"debug\", \"info\", \"warn\", and \"error\".") c.flags.BoolVar(&c.flagLogJSON, "log-json", false, "Enable or disable JSON output format for logging.") + c.flags.BoolVar(&c.flagV2Tenancy, "enable-v2tenancy", false, + "Enable V2 tenancy.") c.consul = &flags.ConsulFlags{} flags.Merge(c.flags, c.consul.Flags()) @@ -71,7 +79,61 @@ func (c *Command) Help() string { return c.help } -func (c *Command) ensurePartition(scm consul.ServerConnectionManager) error { +func (c *Command) ensureV2Partition(scm consul.ServerConnectionManager) error { + client, err := consul.NewResourceServiceClient(scm) + if err != nil { + c.UI.Error(fmt.Sprintf("unable to create grpc client: %s", err)) + return err + } + + for { + id := &pbresource.ID{ + Name: c.consul.Partition, + Type: pbtenancy.PartitionType, + } + + _, err = client.Read(c.ctx, &pbresource.ReadRequest{Id: id}) + switch { + + // found -> done + case err == nil: + c.log.Info("Admin Partition already exists", "name", c.consul.Partition) + return nil + + // not found -> create + case status.Code(err) == codes.NotFound: + data, err := anypb.New(&pbtenancy.Partition{Description: "Created by Helm installation"}) + if err != nil { + continue + } + _, err = client.Write(c.ctx, &pbresource.WriteRequest{Resource: &pbresource.Resource{ + Id: id, + Data: data, + }}) + if err == nil { + c.log.Info("Successfully created Admin Partition", "name", c.consul.Partition) + return nil + } + + // unexpected error -> retry + default: + c.log.Error("Error reading Partition from Consul", "name", c.consul.Partition, "error", err.Error()) + } + + // Wait on either the retry duration (in which case we continue) or the + // overall command timeout. 
+ c.log.Info("Retrying in " + c.retryDuration.String()) + select { + case <-time.After(c.retryDuration): + continue + case <-c.ctx.Done(): + c.log.Error("Timed out attempting to ensure partition exists", "name", c.consul.Partition) + return err + } + } +} + +func (c *Command) ensureV1Partition(scm consul.ServerConnectionManager) error { state, err := scm.State() if err != nil { c.UI.Error(fmt.Sprintf("unable to get Consul server addresses from watcher: %s", err)) @@ -162,7 +224,11 @@ func (c *Command) Run(args []string) int { go watcher.Run() defer watcher.Stop() - err = c.ensurePartition(watcher) + if c.flagV2Tenancy { + err = c.ensureV2Partition(watcher) + } else { + err = c.ensureV1Partition(watcher) + } if err != nil { return 1 } diff --git a/control-plane/subcommand/partition-init/command_ent_test.go b/control-plane/subcommand/partition-init/command_ent_test.go index f53e92193b..21972a5a7a 100644 --- a/control-plane/subcommand/partition-init/command_ent_test.go +++ b/control-plane/subcommand/partition-init/command_ent_test.go @@ -13,8 +13,12 @@ import ( "github.com/mitchellh/cli" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/proto-public/pbresource" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul-k8s/control-plane/helper/test" @@ -68,11 +72,15 @@ func TestRun_PartitionCreate(t *testing.T) { partitionName := "test-partition" type testCase struct { + v2tenancy bool + experiments []string requirePartitionCreated func(testClient *test.TestServerClient) } testCases := map[string]testCase{ - "simple": { + "v2tenancy false": { + v2tenancy: false, + experiments: []string{}, requirePartitionCreated: func(testClient *test.TestServerClient) { consul, err := api.NewClient(testClient.Cfg.APIClientConfig) require.NoError(t, err) @@ -83,12 
+91,26 @@ func TestRun_PartitionCreate(t *testing.T) { require.Equal(t, partitionName, partition.Name) }, }, + "v2tenancy true": { + v2tenancy: true, + experiments: []string{"resource-apis", "v2tenancy"}, + requirePartitionCreated: func(testClient *test.TestServerClient) { + _, err := testClient.ResourceClient.Read(context.Background(), &pbresource.ReadRequest{ + Id: &pbresource.ID{ + Name: partitionName, + Type: pbtenancy.PartitionType, + }, + }) + require.NoError(t, err, "expected partition to be created") + }, + }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { var serverCfg *testutil.TestServerConfig testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = tc.experiments serverCfg = c }) @@ -103,6 +125,7 @@ func TestRun_PartitionCreate(t *testing.T) { "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), "-partition", partitionName, "-timeout", "1m", + "-enable-v2tenancy=" + strconv.FormatBool(tc.v2tenancy), } responseCode := cmd.Run(args) @@ -117,12 +140,17 @@ func TestRun_PartitionExists(t *testing.T) { partitionDesc := "Created before test" type testCase struct { + v2tenancy bool + experiments []string preCreatePartition func(testClient *test.TestServerClient) requirePartitionNotCreated func(testClient *test.TestServerClient) } testCases := map[string]testCase{ - "simple": { + "v2tenancy false": { + v2tenancy: false, + experiments: []string{}, + preCreatePartition: func(testClient *test.TestServerClient) { consul, err := api.NewClient(testClient.Cfg.APIClientConfig) require.NoError(t, err) @@ -144,12 +172,46 @@ func TestRun_PartitionExists(t *testing.T) { require.Equal(t, partitionDesc, partition.Description) }, }, + "v2tenancy true": { + v2tenancy: true, + experiments: []string{"resource-apis", "v2tenancy"}, + preCreatePartition: func(testClient *test.TestServerClient) { + data, err := anypb.New(&pbtenancy.Partition{Description: partitionDesc}) + require.NoError(t, err) + + _, err = 
testClient.ResourceClient.Write(context.Background(), &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: partitionName, + Type: pbtenancy.PartitionType, + }, + Data: data, + }, + }) + require.NoError(t, err) + }, + requirePartitionNotCreated: func(testClient *test.TestServerClient) { + rsp, err := testClient.ResourceClient.Read(context.Background(), &pbresource.ReadRequest{ + Id: &pbresource.ID{ + Name: partitionName, + Type: pbtenancy.PartitionType, + }, + }) + require.NoError(t, err) + + partition := &pbtenancy.Partition{} + err = anypb.UnmarshalTo(rsp.Resource.Data, partition, proto.UnmarshalOptions{}) + require.NoError(t, err) + require.Equal(t, partitionDesc, partition.Description) + }, + }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { var serverCfg *testutil.TestServerConfig testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = tc.experiments serverCfg = c }) @@ -166,6 +228,7 @@ func TestRun_PartitionExists(t *testing.T) { "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), "-partition", partitionName, + "-enable-v2tenancy=" + strconv.FormatBool(tc.v2tenancy), } responseCode := cmd.Run(args) @@ -180,33 +243,55 @@ func TestRun_PartitionExists(t *testing.T) { func TestRun_ExitsAfterTimeout(t *testing.T) { partitionName := "test-partition" - var serverCfg *testutil.TestServerConfig - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { - serverCfg = c - }) - - ui := cli.NewMockUi() - cmd := Command{ - UI: ui, + type testCase struct { + v2tenancy bool + experiments []string } - cmd.init() - - timeout := 500 * time.Millisecond - args := []string{ - "-addresses=" + "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), - "-timeout", timeout.String(), - "-partition", partitionName, + + testCases := 
map[string]testCase{ + "v2tenancy false": { + v2tenancy: false, + experiments: []string{}, + }, + "v2tenancy true": { + v2tenancy: true, + experiments: []string{"resource-apis", "v2tenancy"}, + }, } - testClient.TestServer.Stop() - startTime := time.Now() - responseCode := cmd.Run(args) - completeTime := time.Now() - require.Equal(t, 1, responseCode) + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + var serverCfg *testutil.TestServerConfig + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = tc.experiments + serverCfg = c + }) + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + cmd.init() - // While the timeout is 500ms, adding a buffer of 500ms ensures we account for - // some buffer time required for the task to run and assignments to occur. - require.WithinDuration(t, completeTime, startTime, timeout+500*time.Millisecond) + timeout := 500 * time.Millisecond + args := []string{ + "-addresses=" + "127.0.0.1", + "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), + "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + "-timeout", timeout.String(), + "-partition", partitionName, + "-enable-v2tenancy=" + strconv.FormatBool(tc.v2tenancy), + } + + testClient.TestServer.Stop() + startTime := time.Now() + responseCode := cmd.Run(args) + completeTime := time.Now() + require.Equal(t, 1, responseCode) + + // While the timeout is 500ms, adding a buffer of 500ms ensures we account for + // some buffer time required for the task to run and assignments to occur. 
+ require.WithinDuration(t, completeTime, startTime, timeout+500*time.Millisecond) + }) + } } diff --git a/control-plane/subcommand/server-acl-init/anonymous_token_test.go b/control-plane/subcommand/server-acl-init/anonymous_token_test.go index 06327c3a91..41689a88c7 100644 --- a/control-plane/subcommand/server-acl-init/anonymous_token_test.go +++ b/control-plane/subcommand/server-acl-init/anonymous_token_test.go @@ -15,7 +15,7 @@ import ( func Test_configureAnonymousPolicy(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testClient := completeSetup(t, false) consulHTTPAddr := testClient.TestServer.HTTPAddr consulGRPCAddr := testClient.TestServer.GRPCAddr diff --git a/control-plane/subcommand/server-acl-init/command.go b/control-plane/subcommand/server-acl-init/command.go index fc7550f232..3d3c28c5ae 100644 --- a/control-plane/subcommand/server-acl-init/command.go +++ b/control-plane/subcommand/server-acl-init/command.go @@ -34,8 +34,6 @@ import ( k8sflags "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" ) -const dnsProxyName = "dns-proxy" - type Command struct { UI cli.Ui @@ -46,6 +44,8 @@ type Command struct { flagResourcePrefix string flagK8sNamespace string + flagResourceAPIs bool // Use V2 APIs + flagAllowDNS bool flagSetServerTokens bool @@ -68,6 +68,8 @@ type Command struct { flagIngressGatewayNames []string flagTerminatingGatewayNames []string + flagAPIGatewayController bool + // Flags to configure Consul connection. 
flagServerPort uint @@ -122,13 +124,11 @@ type Command struct { state discovery.State - once sync.Once - help string - flagDNSProxy bool + once sync.Once + help string } func (c *Command) init() { - c.flags = flag.NewFlagSet("", flag.ContinueOnError) c.flags.StringVar(&c.flagResourcePrefix, "resource-prefix", "", "Prefix to use for Kubernetes resources.") @@ -137,6 +137,9 @@ func (c *Command) init() { c.flags.BoolVar(&c.flagSetServerTokens, "set-server-tokens", true, "Toggle for setting agent tokens for the servers.") + c.flags.BoolVar(&c.flagResourceAPIs, "enable-resource-apis", false, + "Enable or disable Consul V2 Resource APIs. This will affect the binding rule used for Kubernetes auth (Service vs. WorkloadIdentity)") + c.flags.BoolVar(&c.flagAllowDNS, "allow-dns", false, "Toggle for updating the anonymous token to allow DNS queries to work") c.flags.BoolVar(&c.flagClient, "client", true, @@ -170,6 +173,8 @@ func (c *Command) init() { "Name of a terminating gateway that needs an acl token. May be specified multiple times. "+ "[Enterprise Only] If using Consul namespaces and registering the gateway outside of the "+ "default namespace, specify the value in the form ..") + c.flags.BoolVar(&c.flagAPIGatewayController, "api-gateway-controller", false, + "Toggle for configuring ACL login for the API gateway controller.") c.flags.UintVar(&c.flagServerPort, "server-port", 8500, "The HTTP or HTTPS port of the Consul server. 
Defaults to 8500.") @@ -226,9 +231,6 @@ func (c *Command) init() { c.flags.BoolVar(&c.flagLogJSON, "log-json", false, "Enable or disable JSON output format for logging.") - c.flags.BoolVar(&c.flagDNSProxy, dnsProxyName, false, - "Toggle for configuring ACL login for the DNS proxy.") - c.k8s = &k8sflags.K8SFlags{} c.consulFlags = &flags.ConsulFlags{} flags.Merge(c.flags, c.k8s.Flags()) @@ -587,6 +589,28 @@ func (c *Command) Run(args []string) int { } } + if c.flagAPIGatewayController { + rules, err := c.apiGatewayControllerRules() + if err != nil { + c.log.Error("Error templating api gateway rules", "err", err) + return 1 + } + serviceAccountName := c.withPrefix("api-gateway-controller") + + // API gateways require a global policy/token because they must + // create config-entry resources in the primary, even when deployed + // to a secondary datacenter + authMethodName := localComponentAuthMethodName + if !primary { + authMethodName = globalComponentAuthMethodName + } + err = c.createACLPolicyRoleAndBindingRule("api-gateway-controller", rules, consulDC, primaryDC, globalPolicy, primary, authMethodName, serviceAccountName, dynamicClient) + if err != nil { + c.log.Error(err.Error()) + return 1 + } + } + if c.flagMeshGateway { rules, err := c.meshGatewayRules() if err != nil { @@ -675,21 +699,6 @@ func (c *Command) Run(args []string) int { } } - if c.flagDNSProxy { - serviceAccountName := c.withPrefix(dnsProxyName) - - dnsProxyRules, err := c.dnsProxyRules() - if err != nil { - c.log.Error("Error templating dns-proxy rules", "err", err) - return 1 - } - - if err := c.createACLPolicyRoleAndBindingRule(dnsProxyName, dnsProxyRules, consulDC, primaryDC, localPolicy, primary, localComponentAuthMethodName, serviceAccountName, dynamicClient); err != nil { - c.log.Error(err.Error()) - return 1 - } - } - c.log.Info("server-acl-init completed successfully") return 0 } diff --git a/control-plane/subcommand/server-acl-init/command_ent_test.go 
b/control-plane/subcommand/server-acl-init/command_ent_test.go index 834f789a1c..e28af9ef35 100644 --- a/control-plane/subcommand/server-acl-init/command_ent_test.go +++ b/control-plane/subcommand/server-acl-init/command_ent_test.go @@ -35,7 +35,9 @@ func TestRun_ConnectInject_SingleDestinationNamespace(t *testing.T) { t.Parallel() cases := map[string]struct { - Destination string + Destination string + ExtraFlags []string + V2BindingRule bool }{ "consul default ns": { Destination: "default", @@ -43,11 +45,16 @@ func TestRun_ConnectInject_SingleDestinationNamespace(t *testing.T) { "consul non-default ns": { Destination: "destination", }, + "consul non-default ns w/ resource-apis": { + Destination: "destination", + ExtraFlags: []string{"-enable-resource-apis=true"}, + V2BindingRule: true, + }, } for name, c := range cases { t.Run(name, func(tt *testing.T) { - k8s, testAgent := completeSetup(tt) + k8s, testAgent := completeSetup(tt, false) setUpK8sServiceAccount(tt, k8s, ns) ui := cli.NewMockUi() @@ -69,6 +76,10 @@ func TestRun_ConnectInject_SingleDestinationNamespace(t *testing.T) { "-acl-binding-rule-selector=serviceaccount.name!=default", } + if len(c.ExtraFlags) > 0 { + args = append(args, c.ExtraFlags...) 
+ } + responseCode := cmd.Run(args) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -110,8 +121,14 @@ func TestRun_ConnectInject_SingleDestinationNamespace(t *testing.T) { aclRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, namespaceQuery) require.NoError(t, err) require.NotNil(t, aclRule) - require.Equal(t, api.BindingRuleBindTypeService, aclRule.BindType) - require.Equal(t, "${serviceaccount.name}", aclRule.BindName) + if c.V2BindingRule { + require.Equal(t, api.BindingRuleBindTypeTemplatedPolicy, aclRule.BindType) + require.Equal(t, "builtin/workload-identity", aclRule.BindName) + require.Equal(t, "${serviceaccount.name}", aclRule.BindVars.Name) + } else { + require.Equal(t, api.BindingRuleBindTypeService, aclRule.BindType) + require.Equal(t, "${serviceaccount.name}", aclRule.BindName) + } require.Equal(t, "Kubernetes binding rule", aclRule.Description) require.Equal(t, "serviceaccount.name!=default", aclRule.Selector) @@ -154,6 +171,7 @@ func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { cases := map[string]struct { MirroringPrefix string ExtraFlags []string + V2BindingRule bool }{ "no prefix": { MirroringPrefix: "", @@ -169,11 +187,16 @@ func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { // effect. 
ExtraFlags: []string{"-consul-inject-destination-namespace=dest"}, }, + "no prefix w/ resource-apis": { + MirroringPrefix: "", + ExtraFlags: []string{"-enable-resource-apis=true"}, + V2BindingRule: true, + }, } for name, c := range cases { t.Run(name, func(tt *testing.T) { - k8s, testAgent := completeSetup(tt) + k8s, testAgent := completeSetup(tt, false) setUpK8sServiceAccount(tt, k8s, ns) ui := cli.NewMockUi() @@ -225,8 +248,14 @@ func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { aclRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, nil) require.NoError(t, err) require.NotNil(t, aclRule) - require.Equal(t, api.BindingRuleBindTypeService, aclRule.BindType) - require.Equal(t, "${serviceaccount.name}", aclRule.BindName) + if c.V2BindingRule { + require.Equal(t, api.BindingRuleBindTypeTemplatedPolicy, aclRule.BindType) + require.Equal(t, "builtin/workload-identity", aclRule.BindName) + require.Equal(t, "${serviceaccount.name}", aclRule.BindVars.Name) + } else { + require.Equal(t, api.BindingRuleBindTypeService, aclRule.BindType) + require.Equal(t, "${serviceaccount.name}", aclRule.BindName) + } require.Equal(t, "Kubernetes binding rule", aclRule.Description) require.Equal(t, "serviceaccount.name!=default", aclRule.Selector) }) @@ -286,7 +315,7 @@ func TestRun_ACLPolicyUpdates(t *testing.T) { k8sNamespaceFlags := []string{"default", "other"} for _, k8sNamespaceFlag := range k8sNamespaceFlags { t.Run(k8sNamespaceFlag, func(t *testing.T) { - k8s, testAgent := completeSetup(t) + k8s, testAgent := completeSetup(t, false) setUpK8sServiceAccount(t, k8s, k8sNamespaceFlag) ui := cli.NewMockUi() @@ -460,6 +489,8 @@ func TestRun_ConnectInject_Updates(t *testing.T) { AuthMethodExpectedNamespacePrefixConfig string // Expected namespace for the binding rule. BindingRuleExpectedNS string + // UseV2API, tests the bindingrule is compatible with workloadIdentites. 
+ UseV2API bool }{ "no ns => mirroring ns, no prefix": { FirstRunArgs: nil, @@ -589,11 +620,148 @@ func TestRun_ConnectInject_Updates(t *testing.T) { AuthMethodExpectedNamespacePrefixConfig: "", BindingRuleExpectedNS: "default", }, + "(v2) no ns => mirroring ns, no prefix": { + FirstRunArgs: nil, + SecondRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + }, + AuthMethodExpectedNS: "default", + AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "", + BindingRuleExpectedNS: "default", + UseV2API: true, + }, + "(v2) no ns => mirroring ns, prefix": { + FirstRunArgs: nil, + SecondRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + AuthMethodExpectedNS: "default", + AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "prefix-", + BindingRuleExpectedNS: "default", + UseV2API: true, + }, + "(v2) no ns => single dest ns": { + FirstRunArgs: nil, + SecondRunArgs: []string{ + "-enable-namespaces", + "-consul-inject-destination-namespace=dest", + }, + AuthMethodExpectedNS: "dest", + AuthMethodExpectMapNamespacesConfig: false, + AuthMethodExpectedNamespacePrefixConfig: "", + BindingRuleExpectedNS: "dest", + UseV2API: true, + }, + "(v2) mirroring ns => single dest ns": { + FirstRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + SecondRunArgs: []string{ + "-enable-namespaces", + "-consul-inject-destination-namespace=dest", + }, + AuthMethodExpectedNS: "dest", + AuthMethodExpectMapNamespacesConfig: false, + AuthMethodExpectedNamespacePrefixConfig: "", + BindingRuleExpectedNS: "dest", + UseV2API: true, + }, + "(v2) single dest ns => mirroring ns": { + FirstRunArgs: []string{ + "-enable-namespaces", + "-consul-inject-destination-namespace=dest", + }, + SecondRunArgs: []string{ + 
"-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + AuthMethodExpectedNS: "default", + AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "prefix-", + BindingRuleExpectedNS: "default", + UseV2API: true, + }, + "(v2) mirroring ns (no prefix) => mirroring ns (no prefix)": { + FirstRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=", + }, + SecondRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=", + }, + AuthMethodExpectedNS: "default", + AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "", + BindingRuleExpectedNS: "default", + UseV2API: true, + }, + "(v2) mirroring ns => mirroring ns (same prefix)": { + FirstRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + SecondRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + AuthMethodExpectedNS: "default", + AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "prefix-", + BindingRuleExpectedNS: "default", + UseV2API: true, + }, + "(v2) mirroring ns (no prefix) => mirroring ns (prefix)": { + FirstRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=", + }, + SecondRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + AuthMethodExpectedNS: "default", + AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "prefix-", + BindingRuleExpectedNS: "default", + UseV2API: true, + }, + "(v2) mirroring ns (prefix) => 
mirroring ns (no prefix)": { + FirstRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=prefix-", + }, + SecondRunArgs: []string{ + "-enable-namespaces", + "-enable-inject-k8s-namespace-mirroring", + "-inject-k8s-namespace-mirroring-prefix=", + }, + AuthMethodExpectedNS: "default", + AuthMethodExpectMapNamespacesConfig: true, + AuthMethodExpectedNamespacePrefixConfig: "", + BindingRuleExpectedNS: "default", + UseV2API: true, + }, } for name, c := range cases { t.Run(name, func(tt *testing.T) { - k8s, testAgent := completeSetup(tt) + k8s, testAgent := completeSetup(tt, c.UseV2API) setUpK8sServiceAccount(tt, k8s, ns) ui := cli.NewMockUi() @@ -607,6 +775,10 @@ func TestRun_ConnectInject_Updates(t *testing.T) { "-connect-inject", } + if c.UseV2API { + defaultArgs = append(defaultArgs, "-enable-resource-apis=true") + } + // First run. NOTE: we don't assert anything here since we've // tested these results in other tests. What we care about here // is the result after the second run. @@ -658,7 +830,11 @@ func TestRun_ConnectInject_Updates(t *testing.T) { }) require.NoError(t, err) require.Len(t, rules, 1) - require.Equal(tt, api.BindingRuleBindTypeService, rules[0].BindType) + if c.UseV2API { + require.Equal(tt, api.BindingRuleBindTypeTemplatedPolicy, rules[0].BindType) + } else { + require.Equal(tt, api.BindingRuleBindTypeService, rules[0].BindType) + } }) } } @@ -698,7 +874,7 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { } for testName, c := range cases { t.Run(testName, func(t *testing.T) { - k8s, testSvr := completeSetup(t) + k8s, testSvr := completeSetup(t, false) setUpK8sServiceAccount(t, k8s, ns) // Run the command. @@ -952,7 +1128,7 @@ partition "default" { } for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { - k8s, testSvr := completeSetup(t) + k8s, testSvr := completeSetup(t, false) setUpK8sServiceAccount(t, k8s, ns) // Run the command. 
@@ -1039,7 +1215,7 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_PrimaryDatacenter(t *testing.T) authMethodName := fmt.Sprintf("%s-%s", resourcePrefix, componentAuthMethod) serviceAccountName := fmt.Sprintf("%s-%s", resourcePrefix, c.ComponentName) - k8s, testSvr := completeSetup(t) + k8s, testSvr := completeSetup(t, false) _, jwtToken := setUpK8sServiceAccount(t, k8s, c.Namespace) k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -1199,7 +1375,7 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_SecondaryDatacenter(t *testing. func TestRun_PartitionTokenDefaultPartition_WithProvidedSecretID(t *testing.T) { t.Parallel() - k8s, testSvr := completeSetup(t) + k8s, testSvr := completeSetup(t, false) setUpK8sServiceAccount(t, k8s, ns) partitionToken := "123e4567-e89b-12d3-a456-426614174000" diff --git a/control-plane/subcommand/server-acl-init/command_test.go b/control-plane/subcommand/server-acl-init/command_test.go index 978144a21e..7c3b778f5e 100644 --- a/control-plane/subcommand/server-acl-init/command_test.go +++ b/control-plane/subcommand/server-acl-init/command_test.go @@ -104,7 +104,7 @@ func TestRun_FlagValidation(t *testing.T) { func TestRun_Defaults(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testClient := completeSetup(t, false) setUpK8sServiceAccount(t, k8s, ns) // Run the command. @@ -187,7 +187,7 @@ func TestRun_TokensPrimaryDC(t *testing.T) { } for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testClient := completeSetup(t, false) setUpK8sServiceAccount(t, k8s, ns) // Run the command. 
@@ -252,7 +252,7 @@ func TestRun_TokensPrimaryDC(t *testing.T) { func TestRun_ReplicationTokenPrimaryDC_WithProvidedSecretID(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testClient := completeSetup(t, false) setUpK8sServiceAccount(t, k8s, ns) replicationToken := "123e4567-e89b-12d3-a456-426614174000" @@ -528,7 +528,7 @@ func TestRun_AnonymousTokenPolicy(t *testing.T) { flags = append(flags, "-acl-replication-token-file", tmp.Name()) } else { var testClient *test.TestServerClient - k8s, testClient = completeSetup(t) + k8s, testClient = completeSetup(t, false) consulHTTPAddr = testClient.TestServer.HTTPAddr consulGRPCAddr = testClient.TestServer.GRPCAddr } @@ -603,8 +603,9 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { t.Parallel() cases := map[string]struct { - flags []string - expectedHost string + flags []string + expectedHost string + v2BindingRule bool }{ "-connect-inject flag": { flags: []string{"-connect-inject"}, @@ -617,11 +618,16 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { }, expectedHost: "https://my-kube.com", }, + "-enable-resource-apis flag": { + flags: []string{"-connect-inject", "-enable-resource-apis=true"}, + expectedHost: "https://kubernetes.default.svc", + v2BindingRule: true, + }, } for testName, c := range cases { t.Run(testName, func(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testClient := completeSetup(t, c.v2BindingRule) caCert, jwtToken := setUpK8sServiceAccount(t, k8s, ns) // Run the command. 
@@ -665,8 +671,14 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { require.NoError(t, err) require.Len(t, rules, 1) - require.Equal(t, "service", string(rules[0].BindType)) - require.Equal(t, "${serviceaccount.name}", rules[0].BindName) + if c.v2BindingRule { + require.Equal(t, "templated-policy", string(rules[0].BindType)) + require.Equal(t, "builtin/workload-identity", rules[0].BindName) + require.Equal(t, "${serviceaccount.name}", rules[0].BindVars.Name) + } else { + require.Equal(t, "service", string(rules[0].BindType)) + require.Equal(t, "${serviceaccount.name}", rules[0].BindName) + } require.Equal(t, bindingRuleSelector, rules[0].Selector) // Test that if the same command is re-run it doesn't error. @@ -689,7 +701,7 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { func TestRun_ConnectInjectAuthMethodUpdates(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testClient := completeSetup(t, false) caCert, jwtToken := setUpK8sServiceAccount(t, k8s, ns) ui := cli.NewMockUi() @@ -770,7 +782,7 @@ func TestRun_ConnectInjectAuthMethodUpdates(t *testing.T) { // Test that ACL binding rules are updated if the rule selector changes. func TestRun_BindingRuleUpdates(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testClient := completeSetup(t, false) setUpK8sServiceAccount(t, k8s, ns) consul, err := api.NewClient(&api.Config{ @@ -846,10 +858,92 @@ func TestRun_BindingRuleUpdates(t *testing.T) { } } +// Test that the ACL binding template is updated if the rule selector changes. +// V2 only. 
+func TestRun_TemplateBindingRuleUpdates(t *testing.T) { + k8s, testClient := completeSetup(t, true) + setUpK8sServiceAccount(t, k8s, ns) + + consul, err := api.NewClient(&api.Config{ + Address: testClient.TestServer.HTTPAddr, + }) + require.NoError(t, err) + + ui := cli.NewMockUi() + commonArgs := []string{ + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], + "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], + "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-enable-resource-apis=true", + "-connect-inject", + } + firstRunArgs := append(commonArgs, + "-acl-binding-rule-selector=serviceaccount.name!=default", + ) + // On the second run, we change the binding rule selector. + secondRunArgs := append(commonArgs, + "-acl-binding-rule-selector=serviceaccount.name!=changed", + ) + + // Run the command first to populate the binding rule. + cmd := Command{ + UI: ui, + clientset: k8s, + } + responseCode := cmd.Run(firstRunArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + // Validate the binding rule. + { + queryOpts := &api.QueryOptions{Token: getBootToken(t, k8s, resourcePrefix, ns)} + authMethodName := resourcePrefix + "-k8s-auth-method" + rules, _, err := consul.ACL().BindingRuleList(authMethodName, queryOpts) + require.NoError(t, err) + require.Len(t, rules, 1) + aclRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, queryOpts) + require.NoError(t, err) + require.NotNil(t, aclRule) + require.Equal(t, "Kubernetes binding rule", aclRule.Description) + require.Equal(t, "templated-policy", string(rules[0].BindType)) + require.Equal(t, "builtin/workload-identity", rules[0].BindName) + require.Equal(t, "${serviceaccount.name}", rules[0].BindVars.Name) + require.Equal(t, "serviceaccount.name!=default", aclRule.Selector) + } + + // Re-run the command with namespace flags. The policies should be updated. 
+ // NOTE: We're redefining the command so that the old flag values are + // reset. + cmd = Command{ + UI: ui, + clientset: k8s, + } + responseCode = cmd.Run(secondRunArgs) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + // Check the binding rule is changed expected. + { + queryOpts := &api.QueryOptions{Token: getBootToken(t, k8s, resourcePrefix, ns)} + authMethodName := resourcePrefix + "-k8s-auth-method" + rules, _, err := consul.ACL().BindingRuleList(authMethodName, queryOpts) + require.NoError(t, err) + require.Len(t, rules, 1) + aclRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, queryOpts) + require.NoError(t, err) + require.NotNil(t, aclRule) + require.Equal(t, "Kubernetes binding rule", aclRule.Description) + require.Equal(t, "templated-policy", string(rules[0].BindType)) + require.Equal(t, "builtin/workload-identity", rules[0].BindName) + require.Equal(t, "${serviceaccount.name}", rules[0].BindVars.Name) + require.Equal(t, "serviceaccount.name!=changed", aclRule.Selector) + } +} + // Test that the catalog sync policy is updated if the Consul node name changes. func TestRun_SyncPolicyUpdates(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testClient := completeSetup(t, false) setUpK8sServiceAccount(t, k8s, ns) ui := cli.NewMockUi() @@ -1773,7 +1867,7 @@ func TestRun_SkipBootstrapping_WhenServersAreDisabled(t *testing.T) { // Test that we exit after timeout. 
func TestRun_Timeout(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testClient := completeSetup(t, false) setUpK8sServiceAccount(t, k8s, ns) _, err := api.NewClient(&api.Config{ @@ -1925,7 +2019,7 @@ func TestRun_GatewayErrors(t *testing.T) { for testName, c := range cases { t.Run(testName, func(tt *testing.T) { - k8s, testClient := completeSetup(tt) + k8s, testClient := completeSetup(tt, false) setUpK8sServiceAccount(t, k8s, ns) require := require.New(tt) @@ -1976,6 +2070,12 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) PolicyNames: []string{"sync-catalog-policy"}, Roles: []string{resourcePrefix + "-sync-catalog-acl-role"}, }, + { + TestName: "API Gateway Controller", + TokenFlags: []string{"-api-gateway-controller"}, + PolicyNames: []string{"api-gateway-controller-policy"}, + Roles: []string{resourcePrefix + "-api-gateway-controller-acl-role"}, + }, { TestName: "Snapshot Agent", TokenFlags: []string{"-snapshot-agent"}, @@ -2021,7 +2121,7 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) } for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testClient := completeSetup(t, false) setUpK8sServiceAccount(t, k8s, ns) // Run the command. 
@@ -2123,6 +2223,13 @@ func TestRun_PoliciesAndBindingRulesACLLogin_SecondaryDatacenter(t *testing.T) { Roles: []string{resourcePrefix + "-sync-catalog-acl-role-" + secondaryDatacenter}, GlobalAuthMethod: false, }, + { + TestName: "API Gateway Controller", + TokenFlags: []string{"-api-gateway-controller"}, + PolicyNames: []string{"api-gateway-controller-policy-" + secondaryDatacenter}, + Roles: []string{resourcePrefix + "-api-gateway-controller-acl-role-" + secondaryDatacenter}, + GlobalAuthMethod: true, + }, { TestName: "Snapshot Agent", TokenFlags: []string{"-snapshot-agent"}, @@ -2274,6 +2381,12 @@ func TestRun_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { Roles: []string{resourcePrefix + "-sync-catalog-acl-role"}, GlobalToken: false, }, + { + ComponentName: "api-gateway-controller", + TokenFlags: []string{"-api-gateway-controller"}, + Roles: []string{resourcePrefix + "-api-gateway-controller-acl-role"}, + GlobalToken: false, + }, { ComponentName: "snapshot-agent", TokenFlags: []string{"-snapshot-agent"}, @@ -2322,7 +2435,7 @@ func TestRun_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { serviceAccountName = c.ServiceAccountName } - k8s, testClient := completeSetup(t) + k8s, testClient := completeSetup(t, false) _, jwtToken := setUpK8sServiceAccount(t, k8s, ns) k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -2405,6 +2518,13 @@ func TestRun_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { GlobalAuthMethod: false, GlobalToken: false, }, + { + ComponentName: "api-gateway-controller", + TokenFlags: []string{"-api-gateway-controller"}, + Roles: []string{resourcePrefix + "-api-gateway-controller-acl-role-dc2"}, + GlobalAuthMethod: true, + GlobalToken: true, + }, { ComponentName: "snapshot-agent", TokenFlags: []string{"-snapshot-agent"}, @@ -2531,7 +2651,7 @@ func TestRun_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { func TestRun_PrimaryDatacenter_ComponentAuthMethod(t *testing.T) { 
t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testClient := completeSetup(t, false) setUpK8sServiceAccount(t, k8s, ns) // Run the command. @@ -2607,11 +2727,15 @@ func TestRun_SecondaryDatacenter_ComponentAuthMethod(t *testing.T) { } // Set up test consul agent and kubernetes cluster. -func completeSetup(t *testing.T) (*fake.Clientset, *test.TestServerClient) { +func completeSetup(t *testing.T, useResourceAPI bool) (*fake.Clientset, *test.TestServerClient) { k8s := fake.NewSimpleClientset() testServerClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true + + if useResourceAPI { + c.Experiments = []string{"resource-apis"} + } }) testServerClient.TestServer.WaitForActiveCARoot(t) diff --git a/control-plane/subcommand/server-acl-init/connect_inject.go b/control-plane/subcommand/server-acl-init/connect_inject.go index d598bc8665..0e373d2ea5 100644 --- a/control-plane/subcommand/server-acl-init/connect_inject.go +++ b/control-plane/subcommand/server-acl-init/connect_inject.go @@ -78,12 +78,27 @@ func (c *Command) configureConnectInjectAuthMethod(client *consul.DynamicClient, return err } - abr := api.ACLBindingRule{ - Description: "Kubernetes binding rule", - AuthMethod: authMethodName, - BindType: api.BindingRuleBindTypeService, - BindName: "${serviceaccount.name}", - Selector: c.flagBindingRuleSelector, + var abr api.ACLBindingRule + if c.flagResourceAPIs { + c.log.Info("creating consul binding rule for WorkloadIdentityName") + abr = api.ACLBindingRule{ + Description: "Kubernetes binding rule", + AuthMethod: authMethodName, + BindType: api.BindingRuleBindTypeTemplatedPolicy, + BindName: api.ACLTemplatedPolicyWorkloadIdentityName, + BindVars: &api.ACLTemplatedPolicyVariables{ + Name: "${serviceaccount.name}", + }, + Selector: c.flagBindingRuleSelector, + } + } else { + abr = api.ACLBindingRule{ + Description: "Kubernetes binding rule", + AuthMethod: authMethodName, + BindType: 
api.BindingRuleBindTypeService, + BindName: "${serviceaccount.name}", + Selector: c.flagBindingRuleSelector, + } } return c.createConnectBindingRule(client, authMethodName, &abr) diff --git a/control-plane/subcommand/server-acl-init/rules.go b/control-plane/subcommand/server-acl-init/rules.go index ae145fae43..f408037157 100644 --- a/control-plane/subcommand/server-acl-init/rules.go +++ b/control-plane/subcommand/server-acl-init/rules.go @@ -145,6 +145,38 @@ partition_prefix "" { return c.renderRules(anonTokenRulesTpl) } +func (c *Command) apiGatewayControllerRules() (string, error) { + apiGatewayRulesTpl := `{{- if .EnablePartitions }} +partition "{{ .PartitionName }}" { + mesh = "write" + acl = "write" +{{- else }} +operator = "write" +acl = "write" +{{- end }} + +{{- if .EnableNamespaces }} +namespace_prefix "" { + policy = "write" +{{- end }} + service_prefix "" { + policy = "write" + intentions = "write" + } + node_prefix "" { + policy = "read" + } +{{- if .EnableNamespaces }} +} +{{- end }} +{{- if .EnablePartitions }} +} +{{- end }} +` + + return c.renderRules(apiGatewayRulesTpl) +} + // This assumes users are using the default name for the service, i.e. // "mesh-gateway". func (c *Command) meshGatewayRules() (string, error) { @@ -444,26 +476,3 @@ func (c *Command) renderRulesGeneric(tmpl string, data interface{}) (string, err return buf.String(), nil } - -// dnsProxyRules defines the ACL policy for the `dns-proxy` service. It defines the following: -// it defines the following: -// - read access to all nodes within the scoped partition -// - read access to all services within the scoped partition -// These accesses are needed to be able to perform DNS request over gRPC. 
-func (c *Command) dnsProxyRules() (string, error) { - dnsProxyRulesTpl := ` - {{- if .EnablePartitions }} - partition "{{ .PartitionName }}" { - {{- end }} - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" - } - {{- if .EnablePartitions }} - } - {{- end }} - ` - return c.renderRules(dnsProxyRulesTpl) -} diff --git a/control-plane/subcommand/server-acl-init/rules_test.go b/control-plane/subcommand/server-acl-init/rules_test.go index a31ca5dcd5..c1a02a2218 100644 --- a/control-plane/subcommand/server-acl-init/rules_test.go +++ b/control-plane/subcommand/server-acl-init/rules_test.go @@ -5,6 +5,7 @@ package serveraclinit import ( "fmt" + "strings" "testing" "github.com/stretchr/testify/require" @@ -142,6 +143,82 @@ partition_prefix "" { } } +func TestAPIGatewayControllerRules(t *testing.T) { + cases := []struct { + Name string + EnableNamespaces bool + Partition string + Expected string + }{ + { + Name: "Namespaces are disabled", + Expected: ` +operator = "write" +acl = "write" + service_prefix "" { + policy = "write" + intentions = "write" + } + node_prefix "" { + policy = "read" + }`, + }, + { + Name: "Namespaces are enabled", + EnableNamespaces: true, + Expected: ` +operator = "write" +acl = "write" +namespace_prefix "" { + policy = "write" + service_prefix "" { + policy = "write" + intentions = "write" + } + node_prefix "" { + policy = "read" + } +}`, + }, + { + Name: "Namespaces are enabled, partitions enabled", + EnableNamespaces: true, + Partition: "Default", + Expected: ` +partition "Default" { + mesh = "write" + acl = "write" +namespace_prefix "" { + policy = "write" + service_prefix "" { + policy = "write" + intentions = "write" + } + node_prefix "" { + policy = "read" + } +} +}`, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + cmd := Command{ + flagEnableNamespaces: tt.EnableNamespaces, + consulFlags: &flags.ConsulFlags{ + Partition: tt.Partition, + }, + } + + meshGatewayRules, err := 
cmd.apiGatewayControllerRules() + + require.NoError(t, err) + require.Equal(t, tt.Expected, strings.Trim(meshGatewayRules, " ")) + }) + } +} + func TestMeshGatewayRules(t *testing.T) { cases := []struct { Name string @@ -1013,101 +1090,6 @@ partition "part-1" { } } -// Test the dns-proxy rules with namespaces enabled or disabled. -func TestDnsProxyRules(t *testing.T) { - cases := []struct { - EnableNamespaces bool - EnablePartitions bool - EnablePeering bool - PartitionName string - Expected string - }{ - { - EnableNamespaces: false, - EnablePartitions: false, - EnablePeering: false, - Expected: ` - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" - }`, - }, - { - EnableNamespaces: true, - EnablePartitions: false, - EnablePeering: false, - Expected: ` - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" - }`, - }, - { - EnableNamespaces: true, - EnablePartitions: false, - EnablePeering: true, - Expected: ` - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" - }`, - }, - { - EnableNamespaces: true, - EnablePartitions: true, - EnablePeering: false, - PartitionName: "part-1", - Expected: ` - partition "part-1" { - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" - } - }`, - }, - { - EnableNamespaces: true, - EnablePartitions: true, - EnablePeering: true, - PartitionName: "part-1", - Expected: ` - partition "part-1" { - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" - } - }`, - }, - } - - for _, tt := range cases { - caseName := fmt.Sprintf("ns=%t, partition=%t, peering=%t", tt.EnableNamespaces, tt.EnablePartitions, tt.EnablePeering) - t.Run(caseName, func(t *testing.T) { - - cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, - flagEnableNamespaces: tt.EnableNamespaces, - flagEnablePeering: tt.EnablePeering, - } - - injectorRules, err := cmd.dnsProxyRules() - - require.NoError(t, err) - 
require.Equal(t, tt.Expected, injectorRules) - }) - } -} - func TestReplicationTokenRules(t *testing.T) { cases := []struct { Name string diff --git a/control-plane/tenancy/namespace/namespace.go b/control-plane/tenancy/namespace/namespace.go new file mode 100644 index 0000000000..55950bba1d --- /dev/null +++ b/control-plane/tenancy/namespace/namespace.go @@ -0,0 +1,117 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package namespace + +import ( + "context" + "fmt" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + "github.com/hashicorp/consul/proto-public/pbresource" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" +) + +// DeletionTimestampKey is the key in a resource's metadata that stores the timestamp +// when a resource was marked for deletion. This only applies to resources with finalizers. +const DeletionTimestampKey = "deletionTimestamp" + +// EnsureDeleted ensures a Consul namespace with name ns in partition ap is deleted or is in the +// process of being deleted. If neither, it will mark it for deletion. +func EnsureDeleted(ctx context.Context, client pbresource.ResourceServiceClient, ap, ns string) error { + if ns == common.WildcardNamespace || ns == common.DefaultNamespaceName { + return nil + } + + // Check if the Consul namespace exists. + rsp, err := client.Read(ctx, &pbresource.ReadRequest{Id: &pbresource.ID{ + Name: ns, + Type: pbtenancy.NamespaceType, + Tenancy: &pbresource.Tenancy{Partition: ap}, + }}) + + switch { + case status.Code(err) == codes.NotFound: + // Nothing to do + return nil + case err != nil: + // Unexpected error + return fmt.Errorf("namespace read failed: %w", err) + case isMarkedForDeletion(rsp.Resource): + // Deletion already in progress, nothing to do + return nil + default: + // Namespace found, so non-CAS delete it. 
+ _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: rsp.Resource.Id, Version: ""}) + if err != nil { + return fmt.Errorf("namespace delete failed: %w", err) + } + return nil + } +} + +// EnsureExists ensures a Consul namespace with name ns exists and is not marked +// for deletion. If it doesn't, exist it will create it. If it is marked for deletion, +// returns an error. +// +// Boolean return value indicates if the namespace was created by this call. +func EnsureExists(ctx context.Context, client pbresource.ResourceServiceClient, ap, ns string) (bool, error) { + if ns == common.WildcardNamespace || ns == common.DefaultNamespaceName { + return false, nil + } + + // Check if the Consul namespace exists. + rsp, err := client.Read(ctx, &pbresource.ReadRequest{Id: &pbresource.ID{ + Name: ns, + Type: pbtenancy.NamespaceType, + Tenancy: &pbresource.Tenancy{Partition: ap}, + }}) + + switch { + case err == nil && isMarkedForDeletion(rsp.Resource): + // Found, but delete in progress + return false, fmt.Errorf("consul namespace %q deletion in progress", ns) + case err == nil: + // Found and not marked for deletion, nothing to do + return false, nil + case status.Code(err) != codes.NotFound: + // Unexpected error + return false, fmt.Errorf("consul namespace read failed: %w", err) + } + + // Consul namespace not found, so create it + // TODO: Handle creation of crossNSACLPolicy when V2 ACLs are supported + nsData, err := anypb.New(&pbtenancy.Namespace{Description: "Auto-generated by consul-k8s"}) + if err != nil { + return false, err + } + + _, err = client.Write(ctx, &pbresource.WriteRequest{Resource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: ns, + Type: pbtenancy.NamespaceType, + Tenancy: &pbresource.Tenancy{Partition: ap}, + }, + Metadata: map[string]string{"external-source": "kubernetes"}, + Data: nsData, + }}) + + if err != nil { + return false, fmt.Errorf("consul namespace creation failed: %w", err) + } + return true, nil +} + +// isMarkedForDeletion 
returns true if a resource has been marked for deletion, +// false otherwise. +func isMarkedForDeletion(res *pbresource.Resource) bool { + if res.Metadata == nil { + return false + } + _, ok := res.Metadata[DeletionTimestampKey] + return ok +} diff --git a/control-plane/tenancy/namespace/namespace_controller.go b/control-plane/tenancy/namespace/namespace_controller.go new file mode 100644 index 0000000000..e08951b61c --- /dev/null +++ b/control-plane/tenancy/namespace/namespace_controller.go @@ -0,0 +1,95 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package namespace + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + injectcommon "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/consul" +) + +// Namespace syncing between K8s and Consul is vastly simplified when V2 tenancy is enabled. +// Put simply, a K8s namespace maps 1:1 to a Consul namespace of the same name and that is +// the only supported behavior. +// +// The plethora of configuration options available when using V1 tenancy have been removed +// to simplify the user experience and mapping rules. +// +// Hence, the following V1 tenancy namespace helm configuration values are ignored: +// - global.enableConsulNamespaces +// - connectInject.consulNamespaces.consulDestinationNamespace +// - connectInject.consulNamespaces.mirroringK8S +// - connectInject.consulNamespaces.mirroringK8SPrefix. +type Controller struct { + client.Client + // ConsulServerConnMgr is the watcher for the Consul server addresses. + ConsulServerConnMgr consul.ServerConnectionManager + // K8sNamespaceConfig manages allow/deny Kubernetes namespaces. 
+ common.K8sNamespaceConfig + // ConsulTenancyConfig contains the destination partition. + common.ConsulTenancyConfig + Log logr.Logger +} + +// Reconcile reads a Kubernetes Namespace and reconciles the mapped namespace in Consul. +func (r *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + var namespace corev1.Namespace + + // Ignore the request if the namespace should not be synced to consul. + if injectcommon.ShouldIgnore(req.Name, r.DenyK8sNamespacesSet, r.AllowK8sNamespacesSet) { + return ctrl.Result{}, nil + } + + // Create a gRPC resource service client + resourceClient, err := consul.NewResourceServiceClient(r.ConsulServerConnMgr) + if err != nil { + r.Log.Error(err, "failed to create Consul resource service client", "name", req.Name) + return ctrl.Result{}, err + } + + // Target consul tenancy + consulAP := r.ConsulPartition + consulNS := req.Name + + // Re-read the k8s namespace object + err = r.Client.Get(ctx, req.NamespacedName, &namespace) + + // If the namespace object has been deleted (we get an IsNotFound error), + // we need to remove the Namespace from Consul. + if k8serrors.IsNotFound(err) { + if err := EnsureDeleted(ctx, resourceClient, consulAP, consulNS); err != nil { + return ctrl.Result{}, fmt.Errorf("error deleting consul namespace: %w", err) + } + + return ctrl.Result{}, nil + } else if err != nil { + r.Log.Error(err, "failed to get k8s namespace", "name", req.Name) + return ctrl.Result{}, err + } + + // k8s namespace found, so make sure it is mapped correctly and exists in Consul. + r.Log.Info("retrieved", "k8s namespace", namespace.GetName()) + + if _, err := EnsureExists(ctx, resourceClient, consulAP, consulNS); err != nil { + r.Log.Error(err, "error checking or creating consul namespace", "namespace", consulNS) + return ctrl.Result{}, fmt.Errorf("error checking or creating consul namespace: %w", err) + } + return ctrl.Result{}, nil +} + +// SetupWithManager registers this controller with the manager. 
+func (r *Controller) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Namespace{}). + Complete(r) +} diff --git a/control-plane/tenancy/namespace/namespace_controller_ent_test.go b/control-plane/tenancy/namespace/namespace_controller_ent_test.go new file mode 100644 index 0000000000..997164d638 --- /dev/null +++ b/control-plane/tenancy/namespace/namespace_controller_ent_test.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build enterprise + +package namespace + +import ( + "testing" +) + +func TestReconcileCreateNamespace_ENT(t *testing.T) { + testCases := []createTestCase{ + { + name: "consul namespace is ap1/ns1", + kubeNamespace: "ns1", + partition: "ap1", + expectedConsulNamespace: "ns1", + }, + } + testReconcileCreateNamespace(t, testCases) +} + +func TestReconcileDeleteNamespace_ENT(t *testing.T) { + testCases := []deleteTestCase{ + { + name: "non-default partition", + kubeNamespace: "ns1", + partition: "ap1", + existingConsulNamespace: "ns1", + expectNamespaceDeleted: "ns1", + }, + } + testReconcileDeleteNamespace(t, testCases) +} diff --git a/control-plane/tenancy/namespace/namespace_controller_test.go b/control-plane/tenancy/namespace/namespace_controller_test.go new file mode 100644 index 0000000000..7eee397659 --- /dev/null +++ b/control-plane/tenancy/namespace/namespace_controller_test.go @@ -0,0 +1,305 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package namespace + +import ( + "context" + "testing" + "time" + + mapset "github.com/deckarep/golang-set" + logrtest "github.com/go-logr/logr/testr" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/hashicorp/consul/proto-public/pbresource" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" + "github.com/hashicorp/consul/sdk/testutil" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +func TestReconcileCreateNamespace(t *testing.T) { + testCases := []createTestCase{ + { + name: "consul namespace is default/ns1", + kubeNamespace: "ns1", + partition: constants.DefaultConsulPartition, + expectedConsulNamespace: "ns1", + }, + } + testReconcileCreateNamespace(t, testCases) +} + +type createTestCase struct { + name string + kubeNamespace string + partition string + expectedConsulNamespace string +} + +// testReconcileCreateNamespace ensures that a new k8s namespace is reconciled to a +// Consul namespace. The actual namespace in Consul depends on if the controller +// is configured with a destination namespace or mirroring enabled. +func testReconcileCreateNamespace(t *testing.T, testCases []createTestCase) { + run := func(t *testing.T, tc createTestCase) { + // Create the default kube namespace and kube namespace under test. 
+ kubeNS := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: tc.kubeNamespace}} + kubeDefaultNS := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: metav1.NamespaceDefault}} + kubeObjects := []client.Object{ + &kubeNS, + &kubeDefaultNS, + } + fakeClient := fake.NewClientBuilder(). + WithObjects(kubeObjects...). + WithStatusSubresource(kubeObjects...). + Build() + + // Fire up consul server with v2tenancy enabled + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis", "v2tenancy"} + }) + + // Create partition if needed + testClient.Cfg.APIClientConfig.Partition = tc.partition + if tc.partition != "" && tc.partition != "default" { + _, err := testClient.ResourceClient.Write(context.Background(), &pbresource.WriteRequest{Resource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: tc.partition, + Type: pbtenancy.PartitionType, + }, + }}) + require.NoError(t, err, "failed to create partition") + } + + // Create the namespace controller injecting config from tc + nc := &Controller{ + Client: fakeClient, + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + ConsulTenancyConfig: common.ConsulTenancyConfig{ + ConsulPartition: tc.partition, + }, + Log: logrtest.New(t), + } + + // Reconcile the kube namespace under test + resp, err := nc.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: tc.kubeNamespace, + }, + }) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // Verify consul namespace exists or was created during reconciliation + _, err = testClient.ResourceClient.Read(context.Background(), &pbresource.ReadRequest{ + Id: &pbresource.ID{ + Name: tc.expectedConsulNamespace, + Type: pbtenancy.NamespaceType, + Tenancy: &pbresource.Tenancy{Partition: tc.partition}, + }, + }) + 
require.NoError(t, err, "expected partition/namespace %s/%s to exist", tc.partition, tc.expectedConsulNamespace) + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestReconcileDeleteNamespace(t *testing.T) { + testCases := []deleteTestCase{ + { + name: "consul namespace ns1", + kubeNamespace: "ns1", + partition: "default", + existingConsulNamespace: "ns1", + expectNamespaceDeleted: "ns1", + }, + { + name: "consul default namespace does not get deleted", + kubeNamespace: metav1.NamespaceDefault, + partition: "default", + existingConsulNamespace: "", + expectNamespaceExists: "default", + }, + { + name: "namespace is already removed from Consul", + kubeNamespace: "ns1", + partition: "default", + existingConsulNamespace: "", // don't pre-create consul namespace + expectNamespaceDeleted: "ns1", // read as "was never created" + }, + } + testReconcileDeleteNamespace(t, testCases) +} + +type deleteTestCase struct { + name string + kubeNamespace string + partition string + existingConsulNamespace string // If non-empty, this namespace is created in consul pre-reconcile + + // Pick one + expectNamespaceExists string // If non-empty, this namespace should exist in consul post-reconcile + expectNamespaceDeleted string // If non-empty, this namespace should not exist in consul post-reconcile +} + +// Tests deleting a Namespace object, with and without matching Consul namespace. +func testReconcileDeleteNamespace(t *testing.T, testCases []deleteTestCase) { + run := func(t *testing.T, tc deleteTestCase) { + // Don't seed with any kube namespaces since we're testing deletion. 
+ fakeClient := fake.NewClientBuilder().WithRuntimeObjects().Build() + + // Fire up consul server with v2tenancy enabled + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis", "v2tenancy"} + }) + + // Create partition if needed + testClient.Cfg.APIClientConfig.Partition = tc.partition + if tc.partition != "" && tc.partition != "default" { + _, err := testClient.ResourceClient.Write(context.Background(), &pbresource.WriteRequest{Resource: &pbresource.Resource{ + Id: &pbresource.ID{ + Name: tc.partition, + Type: pbtenancy.PartitionType, + }, + }}) + require.NoError(t, err, "failed to create partition") + } + + // Create the consul namespace if needed + if tc.existingConsulNamespace != "" && tc.existingConsulNamespace != "default" { + id := &pbresource.ID{ + Name: tc.existingConsulNamespace, + Type: pbtenancy.NamespaceType, + Tenancy: &pbresource.Tenancy{Partition: tc.partition}, + } + + rsp, err := testClient.ResourceClient.Write(context.Background(), &pbresource.WriteRequest{Resource: &pbresource.Resource{Id: id}}) + require.NoError(t, err, "failed to create namespace") + + // TODO: Remove after https://hashicorp.atlassian.net/browse/NET-6719 implemented + requireEventuallyAccepted(t, testClient.ResourceClient, rsp.Resource.Id) + } + + // Create the namespace controller. 
+ nc := &Controller{ + Client: fakeClient, + ConsulServerConnMgr: testClient.Watcher, + K8sNamespaceConfig: common.K8sNamespaceConfig{ + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + }, + ConsulTenancyConfig: common.ConsulTenancyConfig{ + ConsulPartition: tc.partition, + }, + Log: logrtest.New(t), + } + + // Reconcile the kube namespace under test - imagine it has just been deleted + resp, err := nc.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: tc.kubeNamespace, + }, + }) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // Verify appropriate action was taken on the counterpart consul namespace + if tc.expectNamespaceExists != "" { + // Verify consul namespace was not deleted + _, err = testClient.ResourceClient.Read(context.Background(), &pbresource.ReadRequest{ + Id: &pbresource.ID{ + Name: tc.expectNamespaceExists, + Type: pbtenancy.NamespaceType, + Tenancy: &pbresource.Tenancy{Partition: tc.partition}, + }, + }) + require.NoError(t, err, "expected partition/namespace %s/%s to exist", tc.partition, tc.expectNamespaceExists) + } else if tc.expectNamespaceDeleted != "" { + // Verify consul namespace was deleted + id := &pbresource.ID{ + Name: tc.expectNamespaceDeleted, + Type: pbtenancy.NamespaceType, + Tenancy: &pbresource.Tenancy{Partition: tc.partition}, + } + requireEventuallyNotFound(t, testClient.ResourceClient, id) + } else { + panic("tc.expectedNamespaceExists or tc.expectedNamespaceDeleted must be set") + } + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +// RequireStatusAccepted waits for a recently created resource to have a resource status of accepted so that +// attempts to delete it by the single-shot controller under test's reconcile will not fail with a CAS error. +// +// Remove refs to this after https://hashicorp.atlassian.net/browse/NET-6719 is implemented. 
+func requireEventuallyAccepted(t *testing.T, resourceClient pbresource.ResourceServiceClient, id *pbresource.ID) { + require.Eventuallyf(t, + func() bool { + rsp, err := resourceClient.Read(context.Background(), &pbresource.ReadRequest{Id: id}) + if err != nil { + return false + } + if rsp.Resource.Status == nil || len(rsp.Resource.Status) == 0 { + return false + } + + for _, status := range rsp.Resource.Status { + for _, condition := range status.Conditions { + // common.ConditionAccepted in consul namespace controller + if condition.Type == "accepted" && condition.State == pbresource.Condition_STATE_TRUE { + return true + } + } + } + return false + }, + time.Second*5, + time.Millisecond*100, + "timed out out waiting for %s to have status accepted", + id, + ) +} + +func requireEventuallyNotFound(t *testing.T, resourceClient pbresource.ResourceServiceClient, id *pbresource.ID) { + // allow both "not found" and "marked for deletion" so we're not waiting around unnecessarily + require.Eventuallyf(t, func() bool { + rsp, err := resourceClient.Read(context.Background(), &pbresource.ReadRequest{Id: id}) + if err == nil { + return isMarkedForDeletion(rsp.Resource) + } + if status.Code(err) == codes.NotFound { + return true + } + return false + }, + time.Second*5, + time.Millisecond*100, + "timed out waiting for %s to not be found", + id, + ) +} diff --git a/hack/aws-acceptance-test-cleanup/main.go b/hack/aws-acceptance-test-cleanup/main.go index 1680e9ca63..556f8420c1 100644 --- a/hack/aws-acceptance-test-cleanup/main.go +++ b/hack/aws-acceptance-test-cleanup/main.go @@ -201,7 +201,7 @@ func realMain(ctx context.Context) error { if err != nil { return err } - toDeleteVPCs = append(toDeleteVPCs, vpcsOutput.Vpcs...) 
+ toDeleteVPCs = append(vpcsOutput.Vpcs) nextToken = vpcsOutput.NextToken if nextToken == nil { break @@ -371,11 +371,6 @@ func realMain(ctx context.Context) error { }, }, }) - - if err != nil { - return err - } - vpcPeeringConnectionsToDelete := append(vpcPeeringConnectionsWithAcceptor.VpcPeeringConnections, vpcPeeringConnectionsWithRequester.VpcPeeringConnections...) // Delete NAT gateways. @@ -387,11 +382,9 @@ func realMain(ctx context.Context) error { }, }, }) - if err != nil { return err } - for _, gateway := range natGateways.NatGateways { fmt.Printf("NAT gateway: Destroying... [id=%s]\n", *gateway.NatGatewayId) _, err = ec2Client.DeleteNatGatewayWithContext(ctx, &ec2.DeleteNatGatewayInput{ @@ -496,11 +489,6 @@ func realMain(ctx context.Context) error { }, }, }) - - if err != nil { - return err - } - for _, igw := range igws.InternetGateways { fmt.Printf("Internet gateway: Detaching from VPC... [id=%s]\n", *igw.InternetGatewayId) if err := destroyBackoff(ctx, "Internet Gateway", *igw.InternetGatewayId, func() error { @@ -528,37 +516,6 @@ func realMain(ctx context.Context) error { fmt.Printf("Internet gateway: Destroyed [id=%s]\n", *igw.InternetGatewayId) } - // Delete network interfaces - networkInterfaces, err := ec2Client.DescribeNetworkInterfacesWithContext(ctx, &ec2.DescribeNetworkInterfacesInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String("vpc-id"), - Values: []*string{vpcID}, - }, - }, - }) - - if err != nil { - return err - } - - for _, networkInterface := range networkInterfaces.NetworkInterfaces { - fmt.Printf("Network Interface: Destroying... 
[id=%s]\n", *networkInterface.NetworkInterfaceId) - if err := destroyBackoff(ctx, "Network Interface", *networkInterface.NetworkInterfaceId, func() error { - _, err := ec2Client.DeleteNetworkInterfaceWithContext(ctx, &ec2.DeleteNetworkInterfaceInput{ - NetworkInterfaceId: networkInterface.NetworkInterfaceId, - }) - if err != nil { - return err - } - return nil - }); err != nil { - return err - } - - fmt.Printf("Network interface: Destroyed [id=%s]\n", *networkInterface.NetworkInterfaceId) - } - // Delete subnets. subnets, err := ec2Client.DescribeSubnetsWithContext(ctx, &ec2.DescribeSubnetsInput{ Filters: []*ec2.Filter{ @@ -568,11 +525,6 @@ func realMain(ctx context.Context) error { }, }, }) - - if err != nil { - return err - } - for _, subnet := range subnets.Subnets { fmt.Printf("Subnet: Destroying... [id=%s]\n", *subnet.SubnetId) if err := destroyBackoff(ctx, "Subnet", *subnet.SubnetId, func() error { @@ -600,11 +552,6 @@ func realMain(ctx context.Context) error { }, }, }) - - if err != nil { - return err - } - for _, routeTable := range routeTables.RouteTables { // Find out if this is the main route table. var mainRouteTable bool @@ -638,11 +585,6 @@ func realMain(ctx context.Context) error { }, }, }) - - if err != nil { - return err - } - for _, sg := range sgs.SecurityGroups { if len(sg.IpPermissions) > 0 { revokeSGInput := &ec2.RevokeSecurityGroupIngressInput{GroupId: sg.GroupId} diff --git a/version/version.go b/version/version.go index e5d773c66d..152661252d 100644 --- a/version/version.go +++ b/version/version.go @@ -17,7 +17,7 @@ var ( // // Version must conform to the format expected by // github.com/hashicorp/go-version for tests to work. - Version = "1.6.0" + Version = "1.4.5" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release