From fe5597823406187192499163b270755e1891bb64 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
<41898282+github-actions[bot]@users.noreply.github.com>
Date: Wed, 11 Sep 2024 09:48:38 +0800
Subject: [PATCH] Sync feature branch to master/feature directory (#2400)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
---
feature/CONTRIBUTORS.md | 121 +
feature/LICENSE | 201 +
feature/Makefile | 619 ++
feature/OWNERS | 20 +
feature/README.md | 28 +
feature/build/controller-manager/Dockerfile | 37 +
feature/build/kk/Dockerfile | 44 +
feature/builtin/Makefile | 25 +
feature/builtin/fs.go | 33 +
feature/builtin/inventory/config.yaml | 46 +
feature/builtin/inventory/inventory.yaml | 38 +
.../builtin/playbooks/artifact_export.yaml | 8 +
.../builtin/playbooks/artifact_images.yaml | 6 +
feature/builtin/playbooks/certs_renew.yaml | 34 +
feature/builtin/playbooks/create_cluster.yaml | 50 +
.../builtin/playbooks/hook/post_install.yaml | 22 +
.../builtin/playbooks/hook/pre_install.yaml | 22 +
feature/builtin/playbooks/init_os.yaml | 12 +
feature/builtin/playbooks/init_registry.yaml | 15 +
feature/builtin/playbooks/precheck.yaml | 16 +
.../builtin/playbooks/vars/certs_renew.yaml | 10 +
.../vars/create_cluster_kubernetes.yaml | 27 +
.../roles/addons/cni/defaults/main.yaml | 114 +
.../roles/addons/cni/tasks/calico.yaml | 11 +
.../roles/addons/cni/tasks/cilium.yaml | 37 +
.../roles/addons/cni/tasks/flannel.yaml | 11 +
.../roles/addons/cni/tasks/hybridnet.yaml | 19 +
.../roles/addons/cni/tasks/kubeovn.yaml | 30 +
.../builtin/roles/addons/cni/tasks/main.yaml | 18 +
.../roles/addons/cni/tasks/multus.yaml | 9 +
.../addons/cni/templates/calico/pdg.yaml | 35 +
.../addons/cni/templates/calico/v3.27.yaml | 5342 +++++++++++++++++
.../addons/cni/templates/flannel/flannel.yaml | 213 +
.../addons/cni/templates/multus/multus.yaml | 206 +
.../roles/addons/kata/defaults/main.yaml | 4 +
.../builtin/roles/addons/kata/tasks/main.yaml | 11 +
.../addons/kata/templates/kata-deploy.yaml | 127 +
.../roles/addons/nfd/defaults/main.yaml | 4 +
.../builtin/roles/addons/nfd/tasks/main.yaml | 11 +
.../addons/nfd/templates/nfd-deploy.yaml | 620 ++
.../roles/addons/sc/defaults/main.yaml | 15 +
.../builtin/roles/addons/sc/tasks/local.yaml | 9 +
.../builtin/roles/addons/sc/tasks/main.yaml | 6 +
.../builtin/roles/addons/sc/tasks/nfs.yaml | 13 +
.../addons/sc/templates/local-volume.yaml | 150 +
.../roles/certs/renew-etcd/tasks/main.yaml | 26 +
.../certs/renew-kubernetes/tasks/etcd.yaml | 19 +
.../certs/renew-kubernetes/tasks/kube.yaml | 51 +
.../certs/renew-kubernetes/tasks/main.yaml | 28 +
.../certs/renew-registry/tasks/harbor.yaml | 20 +
.../certs/renew-registry/tasks/main.yaml | 6 +
.../certs/renew-registry/tasks/registry.yaml | 20 +
.../init/init-artifact/defaults/main.yaml | 225 +
.../init-artifact/tasks/download_by_curl.yaml | 252 +
.../init-artifact/tasks/download_by_helm.yaml | 44 +
.../roles/init/init-artifact/tasks/main.yaml | 37 +
.../roles/init/init-artifact/tasks/pki.yaml | 52 +
.../roles/init/init-os/defaults/main.yaml | 2 +
.../init/init-os/tasks/init_ntpserver.yaml | 39 +
.../init/init-os/tasks/init_repository.yaml | 78 +
.../roles/init/init-os/tasks/main.yaml | 27 +
.../roles/init/init-os/templates/init-os.sh | 229 +
.../roles/install/certs/defaults/main.yaml | 14 +
.../certs/files/k8s-certs-renew.service | 5 +
.../install/certs/files/k8s-certs-renew.timer | 7 +
.../roles/install/certs/tasks/main.yaml | 20 +
.../install/certs/templates/renew_script.sh | 29 +
.../roles/install/cri/defaults/main.yaml | 32 +
.../install/cri/files/containerd.service | 26 +
.../install/cri/files/cri_docker.service | 36 +
.../roles/install/cri/files/docker.service | 47 +
.../install/cri/tasks/install_containerd.yaml | 62 +
.../install/cri/tasks/install_crictl.yaml | 22 +
.../install/cri/tasks/install_cridockerd.yaml | 29 +
.../install/cri/tasks/install_docker.yaml | 56 +
.../builtin/roles/install/cri/tasks/main.yaml | 19 +
.../install/cri/templates/containerd.config | 84 +
.../roles/install/cri/templates/crictl.config | 5 +
.../roles/install/cri/templates/docker.config | 19 +
.../roles/install/etcd/defaults/main.yaml | 27 +
.../roles/install/etcd/files/backup.service | 5 +
.../roles/install/etcd/files/etcd.service | 18 +
.../roles/install/etcd/tasks/backup_etcd.yaml | 21 +
.../install/etcd/tasks/install_etcd.yaml | 58 +
.../roles/install/etcd/tasks/main.yaml | 27 +
.../roles/install/etcd/templates/backup.sh | 33 +
.../roles/install/etcd/templates/backup.timer | 7 +
.../roles/install/etcd/templates/etcd.env | 57 +
.../install/image-registry/defaults/main.yaml | 51 +
.../image-registry/files/containerd.service | 26 +
.../image-registry/files/docker.service | 47 +
.../image-registry/tasks/install_docker.yaml | 34 +
.../tasks/install_docker_compose.yaml | 13 +
.../image-registry/tasks/install_harbor.yaml | 52 +
.../tasks/install_keepalived.yaml | 23 +
.../tasks/install_registry.yaml | 58 +
.../image-registry/tasks/load_images.yaml | 55 +
.../install/image-registry/tasks/main.yaml | 30 +
.../image-registry/templates/docker.config | 19 +
.../image-registry/templates/harbor.config | 311 +
.../image-registry/templates/harbor.service | 12 +
.../harbor_keepalived.docker-compose | 26 +
.../templates/keepalived.config | 31 +
.../templates/keepalived.healthcheck | 17 +
.../image-registry/templates/registry.config | 218 +
.../templates/registry.docker-compose | 54 +
.../image-registry/templates/registry.service | 12 +
.../install/kubernetes/defaults/main.yaml | 166 +
.../kubernetes/files/audit/audit_policy.yaml | 123 +
.../kubernetes/files/audit/audit_webhook.yaml | 15 +
.../install/kubernetes/files/kubelet.service | 15 +
.../kubernetes/tasks/deploy_cluster_dns.yaml | 19 +
.../kubernetes/tasks/deploy_haproxy.yaml | 15 +
.../kubernetes/tasks/deploy_kube_vip.yaml | 28 +
.../kubernetes/tasks/init_kubernetes.yaml | 85 +
.../kubernetes/tasks/install_binaries.yaml | 92 +
.../kubernetes/tasks/join_kubernetes.yaml | 43 +
.../roles/install/kubernetes/tasks/main.yaml | 69 +
.../templates/dns/coredns.deployment | 262 +
.../templates/dns/nodelocaldns.daemonset | 229 +
.../kubernetes/templates/haproxy/haproxy.cfg | 41 +
.../kubernetes/templates/haproxy/haproxy.yaml | 41 +
.../templates/kubeadm/kubeadm-init.v1beta2 | 200 +
.../templates/kubeadm/kubeadm-init.v1beta3 | 195 +
.../templates/kubeadm/kubeadm-join.v1beta2 | 19 +
.../templates/kubeadm/kubeadm-join.v1beta3 | 19 +
.../kubernetes/templates/kubeadm/kubelet.env | 12 +
.../kubernetes/templates/kubevip/kubevip.ARP | 65 +
.../kubernetes/templates/kubevip/kubevip.BGP | 82 +
.../roles/install/nfs/defaults/main.yaml | 3 +
.../roles/install/nfs/tasks/debian.yaml | 28 +
.../builtin/roles/install/nfs/tasks/main.yaml | 6 +
.../builtin/roles/install/nfs/tasks/rhel.yaml | 28 +
.../roles/install/nfs/templates/exports | 3 +
.../roles/install/security/tasks/main.yaml | 39 +
.../precheck/artifact_check/tasks/main.yaml | 21 +
.../precheck/env_check/defaults/main.yaml | 31 +
.../roles/precheck/env_check/tasks/cri.yaml | 18 +
.../roles/precheck/env_check/tasks/etcd.yaml | 48 +
.../roles/precheck/env_check/tasks/main.yaml | 45 +
.../precheck/env_check/tasks/network.yaml | 22 +
.../roles/precheck/env_check/tasks/nfs.yaml | 6 +
.../roles/precheck/env_check/tasks/os.yaml | 38 +
.../controller-manager/app/options/common.go | 163 +
.../app/options/controller_manager.go | 61 +
feature/cmd/controller-manager/app/server.go | 84 +
feature/cmd/controller-manager/app/version.go | 33 +
.../controller-manager/controller_manager.go | 31 +
feature/cmd/kk/app/artifact.go | 108 +
feature/cmd/kk/app/certs.go | 76 +
feature/cmd/kk/app/create.go | 76 +
feature/cmd/kk/app/init.go | 106 +
feature/cmd/kk/app/options/artifact.go | 134 +
feature/cmd/kk/app/options/builtin.go | 36 +
feature/cmd/kk/app/options/certs.go | 77 +
feature/cmd/kk/app/options/common.go | 166 +
feature/cmd/kk/app/options/create.go | 96 +
feature/cmd/kk/app/options/init.go | 132 +
feature/cmd/kk/app/options/option.go | 257 +
feature/cmd/kk/app/options/pipeline.go | 32 +
feature/cmd/kk/app/options/precheck.go | 79 +
feature/cmd/kk/app/options/run.go | 117 +
feature/cmd/kk/app/pipeline.go | 83 +
feature/cmd/kk/app/precheck.go | 66 +
feature/cmd/kk/app/root.go | 72 +
feature/cmd/kk/app/run.go | 105 +
feature/cmd/kk/app/version.go | 33 +
feature/cmd/kk/kubekey.go | 31 +
feature/config/kubekey/Chart.yaml | 15 +
.../crds/kubekey.kubesphere.io_configs.yaml | 44 +
.../kubekey.kubesphere.io_inventories.yaml | 74 +
.../crds/kubekey.kubesphere.io_pipelines.yaml | 2034 +++++++
feature/config/kubekey/templates/_helpers.tpl | 45 +
.../config/kubekey/templates/_tplvalues.tpl | 13 +
.../config/kubekey/templates/deployment.yaml | 76 +
feature/config/kubekey/templates/role.yaml | 90 +
.../kubekey/templates/serviceaccount.yaml | 23 +
feature/config/kubekey/values.yaml | 83 +
feature/docs/zh/001-project.md | 39 +
feature/docs/zh/002-playbook.md | 62 +
feature/docs/zh/003-role.md | 41 +
feature/docs/zh/004-task.md | 53 +
feature/docs/zh/005-module.md | 116 +
feature/docs/zh/101-syntax.md | 19 +
feature/docs/zh/201-variable.md | 73 +
feature/exp/README.md | 7 +
feature/go.mod | 137 +
feature/go.sum | 441 ++
feature/hack/auto-update-version.py | 113 +
feature/hack/boilerplate.go.txt | 15 +
feature/hack/fetch-kubernetes-hash.sh | 45 +
.../gen-repository-iso/dockerfile.almalinux90 | 21 +
.../gen-repository-iso/dockerfile.centos7 | 22 +
.../gen-repository-iso/dockerfile.debian10 | 38 +
.../gen-repository-iso/dockerfile.debian11 | 41 +
.../gen-repository-iso/dockerfile.ubuntu1604 | 33 +
.../gen-repository-iso/dockerfile.ubuntu1804 | 34 +
.../gen-repository-iso/dockerfile.ubuntu2004 | 33 +
.../gen-repository-iso/dockerfile.ubuntu2204 | 33 +
.../hack/gen-repository-iso/download-pkgs.sh | 7 +
feature/hack/gen-repository-iso/packages.yaml | 88 +
feature/hack/go_install.sh | 45 +
feature/hack/lib/golang.sh | 64 +
feature/hack/lib/init.sh | 111 +
feature/hack/lib/logging.sh | 171 +
feature/hack/lib/util.sh | 765 +++
feature/hack/sync-components.sh | 340 ++
feature/hack/update-goimports.sh | 44 +
feature/hack/verify-dockerfiles.sh | 0
feature/hack/verify-goimports.sh | 52 +
feature/hack/version.sh | 108 +
feature/pkg/apis/core/v1/config_types.go | 120 +
feature/pkg/apis/core/v1/config_types_test.go | 112 +
feature/pkg/apis/core/v1/inventory_types.go | 71 +
feature/pkg/apis/core/v1/pipeline_types.go | 223 +
feature/pkg/apis/core/v1/register.go | 36 +
.../pkg/apis/core/v1/zz_generated.deepcopy.go | 457 ++
feature/pkg/apis/core/v1alpha1/conversion.go | 43 +
feature/pkg/apis/core/v1alpha1/register.go | 37 +
feature/pkg/apis/core/v1alpha1/task_types.go | 119 +
.../core/v1alpha1/zz_generated.deepcopy.go | 172 +
feature/pkg/apis/project/v1/base.go | 54 +
feature/pkg/apis/project/v1/block.go | 190 +
.../pkg/apis/project/v1/collectionsearch.go | 22 +
feature/pkg/apis/project/v1/conditional.go | 50 +
feature/pkg/apis/project/v1/delegatable.go | 23 +
feature/pkg/apis/project/v1/docs.go | 188 +
feature/pkg/apis/project/v1/handler.go | 24 +
feature/pkg/apis/project/v1/loop.go | 27 +
feature/pkg/apis/project/v1/notifiable.go | 22 +
feature/pkg/apis/project/v1/play.go | 108 +
feature/pkg/apis/project/v1/play_test.go | 223 +
feature/pkg/apis/project/v1/playbook.go | 45 +
feature/pkg/apis/project/v1/playbook_test.go | 47 +
feature/pkg/apis/project/v1/role.go | 54 +
feature/pkg/apis/project/v1/taggable.go | 96 +
feature/pkg/connector/connector.go | 190 +
feature/pkg/connector/helper.go | 73 +
feature/pkg/connector/helper_test.go | 110 +
feature/pkg/connector/kubernetes_connector.go | 121 +
feature/pkg/connector/local_connector.go | 140 +
feature/pkg/connector/local_connector_test.go | 81 +
feature/pkg/connector/ssh_connector.go | 229 +
feature/pkg/const/common.go | 81 +
feature/pkg/const/helper.go | 49 +
feature/pkg/const/helper_test.go | 34 +
feature/pkg/const/scheme.go | 55 +
feature/pkg/const/workdir.go | 133 +
.../pkg/controllers/pipeline_controller.go | 312 +
feature/pkg/converter/converter.go | 126 +
feature/pkg/converter/converter_test.go | 107 +
feature/pkg/converter/internal/functions.go | 60 +
feature/pkg/converter/internal/helper.go | 133 +
feature/pkg/converter/internal/helper_test.go | 53 +
feature/pkg/converter/tmpl/template.go | 77 +
feature/pkg/converter/tmpl/template_test.go | 644 ++
feature/pkg/executor/block_executor.go | 209 +
feature/pkg/executor/block_executor_test.go | 133 +
feature/pkg/executor/executor.go | 26 +
feature/pkg/executor/executor_test.go | 73 +
feature/pkg/executor/pipeline_executor.go | 276 +
.../pkg/executor/pipeline_executor_test.go | 38 +
feature/pkg/executor/task_executor.go | 342 ++
feature/pkg/executor/task_executor_test.go | 71 +
feature/pkg/manager/command_manager.go | 93 +
feature/pkg/manager/controller_manager.go | 71 +
feature/pkg/manager/manager.go | 66 +
feature/pkg/modules/assert.go | 106 +
feature/pkg/modules/assert_test.go | 121 +
feature/pkg/modules/command.go | 55 +
feature/pkg/modules/command_test.go | 78 +
feature/pkg/modules/copy.go | 289 +
feature/pkg/modules/copy_test.go | 118 +
feature/pkg/modules/debug.go | 51 +
feature/pkg/modules/debug_test.go | 86 +
feature/pkg/modules/fetch.go | 75 +
feature/pkg/modules/fetch_test.go | 74 +
feature/pkg/modules/gen_cert.go | 467 ++
feature/pkg/modules/gen_cert_test.go | 72 +
feature/pkg/modules/image.go | 498 ++
feature/pkg/modules/image_test.go | 75 +
feature/pkg/modules/module.go | 141 +
feature/pkg/modules/module_test.go | 88 +
feature/pkg/modules/set_fact.go | 36 +
feature/pkg/modules/set_fact_test.go | 63 +
feature/pkg/modules/template.go | 285 +
feature/pkg/modules/template_test.go | 86 +
feature/pkg/project/builtin.go | 110 +
feature/pkg/project/git.go | 184 +
feature/pkg/project/helper.go | 376 ++
feature/pkg/project/helper_test.go | 290 +
feature/pkg/project/local.go | 126 +
feature/pkg/project/project.go | 64 +
.../project/testdata/playbooks/playbook1.yaml | 30 +
.../project/testdata/playbooks/playbook2.yaml | 0
.../project/testdata/playbooks/playbook2.yml | 0
.../project/testdata/playbooks/playbook3.yml | 0
.../playbooks/playbooks/playbook3.yaml | 0
.../playbooks/roles/role2/tasks/main.yaml | 3 +
.../testdata/roles/role1/tasks/main.yaml | 3 +
feature/pkg/proxy/admit.go | 48 +
feature/pkg/proxy/api_resources.go | 178 +
feature/pkg/proxy/internal/file_storage.go | 557 ++
feature/pkg/proxy/internal/rest_option.go | 117 +
feature/pkg/proxy/internal/watcher.go | 207 +
feature/pkg/proxy/path_expression.go | 105 +
feature/pkg/proxy/resources/config/storage.go | 65 +
.../pkg/proxy/resources/config/strategy.go | 103 +
.../pkg/proxy/resources/inventory/storage.go | 63 +
.../pkg/proxy/resources/inventory/strategy.go | 101 +
.../pkg/proxy/resources/pipeline/storage.go | 112 +
.../pkg/proxy/resources/pipeline/strategy.go | 116 +
feature/pkg/proxy/resources/task/storage.go | 117 +
feature/pkg/proxy/resources/task/strategy.go | 213 +
feature/pkg/proxy/router.go | 67 +
feature/pkg/proxy/transport.go | 480 ++
feature/pkg/variable/helper.go | 469 ++
feature/pkg/variable/helper_test.go | 358 ++
feature/pkg/variable/internal.go | 387 ++
feature/pkg/variable/internal_test.go | 95 +
feature/pkg/variable/source/file.go | 95 +
feature/pkg/variable/source/memory.go | 24 +
feature/pkg/variable/source/source.go | 33 +
feature/pkg/variable/variable.go | 137 +
feature/plugins/playbooks/backup.yaml | 5 +
feature/plugins/playbooks/restore.yaml | 15 +
feature/plugins/playbooks/sonobuoy.yaml | 6 +
feature/plugins/playbooks/upgrade_kernel.yaml | 16 +
.../playbooks/vars/upgrade_kernel.yaml | 2 +
.../roles/etcd/backup/defaults/main.yaml | 5 +
.../plugins/roles/etcd/backup/tasks/main.yaml | 15 +
.../roles/etcd/restore/defaults/main.yaml | 5 +
.../roles/etcd/restore/tasks/main.yaml | 31 +
.../roles/kubernetes/start/defaults/main.yaml | 2 +
.../roles/kubernetes/start/tasks/main.yaml | 14 +
.../roles/kubernetes/stop/defaults/main.yaml | 2 +
.../roles/kubernetes/stop/tasks/main.yaml | 14 +
.../roles/os/init-kernel/defaults/main.yaml | 5 +
.../roles/os/init-kernel/tasks/centos.yaml | 30 +
.../roles/os/init-kernel/tasks/main.yaml | 3 +
.../roles/os/upgrade-kernel/tasks/centos.yaml | 31 +
.../roles/os/upgrade-kernel/tasks/main.yaml | 3 +
.../plugins/roles/sonobuoy/defaults/main.yaml | 28 +
.../plugins/roles/sonobuoy/tasks/main.yaml | 23 +
.../sonobuoy/templates/plugins/e2e-ks.yaml | 47 +
.../templates/plugins/kube-bench-master.yaml | 86 +
.../templates/plugins/kube-bench.yaml | 86 +
feature/scripts/ci-lint-dockerfiles.sh | 29 +
feature/scripts/docker-install.sh | 526 ++
feature/scripts/downloadKubekey.sh | 96 +
.../harborCreateRegistriesAndReplications.sh | 64 +
.../scripts/harbor_keepalived/check_harbor.sh | 12 +
.../docker-compose-keepalived-backup.yaml | 14 +
.../docker-compose-keepalived-master.yaml | 14 +
.../harbor_keepalived/keepalived-backup.conf | 31 +
.../harbor_keepalived/keepalived-master.conf | 31 +
feature/version/version.go | 78 +
357 files changed, 38026 insertions(+)
create mode 100644 feature/CONTRIBUTORS.md
create mode 100644 feature/LICENSE
create mode 100644 feature/Makefile
create mode 100644 feature/OWNERS
create mode 100644 feature/README.md
create mode 100644 feature/build/controller-manager/Dockerfile
create mode 100644 feature/build/kk/Dockerfile
create mode 100644 feature/builtin/Makefile
create mode 100644 feature/builtin/fs.go
create mode 100644 feature/builtin/inventory/config.yaml
create mode 100644 feature/builtin/inventory/inventory.yaml
create mode 100644 feature/builtin/playbooks/artifact_export.yaml
create mode 100644 feature/builtin/playbooks/artifact_images.yaml
create mode 100644 feature/builtin/playbooks/certs_renew.yaml
create mode 100644 feature/builtin/playbooks/create_cluster.yaml
create mode 100644 feature/builtin/playbooks/hook/post_install.yaml
create mode 100644 feature/builtin/playbooks/hook/pre_install.yaml
create mode 100644 feature/builtin/playbooks/init_os.yaml
create mode 100644 feature/builtin/playbooks/init_registry.yaml
create mode 100644 feature/builtin/playbooks/precheck.yaml
create mode 100644 feature/builtin/playbooks/vars/certs_renew.yaml
create mode 100644 feature/builtin/playbooks/vars/create_cluster_kubernetes.yaml
create mode 100644 feature/builtin/roles/addons/cni/defaults/main.yaml
create mode 100644 feature/builtin/roles/addons/cni/tasks/calico.yaml
create mode 100644 feature/builtin/roles/addons/cni/tasks/cilium.yaml
create mode 100644 feature/builtin/roles/addons/cni/tasks/flannel.yaml
create mode 100644 feature/builtin/roles/addons/cni/tasks/hybridnet.yaml
create mode 100644 feature/builtin/roles/addons/cni/tasks/kubeovn.yaml
create mode 100644 feature/builtin/roles/addons/cni/tasks/main.yaml
create mode 100644 feature/builtin/roles/addons/cni/tasks/multus.yaml
create mode 100644 feature/builtin/roles/addons/cni/templates/calico/pdg.yaml
create mode 100644 feature/builtin/roles/addons/cni/templates/calico/v3.27.yaml
create mode 100644 feature/builtin/roles/addons/cni/templates/flannel/flannel.yaml
create mode 100644 feature/builtin/roles/addons/cni/templates/multus/multus.yaml
create mode 100644 feature/builtin/roles/addons/kata/defaults/main.yaml
create mode 100644 feature/builtin/roles/addons/kata/tasks/main.yaml
create mode 100644 feature/builtin/roles/addons/kata/templates/kata-deploy.yaml
create mode 100644 feature/builtin/roles/addons/nfd/defaults/main.yaml
create mode 100644 feature/builtin/roles/addons/nfd/tasks/main.yaml
create mode 100644 feature/builtin/roles/addons/nfd/templates/nfd-deploy.yaml
create mode 100644 feature/builtin/roles/addons/sc/defaults/main.yaml
create mode 100644 feature/builtin/roles/addons/sc/tasks/local.yaml
create mode 100644 feature/builtin/roles/addons/sc/tasks/main.yaml
create mode 100644 feature/builtin/roles/addons/sc/tasks/nfs.yaml
create mode 100644 feature/builtin/roles/addons/sc/templates/local-volume.yaml
create mode 100644 feature/builtin/roles/certs/renew-etcd/tasks/main.yaml
create mode 100644 feature/builtin/roles/certs/renew-kubernetes/tasks/etcd.yaml
create mode 100644 feature/builtin/roles/certs/renew-kubernetes/tasks/kube.yaml
create mode 100644 feature/builtin/roles/certs/renew-kubernetes/tasks/main.yaml
create mode 100644 feature/builtin/roles/certs/renew-registry/tasks/harbor.yaml
create mode 100644 feature/builtin/roles/certs/renew-registry/tasks/main.yaml
create mode 100644 feature/builtin/roles/certs/renew-registry/tasks/registry.yaml
create mode 100644 feature/builtin/roles/init/init-artifact/defaults/main.yaml
create mode 100644 feature/builtin/roles/init/init-artifact/tasks/download_by_curl.yaml
create mode 100644 feature/builtin/roles/init/init-artifact/tasks/download_by_helm.yaml
create mode 100644 feature/builtin/roles/init/init-artifact/tasks/main.yaml
create mode 100644 feature/builtin/roles/init/init-artifact/tasks/pki.yaml
create mode 100644 feature/builtin/roles/init/init-os/defaults/main.yaml
create mode 100644 feature/builtin/roles/init/init-os/tasks/init_ntpserver.yaml
create mode 100644 feature/builtin/roles/init/init-os/tasks/init_repository.yaml
create mode 100644 feature/builtin/roles/init/init-os/tasks/main.yaml
create mode 100644 feature/builtin/roles/init/init-os/templates/init-os.sh
create mode 100644 feature/builtin/roles/install/certs/defaults/main.yaml
create mode 100644 feature/builtin/roles/install/certs/files/k8s-certs-renew.service
create mode 100644 feature/builtin/roles/install/certs/files/k8s-certs-renew.timer
create mode 100644 feature/builtin/roles/install/certs/tasks/main.yaml
create mode 100644 feature/builtin/roles/install/certs/templates/renew_script.sh
create mode 100644 feature/builtin/roles/install/cri/defaults/main.yaml
create mode 100644 feature/builtin/roles/install/cri/files/containerd.service
create mode 100644 feature/builtin/roles/install/cri/files/cri_docker.service
create mode 100644 feature/builtin/roles/install/cri/files/docker.service
create mode 100644 feature/builtin/roles/install/cri/tasks/install_containerd.yaml
create mode 100644 feature/builtin/roles/install/cri/tasks/install_crictl.yaml
create mode 100644 feature/builtin/roles/install/cri/tasks/install_cridockerd.yaml
create mode 100644 feature/builtin/roles/install/cri/tasks/install_docker.yaml
create mode 100644 feature/builtin/roles/install/cri/tasks/main.yaml
create mode 100644 feature/builtin/roles/install/cri/templates/containerd.config
create mode 100644 feature/builtin/roles/install/cri/templates/crictl.config
create mode 100644 feature/builtin/roles/install/cri/templates/docker.config
create mode 100644 feature/builtin/roles/install/etcd/defaults/main.yaml
create mode 100644 feature/builtin/roles/install/etcd/files/backup.service
create mode 100644 feature/builtin/roles/install/etcd/files/etcd.service
create mode 100644 feature/builtin/roles/install/etcd/tasks/backup_etcd.yaml
create mode 100644 feature/builtin/roles/install/etcd/tasks/install_etcd.yaml
create mode 100644 feature/builtin/roles/install/etcd/tasks/main.yaml
create mode 100644 feature/builtin/roles/install/etcd/templates/backup.sh
create mode 100644 feature/builtin/roles/install/etcd/templates/backup.timer
create mode 100644 feature/builtin/roles/install/etcd/templates/etcd.env
create mode 100644 feature/builtin/roles/install/image-registry/defaults/main.yaml
create mode 100644 feature/builtin/roles/install/image-registry/files/containerd.service
create mode 100644 feature/builtin/roles/install/image-registry/files/docker.service
create mode 100644 feature/builtin/roles/install/image-registry/tasks/install_docker.yaml
create mode 100644 feature/builtin/roles/install/image-registry/tasks/install_docker_compose.yaml
create mode 100644 feature/builtin/roles/install/image-registry/tasks/install_harbor.yaml
create mode 100644 feature/builtin/roles/install/image-registry/tasks/install_keepalived.yaml
create mode 100644 feature/builtin/roles/install/image-registry/tasks/install_registry.yaml
create mode 100644 feature/builtin/roles/install/image-registry/tasks/load_images.yaml
create mode 100644 feature/builtin/roles/install/image-registry/tasks/main.yaml
create mode 100644 feature/builtin/roles/install/image-registry/templates/docker.config
create mode 100644 feature/builtin/roles/install/image-registry/templates/harbor.config
create mode 100644 feature/builtin/roles/install/image-registry/templates/harbor.service
create mode 100644 feature/builtin/roles/install/image-registry/templates/harbor_keepalived.docker-compose
create mode 100644 feature/builtin/roles/install/image-registry/templates/keepalived.config
create mode 100644 feature/builtin/roles/install/image-registry/templates/keepalived.healthcheck
create mode 100644 feature/builtin/roles/install/image-registry/templates/registry.config
create mode 100644 feature/builtin/roles/install/image-registry/templates/registry.docker-compose
create mode 100644 feature/builtin/roles/install/image-registry/templates/registry.service
create mode 100644 feature/builtin/roles/install/kubernetes/defaults/main.yaml
create mode 100644 feature/builtin/roles/install/kubernetes/files/audit/audit_policy.yaml
create mode 100644 feature/builtin/roles/install/kubernetes/files/audit/audit_webhook.yaml
create mode 100644 feature/builtin/roles/install/kubernetes/files/kubelet.service
create mode 100644 feature/builtin/roles/install/kubernetes/tasks/deploy_cluster_dns.yaml
create mode 100644 feature/builtin/roles/install/kubernetes/tasks/deploy_haproxy.yaml
create mode 100644 feature/builtin/roles/install/kubernetes/tasks/deploy_kube_vip.yaml
create mode 100644 feature/builtin/roles/install/kubernetes/tasks/init_kubernetes.yaml
create mode 100644 feature/builtin/roles/install/kubernetes/tasks/install_binaries.yaml
create mode 100644 feature/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml
create mode 100644 feature/builtin/roles/install/kubernetes/tasks/main.yaml
create mode 100644 feature/builtin/roles/install/kubernetes/templates/dns/coredns.deployment
create mode 100644 feature/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset
create mode 100644 feature/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg
create mode 100644 feature/builtin/roles/install/kubernetes/templates/haproxy/haproxy.yaml
create mode 100644 feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta2
create mode 100644 feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3
create mode 100644 feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta2
create mode 100644 feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta3
create mode 100644 feature/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env
create mode 100644 feature/builtin/roles/install/kubernetes/templates/kubevip/kubevip.ARP
create mode 100644 feature/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP
create mode 100644 feature/builtin/roles/install/nfs/defaults/main.yaml
create mode 100644 feature/builtin/roles/install/nfs/tasks/debian.yaml
create mode 100644 feature/builtin/roles/install/nfs/tasks/main.yaml
create mode 100644 feature/builtin/roles/install/nfs/tasks/rhel.yaml
create mode 100644 feature/builtin/roles/install/nfs/templates/exports
create mode 100644 feature/builtin/roles/install/security/tasks/main.yaml
create mode 100644 feature/builtin/roles/precheck/artifact_check/tasks/main.yaml
create mode 100644 feature/builtin/roles/precheck/env_check/defaults/main.yaml
create mode 100644 feature/builtin/roles/precheck/env_check/tasks/cri.yaml
create mode 100644 feature/builtin/roles/precheck/env_check/tasks/etcd.yaml
create mode 100644 feature/builtin/roles/precheck/env_check/tasks/main.yaml
create mode 100644 feature/builtin/roles/precheck/env_check/tasks/network.yaml
create mode 100644 feature/builtin/roles/precheck/env_check/tasks/nfs.yaml
create mode 100644 feature/builtin/roles/precheck/env_check/tasks/os.yaml
create mode 100644 feature/cmd/controller-manager/app/options/common.go
create mode 100644 feature/cmd/controller-manager/app/options/controller_manager.go
create mode 100644 feature/cmd/controller-manager/app/server.go
create mode 100644 feature/cmd/controller-manager/app/version.go
create mode 100644 feature/cmd/controller-manager/controller_manager.go
create mode 100644 feature/cmd/kk/app/artifact.go
create mode 100644 feature/cmd/kk/app/certs.go
create mode 100644 feature/cmd/kk/app/create.go
create mode 100644 feature/cmd/kk/app/init.go
create mode 100644 feature/cmd/kk/app/options/artifact.go
create mode 100644 feature/cmd/kk/app/options/builtin.go
create mode 100644 feature/cmd/kk/app/options/certs.go
create mode 100644 feature/cmd/kk/app/options/common.go
create mode 100644 feature/cmd/kk/app/options/create.go
create mode 100644 feature/cmd/kk/app/options/init.go
create mode 100644 feature/cmd/kk/app/options/option.go
create mode 100644 feature/cmd/kk/app/options/pipeline.go
create mode 100644 feature/cmd/kk/app/options/precheck.go
create mode 100644 feature/cmd/kk/app/options/run.go
create mode 100644 feature/cmd/kk/app/pipeline.go
create mode 100644 feature/cmd/kk/app/precheck.go
create mode 100644 feature/cmd/kk/app/root.go
create mode 100644 feature/cmd/kk/app/run.go
create mode 100644 feature/cmd/kk/app/version.go
create mode 100644 feature/cmd/kk/kubekey.go
create mode 100644 feature/config/kubekey/Chart.yaml
create mode 100644 feature/config/kubekey/crds/kubekey.kubesphere.io_configs.yaml
create mode 100644 feature/config/kubekey/crds/kubekey.kubesphere.io_inventories.yaml
create mode 100644 feature/config/kubekey/crds/kubekey.kubesphere.io_pipelines.yaml
create mode 100644 feature/config/kubekey/templates/_helpers.tpl
create mode 100644 feature/config/kubekey/templates/_tplvalues.tpl
create mode 100644 feature/config/kubekey/templates/deployment.yaml
create mode 100644 feature/config/kubekey/templates/role.yaml
create mode 100644 feature/config/kubekey/templates/serviceaccount.yaml
create mode 100644 feature/config/kubekey/values.yaml
create mode 100644 feature/docs/zh/001-project.md
create mode 100644 feature/docs/zh/002-playbook.md
create mode 100644 feature/docs/zh/003-role.md
create mode 100644 feature/docs/zh/004-task.md
create mode 100644 feature/docs/zh/005-module.md
create mode 100644 feature/docs/zh/101-syntax.md
create mode 100644 feature/docs/zh/201-variable.md
create mode 100644 feature/exp/README.md
create mode 100644 feature/go.mod
create mode 100644 feature/go.sum
create mode 100755 feature/hack/auto-update-version.py
create mode 100644 feature/hack/boilerplate.go.txt
create mode 100755 feature/hack/fetch-kubernetes-hash.sh
create mode 100644 feature/hack/gen-repository-iso/dockerfile.almalinux90
create mode 100644 feature/hack/gen-repository-iso/dockerfile.centos7
create mode 100644 feature/hack/gen-repository-iso/dockerfile.debian10
create mode 100644 feature/hack/gen-repository-iso/dockerfile.debian11
create mode 100644 feature/hack/gen-repository-iso/dockerfile.ubuntu1604
create mode 100644 feature/hack/gen-repository-iso/dockerfile.ubuntu1804
create mode 100644 feature/hack/gen-repository-iso/dockerfile.ubuntu2004
create mode 100644 feature/hack/gen-repository-iso/dockerfile.ubuntu2204
create mode 100644 feature/hack/gen-repository-iso/download-pkgs.sh
create mode 100644 feature/hack/gen-repository-iso/packages.yaml
create mode 100755 feature/hack/go_install.sh
create mode 100755 feature/hack/lib/golang.sh
create mode 100755 feature/hack/lib/init.sh
create mode 100755 feature/hack/lib/logging.sh
create mode 100755 feature/hack/lib/util.sh
create mode 100755 feature/hack/sync-components.sh
create mode 100755 feature/hack/update-goimports.sh
create mode 100644 feature/hack/verify-dockerfiles.sh
create mode 100755 feature/hack/verify-goimports.sh
create mode 100755 feature/hack/version.sh
create mode 100644 feature/pkg/apis/core/v1/config_types.go
create mode 100644 feature/pkg/apis/core/v1/config_types_test.go
create mode 100644 feature/pkg/apis/core/v1/inventory_types.go
create mode 100644 feature/pkg/apis/core/v1/pipeline_types.go
create mode 100644 feature/pkg/apis/core/v1/register.go
create mode 100644 feature/pkg/apis/core/v1/zz_generated.deepcopy.go
create mode 100644 feature/pkg/apis/core/v1alpha1/conversion.go
create mode 100644 feature/pkg/apis/core/v1alpha1/register.go
create mode 100644 feature/pkg/apis/core/v1alpha1/task_types.go
create mode 100644 feature/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go
create mode 100644 feature/pkg/apis/project/v1/base.go
create mode 100644 feature/pkg/apis/project/v1/block.go
create mode 100644 feature/pkg/apis/project/v1/collectionsearch.go
create mode 100644 feature/pkg/apis/project/v1/conditional.go
create mode 100644 feature/pkg/apis/project/v1/delegatable.go
create mode 100644 feature/pkg/apis/project/v1/docs.go
create mode 100644 feature/pkg/apis/project/v1/handler.go
create mode 100644 feature/pkg/apis/project/v1/loop.go
create mode 100644 feature/pkg/apis/project/v1/notifiable.go
create mode 100644 feature/pkg/apis/project/v1/play.go
create mode 100644 feature/pkg/apis/project/v1/play_test.go
create mode 100644 feature/pkg/apis/project/v1/playbook.go
create mode 100644 feature/pkg/apis/project/v1/playbook_test.go
create mode 100644 feature/pkg/apis/project/v1/role.go
create mode 100644 feature/pkg/apis/project/v1/taggable.go
create mode 100644 feature/pkg/connector/connector.go
create mode 100644 feature/pkg/connector/helper.go
create mode 100644 feature/pkg/connector/helper_test.go
create mode 100644 feature/pkg/connector/kubernetes_connector.go
create mode 100644 feature/pkg/connector/local_connector.go
create mode 100644 feature/pkg/connector/local_connector_test.go
create mode 100644 feature/pkg/connector/ssh_connector.go
create mode 100644 feature/pkg/const/common.go
create mode 100644 feature/pkg/const/helper.go
create mode 100644 feature/pkg/const/helper_test.go
create mode 100644 feature/pkg/const/scheme.go
create mode 100644 feature/pkg/const/workdir.go
create mode 100644 feature/pkg/controllers/pipeline_controller.go
create mode 100644 feature/pkg/converter/converter.go
create mode 100644 feature/pkg/converter/converter_test.go
create mode 100644 feature/pkg/converter/internal/functions.go
create mode 100644 feature/pkg/converter/internal/helper.go
create mode 100644 feature/pkg/converter/internal/helper_test.go
create mode 100644 feature/pkg/converter/tmpl/template.go
create mode 100644 feature/pkg/converter/tmpl/template_test.go
create mode 100644 feature/pkg/executor/block_executor.go
create mode 100644 feature/pkg/executor/block_executor_test.go
create mode 100644 feature/pkg/executor/executor.go
create mode 100644 feature/pkg/executor/executor_test.go
create mode 100644 feature/pkg/executor/pipeline_executor.go
create mode 100644 feature/pkg/executor/pipeline_executor_test.go
create mode 100644 feature/pkg/executor/task_executor.go
create mode 100644 feature/pkg/executor/task_executor_test.go
create mode 100644 feature/pkg/manager/command_manager.go
create mode 100644 feature/pkg/manager/controller_manager.go
create mode 100644 feature/pkg/manager/manager.go
create mode 100644 feature/pkg/modules/assert.go
create mode 100644 feature/pkg/modules/assert_test.go
create mode 100644 feature/pkg/modules/command.go
create mode 100644 feature/pkg/modules/command_test.go
create mode 100644 feature/pkg/modules/copy.go
create mode 100644 feature/pkg/modules/copy_test.go
create mode 100644 feature/pkg/modules/debug.go
create mode 100644 feature/pkg/modules/debug_test.go
create mode 100644 feature/pkg/modules/fetch.go
create mode 100644 feature/pkg/modules/fetch_test.go
create mode 100644 feature/pkg/modules/gen_cert.go
create mode 100644 feature/pkg/modules/gen_cert_test.go
create mode 100644 feature/pkg/modules/image.go
create mode 100644 feature/pkg/modules/image_test.go
create mode 100644 feature/pkg/modules/module.go
create mode 100644 feature/pkg/modules/module_test.go
create mode 100644 feature/pkg/modules/set_fact.go
create mode 100644 feature/pkg/modules/set_fact_test.go
create mode 100644 feature/pkg/modules/template.go
create mode 100644 feature/pkg/modules/template_test.go
create mode 100644 feature/pkg/project/builtin.go
create mode 100644 feature/pkg/project/git.go
create mode 100644 feature/pkg/project/helper.go
create mode 100644 feature/pkg/project/helper_test.go
create mode 100644 feature/pkg/project/local.go
create mode 100644 feature/pkg/project/project.go
create mode 100644 feature/pkg/project/testdata/playbooks/playbook1.yaml
create mode 100644 feature/pkg/project/testdata/playbooks/playbook2.yaml
create mode 100644 feature/pkg/project/testdata/playbooks/playbook2.yml
create mode 100644 feature/pkg/project/testdata/playbooks/playbook3.yml
create mode 100644 feature/pkg/project/testdata/playbooks/playbooks/playbook3.yaml
create mode 100644 feature/pkg/project/testdata/playbooks/roles/role2/tasks/main.yaml
create mode 100644 feature/pkg/project/testdata/roles/role1/tasks/main.yaml
create mode 100644 feature/pkg/proxy/admit.go
create mode 100644 feature/pkg/proxy/api_resources.go
create mode 100644 feature/pkg/proxy/internal/file_storage.go
create mode 100644 feature/pkg/proxy/internal/rest_option.go
create mode 100644 feature/pkg/proxy/internal/watcher.go
create mode 100644 feature/pkg/proxy/path_expression.go
create mode 100644 feature/pkg/proxy/resources/config/storage.go
create mode 100644 feature/pkg/proxy/resources/config/strategy.go
create mode 100644 feature/pkg/proxy/resources/inventory/storage.go
create mode 100644 feature/pkg/proxy/resources/inventory/strategy.go
create mode 100644 feature/pkg/proxy/resources/pipeline/storage.go
create mode 100644 feature/pkg/proxy/resources/pipeline/strategy.go
create mode 100644 feature/pkg/proxy/resources/task/storage.go
create mode 100644 feature/pkg/proxy/resources/task/strategy.go
create mode 100644 feature/pkg/proxy/router.go
create mode 100644 feature/pkg/proxy/transport.go
create mode 100644 feature/pkg/variable/helper.go
create mode 100644 feature/pkg/variable/helper_test.go
create mode 100644 feature/pkg/variable/internal.go
create mode 100644 feature/pkg/variable/internal_test.go
create mode 100644 feature/pkg/variable/source/file.go
create mode 100644 feature/pkg/variable/source/memory.go
create mode 100644 feature/pkg/variable/source/source.go
create mode 100644 feature/pkg/variable/variable.go
create mode 100644 feature/plugins/playbooks/backup.yaml
create mode 100644 feature/plugins/playbooks/restore.yaml
create mode 100644 feature/plugins/playbooks/sonobuoy.yaml
create mode 100644 feature/plugins/playbooks/upgrade_kernel.yaml
create mode 100644 feature/plugins/playbooks/vars/upgrade_kernel.yaml
create mode 100644 feature/plugins/roles/etcd/backup/defaults/main.yaml
create mode 100644 feature/plugins/roles/etcd/backup/tasks/main.yaml
create mode 100644 feature/plugins/roles/etcd/restore/defaults/main.yaml
create mode 100644 feature/plugins/roles/etcd/restore/tasks/main.yaml
create mode 100644 feature/plugins/roles/kubernetes/start/defaults/main.yaml
create mode 100644 feature/plugins/roles/kubernetes/start/tasks/main.yaml
create mode 100644 feature/plugins/roles/kubernetes/stop/defaults/main.yaml
create mode 100644 feature/plugins/roles/kubernetes/stop/tasks/main.yaml
create mode 100644 feature/plugins/roles/os/init-kernel/defaults/main.yaml
create mode 100644 feature/plugins/roles/os/init-kernel/tasks/centos.yaml
create mode 100644 feature/plugins/roles/os/init-kernel/tasks/main.yaml
create mode 100644 feature/plugins/roles/os/upgrade-kernel/tasks/centos.yaml
create mode 100644 feature/plugins/roles/os/upgrade-kernel/tasks/main.yaml
create mode 100644 feature/plugins/roles/sonobuoy/defaults/main.yaml
create mode 100644 feature/plugins/roles/sonobuoy/tasks/main.yaml
create mode 100644 feature/plugins/roles/sonobuoy/templates/plugins/e2e-ks.yaml
create mode 100644 feature/plugins/roles/sonobuoy/templates/plugins/kube-bench-master.yaml
create mode 100644 feature/plugins/roles/sonobuoy/templates/plugins/kube-bench.yaml
create mode 100755 feature/scripts/ci-lint-dockerfiles.sh
create mode 100755 feature/scripts/docker-install.sh
create mode 100755 feature/scripts/downloadKubekey.sh
create mode 100644 feature/scripts/harborCreateRegistriesAndReplications.sh
create mode 100644 feature/scripts/harbor_keepalived/check_harbor.sh
create mode 100644 feature/scripts/harbor_keepalived/docker-compose-keepalived-backup.yaml
create mode 100644 feature/scripts/harbor_keepalived/docker-compose-keepalived-master.yaml
create mode 100644 feature/scripts/harbor_keepalived/keepalived-backup.conf
create mode 100644 feature/scripts/harbor_keepalived/keepalived-master.conf
create mode 100644 feature/version/version.go
diff --git a/feature/CONTRIBUTORS.md b/feature/CONTRIBUTORS.md
new file mode 100644
index 000000000..1b9073c18
--- /dev/null
+++ b/feature/CONTRIBUTORS.md
@@ -0,0 +1,121 @@
+### Sincere gratitude goes to the following people for their contributions to Pipeline
+
+Contributions of any kind are welcome! Thanks go to these wonderful contributors; they have helped our project grow fast.
+
+<!-- contributor avatar grid (HTML links) omitted -->
diff --git a/feature/LICENSE b/feature/LICENSE
new file mode 100644
index 000000000..cd92c18de
--- /dev/null
+++ b/feature/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2018-2020 KubeSphere Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/feature/Makefile b/feature/Makefile
new file mode 100644
index 000000000..38718abcb
--- /dev/null
+++ b/feature/Makefile
@@ -0,0 +1,619 @@
+# Ensure Make is run with bash shell as some syntax below is bash-specific
+SHELL:=/usr/bin/env bash
+
+.DEFAULT_GOAL:=help
+
+#
+# Go.
+#
+GO_VERSION ?= 1.22
+GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION)
+GOARCH ?= $(shell go env GOARCH)
+GOOS ?= $(shell go env GOOS)
+# Use GOPROXY environment variable if set
+GOPROXY := $(shell go env GOPROXY)
+ifeq ($(GOPROXY),)
+GOPROXY := https://goproxy.cn,direct
+endif
+export GOPROXY
+
+# Activate module mode, as we use Go modules to manage dependencies
+#export GO111MODULE=on
+
+# This option is for running docker manifest command
+#export DOCKER_CLI_EXPERIMENTAL := enabled
+
+#
+# Directories.
+#
+# Full directory of where the Makefile resides
+ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
+#EXP_DIR := exp
+
+TEST_DIR := test
+TOOLS_DIR := hack/tools
+#BIN_DIR := $(abspath $(TOOLS_DIR)/$(BIN_DIR))
+E2E_FRAMEWORK_DIR := $(TEST_DIR)/framework
+GO_INSTALL := ./hack/go_install.sh
+
+# output
+OUTPUT_DIR := $(abspath $(ROOT_DIR)/_output)
+OUTPUT_BIN_DIR := $(OUTPUT_DIR)/bin
+OUTPUT_TOOLS_DIR := $(OUTPUT_DIR)/tools
+#ARTIFACTS ?= ${OUTPUT_DIR}/_artifacts
+
+dirs := $(OUTPUT_DIR) $(OUTPUT_BIN_DIR) $(OUTPUT_TOOLS_DIR)
+
+# Create each output directory at parse time if it does not already exist;
+# $(wildcard) expands to empty when the directory is missing, triggering the mkdir.
+$(foreach dir, $(dirs), \
+ $(if $(wildcard $(dir)),, \
+ $(shell mkdir -p $(dir)) \
+ ) \
+)
+
+export PATH := $(abspath $(OUTPUT_BIN_DIR)):$(abspath $(OUTPUT_TOOLS_DIR)):$(PATH)
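+# Because these directories are prepended, locally pinned tools in
+# $(OUTPUT_TOOLS_DIR) and binaries in $(OUTPUT_BIN_DIR) take precedence
+# over any system-wide installs of the same names.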
+
+#
+# Binaries.
+#
+# Note: Need to use abspath so we can invoke these from subdirectories
+KUSTOMIZE_VER := v4.5.2
+KUSTOMIZE_BIN := kustomize
+KUSTOMIZE := $(abspath $(OUTPUT_TOOLS_DIR)/$(KUSTOMIZE_BIN)-$(KUSTOMIZE_VER))
+KUSTOMIZE_PKG := sigs.k8s.io/kustomize/kustomize/v4
+
+SETUP_ENVTEST_VER := v0.0.0-20240521074430-fbb7d370bebc
+SETUP_ENVTEST_BIN := setup-envtest
+SETUP_ENVTEST := $(abspath $(OUTPUT_TOOLS_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER))
+SETUP_ENVTEST_PKG := sigs.k8s.io/controller-runtime/tools/setup-envtest
+
+CONTROLLER_GEN_VER := v0.15.0
+CONTROLLER_GEN_BIN := controller-gen
+CONTROLLER_GEN := $(abspath $(OUTPUT_TOOLS_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER))
+CONTROLLER_GEN_PKG := sigs.k8s.io/controller-tools/cmd/controller-gen
+
+GOTESTSUM_VER := v1.6.4
+GOTESTSUM_BIN := gotestsum
+GOTESTSUM := $(abspath $(OUTPUT_TOOLS_DIR)/$(GOTESTSUM_BIN)-$(GOTESTSUM_VER))
+GOTESTSUM_PKG := gotest.tools/gotestsum
+
+HADOLINT_VER := v2.10.0
+HADOLINT_FAILURE_THRESHOLD = warning
+
+GOLANGCI_LINT_VER := $(shell grep '[[:space:]]version' .github/workflows/golangci-lint.yml | sed 's/.*version: //')
+GOLANGCI_LINT_BIN := golangci-lint
+GOLANGCI_LINT := $(abspath $(OUTPUT_TOOLS_DIR)/$(GOLANGCI_LINT_BIN))
+GOLANGCI_LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint
+
+GORELEASER_VERSION := v2.0.1
+GORELEASER_BIN := goreleaser
+GORELEASER_PKG := github.com/goreleaser/goreleaser/v2
+GORELEASER := $(abspath $(OUTPUT_TOOLS_DIR)/$(GORELEASER_BIN))
+
+#
+# Docker.
+#
+DOCKERCMD ?= $(shell which docker)
+DOCKER_BUILD_ENV = DOCKER_BUILDKIT=1
+DOCKER_BUILD ?= $(DOCKER_BUILD_ENV) $(DOCKERCMD) buildx build
+PLATFORM ?= linux/amd64,linux/arm64
+DOCKER_OUT_TYPE ?= --push
+DOCKER_PUSH ?= $(DOCKER_BUILD) --platform $(PLATFORM) $(DOCKER_OUT_TYPE)
+
+# Define Docker related variables. Releases should modify and double check these vars.
+REGISTRY ?= docker.io/kubespheredev
+#REGISTRY ?= docker.io/kubespheredev
+#PROD_REGISTRY ?= docker.io/kubesphere
+
+# capkk
+#CAPKK_IMAGE_NAME ?= capkk-controller
+#CAPKK_CONTROLLER_IMG ?= $(REGISTRY)/$(CAPKK_IMAGE_NAME)
+# controller-manager
+OPERATOR_IMAGE_NAME ?= kk-controller-manager
+OPERATOR_CONTROLLER_IMG ?= $(REGISTRY)/$(OPERATOR_IMAGE_NAME)
+# executor
+EXECUTOR_IMAGE_NAME ?= kk-executor
+EXECUTOR_CONTROLLER_IMG ?= $(REGISTRY)/$(EXECUTOR_IMAGE_NAME)
+
+# bootstrap
+#K3S_BOOTSTRAP_IMAGE_NAME ?= k3s-bootstrap-controller
+#K3S_BOOTSTRAP_CONTROLLER_IMG ?= $(REGISTRY)/$(K3S_BOOTSTRAP_IMAGE_NAME)
+
+# control plane
+#K3S_CONTROL_PLANE_IMAGE_NAME ?= k3s-control-plane-controller
+#K3S_CONTROL_PLANE_CONTROLLER_IMG ?= $(REGISTRY)/$(K3S_CONTROL_PLANE_IMAGE_NAME)
+
+# It is set by Prow GIT_TAG, a git-based tag of the form vYYYYMMDD-hash, e.g., v20210120-v0.3.10-308-gc61521971
+
+TAG ?= dev
+
+#ALL_ARCH = amd64 arm arm64 ppc64le s390x
+
+# Allow overriding the imagePullPolicy
+#PULL_POLICY ?= Always
+
+# Hosts running SELinux need :z added to volume mounts
+#SELINUX_ENABLED := $(shell cat /sys/fs/selinux/enforce 2> /dev/null || echo 0)
+#
+#ifeq ($(SELINUX_ENABLED),1)
+# DOCKER_VOL_OPTS?=:z
+#endif
+
+# Set build time variables including version details
+LDFLAGS := $(shell hack/version.sh)
+# Set kk build tags
+#BUILDTAGS = exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp
+BUILDTAGS ?= builtin
+
+#.PHONY: all
+#all: test managers
+
+.PHONY: help
+help: ## Display this help.
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n\nTargets:\n"} /^[0-9A-Za-z_-]+:.*?##/ { printf "  \033[36m%-45s\033[0m %s\n", $$1, $$2 } /^\$$\([0-9A-Za-z_-]+\):.*?##/ { gsub("_","-", $$1); printf "  \033[36m%-45s\033[0m %s\n", tolower(substr($$1, 3, length($$1)-7)), $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
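+# Help lists every target annotated with a trailing "## ..." comment; "##@ ..." lines become section headers.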
+
+## --------------------------------------
+## Generate / Manifests
+## --------------------------------------
+
+##@ generate:
+
+.PHONY: generate
+generate: ## Run all generate-manifests-*, generate-go-deepcopy-* targets
+ $(MAKE) generate-go-deepcopy-kubekey generate-manifests-kubekey generate-modules generate-goimports
+
+.PHONY: generate-go-deepcopy-kubekey
+generate-go-deepcopy-kubekey: $(CONTROLLER_GEN) ## Generate deepcopy object
+ $(MAKE) clean-generated-deepcopy SRC_DIRS="./pkg/apis/"
+ $(CONTROLLER_GEN) \
+ object:headerFile=./hack/boilerplate.go.txt \
+		paths=./pkg/apis/...
+
+.PHONY: generate-manifests-kubekey
+generate-manifests-kubekey: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc.
+ $(CONTROLLER_GEN) \
+ paths=./pkg/apis/core/... \
+ crd \
+ output:crd:dir=./config/kubekey/crds/
+
+.PHONY: generate-modules
+generate-modules: ## Run go mod tidy to ensure modules are up to date
+ @go mod tidy && go mod vendor
+
+.PHONY: generate-goimports
+generate-goimports: ## Format all import, `goimports` is required.
+ @hack/update-goimports.sh
+
+## --------------------------------------
+## Lint / Verify
+## --------------------------------------
+
+##@ lint and verify:
+
+.PHONY: lint
+lint: $(GOLANGCI_LINT) ## Lint the codebase
+ $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
+ cd $(TEST_DIR); $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
+
+.PHONY: verify-dockerfiles
+verify-dockerfiles: ## Lint Dockerfiles with hadolint
+ ./hack/ci-lint-dockerfiles.sh $(HADOLINT_VER) $(HADOLINT_FAILURE_THRESHOLD)
+
+ALL_VERIFY_CHECKS ?= modules gen goimports releaser
+
+.PHONY: verify
+verify: $(addprefix verify-,$(ALL_VERIFY_CHECKS)) ## Run all verify-* targets
+
+.PHONY: verify-modules
+verify-modules: ## Verify go modules are up to date
+ @if !(git diff --quiet HEAD -- go.sum go.mod $(TOOLS_DIR)/go.mod $(TOOLS_DIR)/go.sum $(TEST_DIR)/go.mod $(TEST_DIR)/go.sum); then \
+ git diff; \
+ echo "go module files are out of date"; exit 1; \
+ fi
+ @if (find . -name 'go.mod' | xargs -n1 grep -q -i 'k8s.io/client-go.*+incompatible'); then \
+ find . -name "go.mod" -exec grep -i 'k8s.io/client-go.*+incompatible' {} \; -print; \
+ echo "go module contains an incompatible client-go version"; exit 1; \
+ fi
+
+.PHONY: verify-gen
+verify-gen: ## Verify go generated files are up to date
+ @if !(git diff --quiet HEAD); then \
+ git diff; \
+ echo "generated files are out of date, run make generate"; exit 1; \
+ fi
+
+.PHONY: verify-goimports
+verify-goimports: ## Verify go imports
+ @hack/verify-goimports.sh
+
+.PHONY: verify-releaser
+verify-releaser: $(GORELEASER) ## Verify goreleaser
+ @$(GORELEASER) check
+
+## --------------------------------------
+## Binaries
+## --------------------------------------
+
+##@ build:
+
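+# Cross-compile by overriding GOOS/GOARCH, e.g. make kk GOOS=linux GOARCH=arm64; the binary lands in $(OUTPUT_BIN_DIR).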
+.PHONY: kk
+kk: ## Build kk binary
+ @CGO_ENABLED=0 GOARCH=$(GOARCH) GOOS=$(GOOS) go build -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS)" -o $(OUTPUT_BIN_DIR)/kk cmd/kk/kubekey.go
+
+.PHONY: kk-releaser
+kk-releaser: $(GORELEASER_BIN)
+	LDFLAGS="$(LDFLAGS)" $(GORELEASER) release --clean --skip validate --skip publish
+
+.PHONY: docker-build
+docker-build: docker-build-operator docker-build-kk ## Build and push all images
+
+.PHONY: docker-build-operator
+docker-build-operator: ## Build the docker image for operator
+ @$(DOCKER_PUSH) \
+ --build-arg builder_image=$(GO_CONTAINER_IMAGE) \
+ --build-arg goproxy=$(GOPROXY) \
+ --build-arg ldflags="$(LDFLAGS)" --build-arg build_tags="$(BUILDTAGS)" \
+ -f build/controller-manager/Dockerfile -t $(OPERATOR_CONTROLLER_IMG):$(TAG) .
+
+.PHONY: docker-build-kk
+docker-build-kk: ## Build the docker image for kk
+ @$(DOCKER_PUSH) \
+ --build-arg builder_image=$(GO_CONTAINER_IMAGE) \
+ --build-arg goproxy=$(GOPROXY) \
+ --build-arg ldflags="$(LDFLAGS)" --build-arg build_tags="$(BUILDTAGS)" \
+ -f build/kk/Dockerfile -t $(EXECUTOR_CONTROLLER_IMG):$(TAG) .
+
+
+#ALL_MANAGERS = capkk k3s-bootstrap k3s-control-plane
+
+#.PHONY: managers
+#managers: $(addprefix manager-,$(ALL_MANAGERS)) ## Run all manager-* targets
+#
+#.PHONY: manager-capkk
+#manager-capkk: ## Build the capkk manager binary into the ./bin folder
+# go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/manager github.com/kubesphere/kubekey/v3
+#
+#.PHONY: manager-k3s-bootstrap
+#manager-k3s-bootstrap: ## Build the k3s bootstrap manager binary into the ./bin folder
+# go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-bootstrap-manager github.com/kubesphere/kubekey/v3/bootstrap/k3s
+#
+#.PHONY: manager-k3s-control-plane
+#manager-k3s-control-plane: ## Build the k3s control plane manager binary into the ./bin folder
+# go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-control-plane-manager github.com/kubesphere/kubekey/v3/controlplane/k3s
+#
+#.PHONY: docker-pull-prerequisites
+#docker-pull-prerequisites:
+# docker pull docker.io/docker/dockerfile:1.4
+# docker pull $(GO_CONTAINER_IMAGE)
+#
+#.PHONY: docker-build-all
+#docker-build-all: $(addprefix docker-build-,$(ALL_ARCH)) ## Build docker images for all architectures
+#
+#docker-build-%:
+# $(MAKE) ARCH=$* docker-build
+#
+#ALL_DOCKER_BUILD = capkk k3s-bootstrap k3s-control-plane
+#
+#.PHONY: docker-build
+#docker-build: docker-pull-prerequisites ## Run docker-build-* targets for all providers
+# $(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(ALL_DOCKER_BUILD))
+#
+#.PHONY: docker-build-capkk
+#docker-build-capkk: ## Build the docker image for capkk
+# DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
+#
+#.PHONY: docker-build-k3s-bootstrap
+#docker-build-k3s-bootstrap: ## Build the docker image for k3s bootstrap controller manager
+# DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./bootstrap/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
+#
+#.PHONY: docker-build-k3s-control-plane
+#docker-build-k3s-control-plane: ## Build the docker image for k3s control plane controller manager
+# DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./controlplane/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
+#
+#.PHONY: docker-build-e2e
+#docker-build-e2e: ## Build the docker image for capkk
+# $(MAKE) docker-build REGISTRY=docker.io/kubespheredev PULL_POLICY=IfNotPresent TAG=e2e
+
+## --------------------------------------
+## Deployment
+## --------------------------------------
+
+##@ deployment:
+
+.PHONY: helm-package
+helm-package: ## Helm-package.
+ helm package config/helm -d $(OUTPUT_DIR)
+
+#ifndef ignore-not-found
+# ignore-not-found = false
+#endif
+#
+#.PHONY: install
+#install: generate $(KUSTOMIZE) ## Install CRDs into the K8s cluster specified in ~/.kube/config.
+# $(KUSTOMIZE) build config/crd | kubectl apply -f -
+#
+#.PHONY: uninstall
+#uninstall: generate $(KUSTOMIZE) ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+# $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
+#
+#.PHONY: deploy
+#deploy: generate $(KUSTOMIZE) ## Deploy controller to the K8s cluster specified in ~/.kube/config.
+# $(MAKE) set-manifest-image \
+# MANIFEST_IMG=$(REGISTRY)/$(CAPKK_IMAGE_NAME)-$(ARCH) MANIFEST_TAG=$(TAG) \
+# TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
+# cd config/manager
+# $(KUSTOMIZE) build config/default | kubectl apply -f -
+#
+#.PHONY: undeploy
+#undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+# $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
+
+## --------------------------------------
+## Testing
+## --------------------------------------
+
+##@ test:
+
+#ifeq ($(shell go env GOOS),darwin) # Use the darwin/amd64 binary until an arm64 version is available
+# KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path --arch amd64 $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))
+#else
+# KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))
+#endif
+
+.PHONY: test
+test: $(SETUP_ENVTEST) ## Run unit and integration tests
+ KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test ./... $(TEST_ARGS)
+
+.PHONY: test-verbose
+test-verbose: ## Run unit and integration tests with verbose flag
+ $(MAKE) test TEST_ARGS="$(TEST_ARGS) -v"
+#
+#.PHONY: test-junit
+#test-junit: $(SETUP_ENVTEST) $(GOTESTSUM) ## Run unit and integration tests and generate a junit report
+# set +o errexit; (KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -json ./... $(TEST_ARGS); echo $$? > $(ARTIFACTS)/junit.exitcode) | tee $(ARTIFACTS)/junit.stdout
+# $(GOTESTSUM) --junitfile $(ARTIFACTS)/junit.xml --raw-command cat $(ARTIFACTS)/junit.stdout
+# exit $$(cat $(ARTIFACTS)/junit.exitcode)
+#
+#.PHONY: test-cover
+#test-cover: ## Run unit and integration tests and generate a coverage report
+# $(MAKE) test TEST_ARGS="$(TEST_ARGS) -coverprofile=out/coverage.out"
+# go tool cover -func=out/coverage.out -o out/coverage.txt
+# go tool cover -html=out/coverage.out -o out/coverage.html
+#
+#.PHONY: test-e2e
+#test-e2e: ## Run e2e tests
+# $(MAKE) -C $(TEST_DIR)/e2e run
+#
+#.PHONY: test-e2e-k3s
+#test-e2e-k3s: ## Run e2e tests
+# $(MAKE) -C $(TEST_DIR)/e2e run-k3s
+
+## --------------------------------------
+## Release
+## --------------------------------------
+
+##@ release:
+
+## latest git tag for the commit, e.g., v0.3.10
+#RELEASE_TAG ?= $(shell git describe --abbrev=0 2>/dev/null)
+#ifneq (,$(findstring -,$(RELEASE_TAG)))
+# PRE_RELEASE=true
+#endif
+## the previous release tag, e.g., v0.3.9, excluding pre-release tags
+#PREVIOUS_TAG ?= $(shell git tag -l | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$$" | sort -V | grep -B1 $(RELEASE_TAG) | head -n 1 2>/dev/null)
+#RELEASE_DIR := out
+#
+#$(RELEASE_DIR):
+# mkdir -p $(RELEASE_DIR)/
+#
+#.PHONY: release
+#release: clean-release ## Build and push container images using the latest git tag for the commit
+# @if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi
+# @if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi
+# git checkout "${RELEASE_TAG}"
+# ## Build binaries first.
+# GIT_VERSION=$(RELEASE_TAG) $(MAKE) release-binaries
+# # Set the manifest image to the production bucket.
+# $(MAKE) manifest-modification REGISTRY=$(PROD_REGISTRY)
+# ## Build the manifests
+# $(MAKE) release-manifests
+# ## Build the templates
+# $(MAKE) release-templates
+# ## Clean the git artifacts modified in the release process
+# $(MAKE) clean-release-git
+#
+#release-binaries: ## Build the binaries to publish with a release
+# RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=amd64 $(MAKE) release-binary
+# RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=amd64 $(MAKE) release-archive
+# RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=arm64 $(MAKE) release-binary
+# RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=arm64 $(MAKE) release-archive
+# RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=amd64 $(MAKE) release-binary
+# RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=amd64 $(MAKE) release-archive
+# RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=arm64 $(MAKE) release-binary
+# RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=arm64 $(MAKE) release-archive
+#
+#release-binary: $(RELEASE_DIR)
+# docker run \
+# --rm \
+# -e CGO_ENABLED=0 \
+# -e GOOS=$(GOOS) \
+# -e GOARCH=$(GOARCH) \
+# -e GOPROXY=$(GOPROXY) \
+# -v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \
+# -w /workspace \
+# golang:$(GO_VERSION) \
+# go build -a -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS) -extldflags '-static'" \
+# -o $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY)) $(RELEASE_BINARY)
+#
+#release-archive: $(RELEASE_DIR)
+# tar -czf $(RELEASE_DIR)/kubekey-$(RELEASE_TAG)-$(GOOS)-$(GOARCH).tar.gz -C $(RELEASE_DIR)/ $(notdir $(RELEASE_BINARY))
+# rm -rf $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY))
+#
+#.PHONY: manifest-modification
+#manifest-modification: # Set the manifest images to the staging/production bucket.
+# $(MAKE) set-manifest-image \
+# MANIFEST_IMG=$(REGISTRY)/$(CAPKK_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
+# TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
+# $(MAKE) set-manifest-image \
+# MANIFEST_IMG=$(REGISTRY)/$(K3S_BOOTSTRAP_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
+# TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_image_patch.yaml"
+# $(MAKE) set-manifest-image \
+# MANIFEST_IMG=$(REGISTRY)/$(K3S_CONTROL_PLANE_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
+# TARGET_RESOURCE="./controlplane/k3s/config/default/manager_image_patch.yaml"
+# $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./config/default/manager_pull_policy.yaml"
+# $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_pull_policy.yaml"
+# $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./controlplane/k3s/config/default/manager_pull_policy.yaml"
+#
+#.PHONY: release-manifests
+#release-manifests: $(RELEASE_DIR) $(KUSTOMIZE) ## Build the manifests to publish with a release
+# # Build capkk-components.
+# $(KUSTOMIZE) build config/default > $(RELEASE_DIR)/infrastructure-components.yaml
+# # Build bootstrap-components.
+# $(KUSTOMIZE) build bootstrap/k3s/config/default > $(RELEASE_DIR)/bootstrap-components.yaml
+# # Build control-plane-components.
+# $(KUSTOMIZE) build controlplane/k3s/config/default > $(RELEASE_DIR)/control-plane-components.yaml
+#
+# # Add metadata to the release artifacts
+# cp metadata.yaml $(RELEASE_DIR)/metadata.yaml
+#
+#.PHONY: release-templates
+#release-templates: $(RELEASE_DIR) ## Generate release templates
+# cp templates/cluster-template*.yaml $(RELEASE_DIR)/
+#
+#.PHONY: release-prod
+#release-prod: ## Build and push container images to the prod
+# REGISTRY=$(PROD_REGISTRY) TAG=$(RELEASE_TAG) $(MAKE) docker-build-all docker-push-all
+
+## --------------------------------------
+## Docker
+## --------------------------------------
+
+#
+#.PHONY: docker-push-all
+#docker-push-all: $(addprefix docker-push-,$(ALL_ARCH)) ## Push the docker images to be included in the release for all architectures + related multiarch manifests
+# $(MAKE) docker-push-manifest-capkk
+# $(MAKE) docker-push-manifest-k3s-bootstrap
+# $(MAKE) docker-push-manifest-k3s-control-plane
+#
+#docker-push-%:
+# $(MAKE) ARCH=$* docker-push
+#
+#.PHONY: docker-push
+#docker-push: ## Push the docker images
+# docker push $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
+# docker push $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
+# docker push $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
+#
+#.PHONY: docker-push-manifest-capkk
+#docker-push-manifest-capkk: ## Push the multiarch manifest for the capkk docker images
+# ## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
+# docker manifest create --amend $(CAPKK_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CAPKK_CONTROLLER_IMG)\-&:$(TAG)~g")
+# @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${CAPKK_CONTROLLER_IMG}:${TAG} ${CAPKK_CONTROLLER_IMG}-$${arch}:${TAG}; done
+# docker manifest push --purge $(CAPKK_CONTROLLER_IMG):$(TAG)
+#
+#.PHONY: docker-push-manifest-k3s-bootstrap
+#docker-push-manifest-k3s-bootstrap: ## Push the multiarch manifest for the k3s bootstrap docker images
+# ## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
+# docker manifest create --amend $(K3S_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(K3S_BOOTSTRAP_CONTROLLER_IMG)\-&:$(TAG)~g")
+# @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${K3S_BOOTSTRAP_CONTROLLER_IMG}:${TAG} ${K3S_BOOTSTRAP_CONTROLLER_IMG}-$${arch}:${TAG}; done
+# docker manifest push --purge $(K3S_BOOTSTRAP_CONTROLLER_IMG):$(TAG)
+#
+#.PHONY: docker-push-manifest-k3s-control-plane
+#docker-push-manifest-k3s-control-plane: ## Push the multiarch manifest for the k3s control plane docker images
+# ## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
+# docker manifest create --amend $(K3S_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(K3S_CONTROL_PLANE_CONTROLLER_IMG)\-&:$(TAG)~g")
+# @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${K3S_CONTROL_PLANE_CONTROLLER_IMG}:${TAG} ${K3S_CONTROL_PLANE_CONTROLLER_IMG}-$${arch}:${TAG}; done
+# docker manifest push --purge $(K3S_CONTROL_PLANE_CONTROLLER_IMG):$(TAG)
+#
+#.PHONY: set-manifest-pull-policy
+#set-manifest-pull-policy:
+# $(info Updating kustomize pull policy file for manager resources)
+# sed -i'' -e 's@imagePullPolicy: .*@imagePullPolicy: '"$(PULL_POLICY)"'@' $(TARGET_RESOURCE)
+#
+#.PHONY: set-manifest-image
+#set-manifest-image:
+# $(info Updating kustomize image patch file for manager resource)
+# sed -i'' -e 's@image: .*@image: '"${MANIFEST_IMG}:$(MANIFEST_TAG)"'@' $(TARGET_RESOURCE)
+
+## --------------------------------------
+## Cleanup / Verification
+## --------------------------------------
+
+##@ clean:
+
+.PHONY: clean
+clean: ## Remove all generated files
+ $(MAKE) clean-output clean-generated-deepcopy
+
+.PHONY: clean-output
+clean-output: ## Remove all generated binaries
+ rm -rf $(OUTPUT_DIR)
+
+#.PHONY: clean-release
+#clean-release: ## Remove the release folder
+# rm -rf $(RELEASE_DIR)
+
+#.PHONY: clean-release-git
+#clean-release-git: ## Restores the git files usually modified during a release
+# git restore ./*manager_image_patch.yaml ./*manager_pull_policy.yaml
+#
+#.PHONY: clean-generated-yaml
+#clean-generated-yaml: ## Remove files generated by conversion-gen from the mentioned dirs. Example SRC_DIRS="./api/v1beta1"
+# (IFS=','; for i in $(SRC_DIRS); do find $$i -type f -name '*.yaml' -exec rm -f {} \;; done)
+#
+.PHONY: clean-generated-deepcopy
+clean-generated-deepcopy: ## Remove files generated by conversion-gen from the mentioned dirs. Example SRC_DIRS="./api/v1beta1"
+ (IFS=','; for i in $(SRC_DIRS); do find $$i -type f -name 'zz_generated.deepcopy*' -exec rm -f {} \;; done)
+
+## --------------------------------------
+## Hack / Tools
+## --------------------------------------
+
+##@ hack/tools:
+
+.PHONY: $(CONTROLLER_GEN_BIN)
+$(CONTROLLER_GEN_BIN): $(CONTROLLER_GEN) ## Build a local copy of controller-gen.
+
+.PHONY: $(GOTESTSUM_BIN)
+$(GOTESTSUM_BIN): $(GOTESTSUM) ## Build a local copy of gotestsum.
+
+.PHONY: $(KUSTOMIZE_BIN)
+$(KUSTOMIZE_BIN): $(KUSTOMIZE) ## Build a local copy of kustomize.
+
+.PHONY: $(SETUP_ENVTEST_BIN)
+$(SETUP_ENVTEST_BIN): $(SETUP_ENVTEST) ## Build a local copy of setup-envtest.
+
+.PHONY: $(GOLANGCI_LINT_BIN)
+$(GOLANGCI_LINT_BIN): $(GOLANGCI_LINT) ## Build a local copy of golangci-lint
+
+.PHONY: $(GORELEASER_BIN)
+$(GORELEASER_BIN): $(GORELEASER) ## Build a local copy of goreleaser.
+
+$(CONTROLLER_GEN): # Build controller-gen into tools folder.
+ @if [ ! -f $(OUTPUT_TOOLS_DIR)/$(CONTROLLER_GEN_BIN) ]; then \
+ CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER); \
+ fi
+
+$(GOTESTSUM): # Build gotestsum into tools folder.
+ @if [ ! -f $(OUTPUT_TOOLS_DIR)/$(GOTESTSUM_BIN) ]; then \
+ CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GOTESTSUM_PKG) $(GOTESTSUM_BIN) $(GOTESTSUM_VER); \
+ fi
+
+$(KUSTOMIZE): # Build kustomize into tools folder.
+	@if [ ! -f $(OUTPUT_TOOLS_DIR)/$(KUSTOMIZE_BIN) ]; then \
+ CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(KUSTOMIZE_PKG) $(KUSTOMIZE_BIN) $(KUSTOMIZE_VER); \
+ fi
+
+$(SETUP_ENVTEST): # Build setup-envtest into tools folder.
+	@if [ ! -f $(OUTPUT_TOOLS_DIR)/$(SETUP_ENVTEST_BIN) ]; then \
+ CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(SETUP_ENVTEST_PKG) $(SETUP_ENVTEST_BIN) $(SETUP_ENVTEST_VER); \
+ fi
+
+$(GOLANGCI_LINT): # Build golangci-lint into tools folder.
+ @if [ ! -f $(OUTPUT_TOOLS_DIR)/$(GOLANGCI_LINT_BIN) ]; then \
+ CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GOLANGCI_LINT_PKG) $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER); \
+ fi
+
+$(GORELEASER): # Build goreleaser into tools folder.
+	@if [ ! -f $(OUTPUT_TOOLS_DIR)/$(GORELEASER_BIN) ]; then \
+ CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GORELEASER_PKG) $(GORELEASER_BIN) $(GORELEASER_VERSION); \
+ fi
diff --git a/feature/OWNERS b/feature/OWNERS
new file mode 100644
index 000000000..ee1951992
--- /dev/null
+++ b/feature/OWNERS
@@ -0,0 +1,20 @@
+approvers:
+ - pixiake
+ - 24sama
+ - rayzhou2017
+ - liangzai006
+ - redscholar
+
+reviewers:
+ - pixiake
+ - rayzhou2017
+ - zryfish
+ - benjaminhuo
+ - calvinyv
+ - FeynmanZhou
+ - huanggze
+ - wansir
+ - LinuxSuRen
+ - 24sama
+ - liangzai006
+ - redscholar
diff --git a/feature/README.md b/feature/README.md
new file mode 100644
index 000000000..63355776e
--- /dev/null
+++ b/feature/README.md
@@ -0,0 +1,28 @@
+# Background
+In the current kubekey, adding or modifying a command requires committing code and cutting a new release, which makes it hard to extend. This feature branch changes that:
+1. Tasks are separated from the framework (easier to extend; the design borrows from ansible playbooks)
+2. GitOps is supported (automation tasks can be managed through git)
+3. Connector extensions are supported
+4. Automated batch tasks can be managed in a cloud-native way
+
+# Installing kubekey
+## Install in kubernetes
+```shell
+helm upgrade --install --create-namespace -n kubekey-system kubekey kubekey-1.0.0.tgz
+```
+Then execute commands by creating Inventory, Config, and Pipeline resources.
+**Inventory**: the list of hosts that tasks run on. Defines host-related variables that are independent of the task templates. See [variable definition](docs/zh/201-variable.md).
+**Config**: sets global variables for the task templates. Defines template-related variables that are independent of hosts. See [variable definition](docs/zh/201-variable.md).
+**Pipeline**: specifies the playbook file to execute.
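+
+For example, a Pipeline that runs one of the builtin playbooks might look like the sketch below (the exact spec fields are defined by the CRDs in this repository; the field names here are illustrative, not authoritative):
+```yaml
+apiVersion: kubekey.kubesphere.io/v1
+kind: Pipeline
+metadata:
+  name: precheck
+spec:
+  # playbook file to execute (illustrative field name)
+  playbook: playbooks/precheck.yaml
+  # references to the Inventory and Config above (illustrative field names)
+  inventoryRef:
+    name: default
+  configRef:
+    name: default
+```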
+
+## Run as a binary
+Commands can also be executed directly from the command line:
+```shell
+kk run -i inventory.yaml -c config.yaml playbook.yaml
+```
+After the command runs, the corresponding Inventory, Config, and Pipeline resources are generated under the runtime directory of the working directory.
+
+# Documentation
+**[Project template authoring guide](docs/zh/001-project.md)**
+**[Template syntax](docs/zh/101-syntax.md)**
+**[Variable definition](docs/zh/201-variable.md)**
diff --git a/feature/build/controller-manager/Dockerfile b/feature/build/controller-manager/Dockerfile
new file mode 100644
index 000000000..7106daa51
--- /dev/null
+++ b/feature/build/controller-manager/Dockerfile
@@ -0,0 +1,37 @@
+ARG builder_image
+# Build the manager binary
+FROM ${builder_image} as builder
+
+ARG goproxy=https://goproxy.cn,direct
+ENV GOPROXY ${goproxy}
+
+WORKDIR /workspace
+
+COPY go.mod go.mod
+COPY go.sum go.sum
+
+# Cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
+RUN --mount=type=cache,target=/go/pkg/mod go mod download
+
+# Copy the go source
+COPY ./ ./
+
+ARG ldflags
+ARG build_tags
+
+ENV LDFLAGS ${ldflags}
+ENV BUILDTAGS ${build_tags}
+
+# Cache the go build in Go's compiler cache folder so we benefit from compiler caching across docker build invocations
+RUN --mount=type=cache,target=/root/.cache/go-build \
+ --mount=type=cache,target=/go/pkg/mod \
+ CGO_ENABLED=0 go build -trimpath -tags "$BUILDTAGS" -ldflags "$LDFLAGS" -o controller-manager cmd/controller-manager/controller_manager.go
+
+FROM alpine:3.19.0
+
+WORKDIR /kubekey
+
+COPY --from=builder /workspace/controller-manager /usr/local/bin/controller-manager
+
+ENTRYPOINT ["sh"]
diff --git a/feature/build/kk/Dockerfile b/feature/build/kk/Dockerfile
new file mode 100644
index 000000000..1e6231c8b
--- /dev/null
+++ b/feature/build/kk/Dockerfile
@@ -0,0 +1,44 @@
+ARG builder_image
+# Build the manager binary
+FROM ${builder_image} as builder
+
+ARG goproxy=https://goproxy.cn,direct
+ENV GOPROXY ${goproxy}
+
+WORKDIR /workspace
+
+COPY go.mod go.mod
+COPY go.sum go.sum
+
+# Cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
+RUN --mount=type=cache,target=/go/pkg/mod go mod download
+
+# Copy the go source
+COPY ./ ./
+
+ARG ldflags
+ARG build_tags
+
+ENV LDFLAGS ${ldflags}
+ENV BUILDTAGS ${build_tags}
+
+# Cache the go build in Go's compiler cache folder so we benefit from compiler caching across docker build invocations
+RUN --mount=type=cache,target=/root/.cache/go-build \
+ --mount=type=cache,target=/go/pkg/mod \
+ CGO_ENABLED=0 go build -trimpath -tags "$BUILDTAGS" -ldflags "$LDFLAGS" -o kk cmd/kk/kubekey.go
+
+FROM alpine:3.19.0
+
+WORKDIR /kubekey
+
+# Install tools
+RUN apk add --no-cache bash curl openssl
+RUN curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 && \
+ chmod 700 get_helm.sh && \
+ ./get_helm.sh
+
+COPY --from=ghcr.io/oras-project/oras:v1.1.0 /bin/oras /usr/local/bin/oras
+COPY --from=builder /workspace/kk /usr/local/bin/kk
+
+ENTRYPOINT ["sh"]
diff --git a/feature/builtin/Makefile b/feature/builtin/Makefile
new file mode 100644
index 000000000..a30cc158f
--- /dev/null
+++ b/feature/builtin/Makefile
@@ -0,0 +1,25 @@
+.PHONY: create-role
+create-role: ## create a role necessary file in roles
+ @echo "Creating role $(role)..."
+ @mkdir -p roles/$(role)/tasks
+ @echo "---" > roles/$(role)/tasks/main.yaml
+ @mkdir -p roles/$(role)/defaults
+ @echo "" > roles/$(role)/defaults/main.yaml
+ifeq ($(VARIABLE_NAME),full)
+ @mkdir -p roles/$(role)/handlers
+ @mkdir -p roles/$(role)/templates
+ @mkdir -p roles/$(role)/files
+ @mkdir -p roles/$(role)/vars
+ @mkdir -p roles/$(role)/meta
+ @echo "---" > roles/$(role)/handlers/main.yaml
+ @echo "---" > roles/$(role)/templates/main.yaml
+ @echo "---" > roles/$(role)/files/main.yaml
+ @echo "---" > roles/$(role)/vars/main.yaml
+ @echo "---" > roles/$(role)/defaults/main.yaml
+ @echo "---" > roles/$(role)/meta/main.yaml
+endif
+ @echo "Role $(role) created successfully"
+
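+# Example usage (role name is arbitrary):
+#   make create-role role=my-role
+#   make create-role role=my-role VARIABLE_NAME=full   # also scaffolds handlers/templates/files/vars/meta
+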
+.PHONY: help
+help: ## Display this help.
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n\nTargets:\n"} /^[0-9A-Za-z_-]+:.*?##/ { printf "  \033[36m%-45s\033[0m %s\n", $$1, $$2 } /^\$$\([0-9A-Za-z_-]+\):.*?##/ { gsub("_","-", $$1); printf "  \033[36m%-45s\033[0m %s\n", tolower(substr($$1, 3, length($$1)-7)), $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
diff --git a/feature/builtin/fs.go b/feature/builtin/fs.go
new file mode 100644
index 000000000..f47e412a4
--- /dev/null
+++ b/feature/builtin/fs.go
@@ -0,0 +1,33 @@
+//go:build builtin
+// +build builtin
+
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builtin
+
+import (
+ "embed"
+)
+
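+// BuiltinPipeline embeds the builtin playbooks and roles shipped with kk.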
+//go:embed playbooks roles
+var BuiltinPipeline embed.FS
+
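+// DefaultInventory holds the embedded default Inventory resource (inventory/inventory.yaml).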
+//go:embed inventory/inventory.yaml
+var DefaultInventory []byte
+
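+// DefaultConfig holds the embedded default Config resource (inventory/config.yaml).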
+//go:embed inventory/config.yaml
+var DefaultConfig []byte
diff --git a/feature/builtin/inventory/config.yaml b/feature/builtin/inventory/config.yaml
new file mode 100644
index 000000000..f807cbef1
--- /dev/null
+++ b/feature/builtin/inventory/config.yaml
@@ -0,0 +1,46 @@
+apiVersion: kubekey.kubesphere.io/v1
+kind: Config
+metadata:
+ name: default
+spec:
+  # zone for kk; determines where files are downloaded from
+# kkzone: cn
+ # work_dir is the directory where the artifact is extracted.
+# work_dir: /var/lib/kubekey/
+ # the version of kubernetes to be installed.
+ # should be greater than or equal to kube_version_min_required.
+ kube_version: v1.23.15
+ # helm binary
+ helm_version: v3.14.2
+ # cni binary
+ cni_version: v1.2.0
+ # calicoctl binary
+ calico_version: v3.27.2
+ # etcd binary
+ etcd_version: v3.5.6
+ # harbor image tag
+# harbor_version: v2.10.1
+ # docker-compose binary
+# dockercompose_version: v2.24.6
+ # registry image tag
+# registry_version: 2.8.3
+ # keepalived image tag
+# keepalived_version: stable
+ # crictl binary
+ crictl_version: v1.29.0
+ # docker binary
+ docker_version: 24.0.6
+ # cilium helm
+# cilium_version: 1.15.4
+ # kubeovn helm
+# kubeovn_version: 0.1.0
+ # hybridnet helm
+# hybridnet_version: 0.6.8
+ # containerd binary
+# containerd_version: v1.7.0
+ # runc binary
+# runc_version: v1.1.11
+ # cridockerd
+# cridockerd_version: v0.3.10
+ # nfs provisioner helm version
+# nfs_provisioner_version: 4.0.18
diff --git a/feature/builtin/inventory/inventory.yaml b/feature/builtin/inventory/inventory.yaml
new file mode 100644
index 000000000..b7f723366
--- /dev/null
+++ b/feature/builtin/inventory/inventory.yaml
@@ -0,0 +1,38 @@
+apiVersion: kubekey.kubesphere.io/v1
+kind: Inventory
+metadata:
+ name: default
+spec:
+  hosts: # you can set all nodes here, or assign nodes to specific groups.
+# node1:
+# connector:
+# type: ssh
+# host: node1
+# port: 22
+# user: root
+# password: 123456
+ groups:
+ # all kubernetes nodes.
+ k8s_cluster:
+ groups:
+ - kube_control_plane
+ - kube_worker
+ # control_plane nodes
+ kube_control_plane:
+ hosts:
+ - localhost
+ # worker nodes
+ kube_worker:
+ hosts:
+ - localhost
+ # etcd nodes when etcd_deployment_type is external
+ etcd:
+ hosts:
+ - localhost
+# image_registry:
+# hosts:
+# - localhost
+  # nfs nodes for registry storage and kubernetes nfs storage
+# nfs:
+# hosts:
+# - localhost
diff --git a/feature/builtin/playbooks/artifact_export.yaml b/feature/builtin/playbooks/artifact_export.yaml
new file mode 100644
index 000000000..bc3ba97fe
--- /dev/null
+++ b/feature/builtin/playbooks/artifact_export.yaml
@@ -0,0 +1,8 @@
+- hosts:
+ - localhost
+ roles:
+ - init/init-artifact
+ tasks:
+ - name: Export artifact
+ command: |
+ cd {{ .work_dir }}/kubekey && tar -czvf ../kubekey-artifact.tar.gz *
diff --git a/feature/builtin/playbooks/artifact_images.yaml b/feature/builtin/playbooks/artifact_images.yaml
new file mode 100644
index 000000000..377afb268
--- /dev/null
+++ b/feature/builtin/playbooks/artifact_images.yaml
@@ -0,0 +1,6 @@
+- hosts:
+ - localhost
+ tags: ["always"]
+ roles:
+ - init/init-artifact
+ - install/image-registry
diff --git a/feature/builtin/playbooks/certs_renew.yaml b/feature/builtin/playbooks/certs_renew.yaml
new file mode 100644
index 000000000..599d64668
--- /dev/null
+++ b/feature/builtin/playbooks/certs_renew.yaml
@@ -0,0 +1,34 @@
+- hosts:
+ - localhost
+ tags: ["certs"]
+ vars_files:
+ - vars/certs_renew.yaml
+ roles:
+ - init/init-artifact
+
+- hosts:
+ - etcd
+ tags: ["certs"]
+ vars_files:
+ - vars/certs_renew.yaml
+ roles:
+ - role: certs/renew-etcd
+ when: and (.groups.etcd | default list | len | lt 0) .renew_etcd
+
+- hosts:
+ - image_registry
+ tags: ["certs"]
+ vars_files:
+ - vars/certs_renew.yaml
+ roles:
+ - role: certs/renew-registry
+ when: and (.groups.image_registry | default list | len | lt 0) .renew_image_registry
+
+- hosts:
+ - kube_control_plane
+ vars_files:
+ - vars/certs_renew.yaml
+ tags: ["certs"]
+ roles:
+ - role: certs/renew-kubernetes
+ when: and (.groups.kube_control_plane | default list | len | lt 0) .renew_kubernetes
diff --git a/feature/builtin/playbooks/create_cluster.yaml b/feature/builtin/playbooks/create_cluster.yaml
new file mode 100644
index 000000000..1970b1034
--- /dev/null
+++ b/feature/builtin/playbooks/create_cluster.yaml
@@ -0,0 +1,50 @@
+---
+- import_playbook: hook/pre_install.yaml
+
+- import_playbook: precheck.yaml
+
+- import_playbook: init_os.yaml
+
+# install
+- hosts:
+ - nfs
+ gather_facts: true
+ roles:
+ - install/nfs
+
+- hosts:
+ - etcd
+ gather_facts: true
+ roles:
+ - install/etcd
+
+- hosts:
+ - image_registry
+ gather_facts: true
+ roles:
+ - install/image-registry
+
+- hosts:
+ - k8s_cluster
+ vars_files:
+ - vars/create_cluster_kubernetes.yaml
+ gather_facts: true
+ roles:
+ - install/cri
+ - install/kubernetes
+
+- hosts:
+ - kube_control_plane
+ roles:
+ - role: install/certs
+ when: .renew_certs.enabled
+
+- hosts:
+ - kube_control_plane|random
+ roles:
+ - addons/cni
+ - addons/kata
+ - addons/nfd
+ - addons/sc
+
+- import_playbook: hook/post_install.yaml
diff --git a/feature/builtin/playbooks/hook/post_install.yaml b/feature/builtin/playbooks/hook/post_install.yaml
new file mode 100644
index 000000000..25d37123f
--- /dev/null
+++ b/feature/builtin/playbooks/hook/post_install.yaml
@@ -0,0 +1,22 @@
+---
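+# Looks for <work_dir>/scripts/post_install_<inventory_name>.sh on the control node, copies it to
+# each host, then runs every post_install_*.sh found under /etc/kubekey/scripts on that host.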
+- name: Execute post install scripts
+ hosts:
+ - all
+ tasks:
+ - name: Copy post install scripts to remote
+ ignore_errors: yes
+ copy:
+ src: |
+ {{ .work_dir }}/scripts/post_install_{{ .inventory_name }}.sh
+ dest: |
+ /etc/kubekey/scripts/post_install_{{ .inventory_name }}.sh
+ mode: 0755
+ - name: Execute post install scripts
+ command: |
+ for file in /etc/kubekey/scripts/post_install_*.sh; do
+ if [ -f $file ]; then
+ # execute file
+ chmod +x $file
+ $file
+ fi
+ done
diff --git a/feature/builtin/playbooks/hook/pre_install.yaml b/feature/builtin/playbooks/hook/pre_install.yaml
new file mode 100644
index 000000000..627f906c5
--- /dev/null
+++ b/feature/builtin/playbooks/hook/pre_install.yaml
@@ -0,0 +1,22 @@
+---
+- name: Execute pre install scripts
+ hosts:
+ - all
+ tasks:
+ - name: Copy pre install scripts to remote
+ ignore_errors: yes
+ copy:
+ src: |
+ {{ .work_dir }}/scripts/pre_install_{{ .inventory_name }}.sh
+ dest: |
+ /etc/kubekey/scripts/pre_install_{{ .inventory_name }}.sh
+ mode: 0755
+ - name: Execute pre install scripts
+ command: |
+ for file in /etc/kubekey/scripts/pre_install_*.sh; do
+ if [ -f $file ]; then
+ # execute file
+ chmod +x $file
+ $file
+ fi
+ done
diff --git a/feature/builtin/playbooks/init_os.yaml b/feature/builtin/playbooks/init_os.yaml
new file mode 100644
index 000000000..7d8d03983
--- /dev/null
+++ b/feature/builtin/playbooks/init_os.yaml
@@ -0,0 +1,12 @@
+---
+- hosts:
+ - localhost
+ roles:
+ - init/init-artifact
+
+- hosts:
+ - etcd
+ - k8s_cluster
+    - image_registry
+ roles:
+ - init/init-os
diff --git a/feature/builtin/playbooks/init_registry.yaml b/feature/builtin/playbooks/init_registry.yaml
new file mode 100644
index 000000000..11cc3859d
--- /dev/null
+++ b/feature/builtin/playbooks/init_registry.yaml
@@ -0,0 +1,15 @@
+---
+- import_playbook: hook/pre_install.yaml
+
+- hosts:
+ - localhost
+ roles:
+ - init/init-artifact
+
+- hosts:
+ - image_registry
+ gather_facts: true
+ roles:
+ - install/image-registry
+
+- import_playbook: hook/post_install.yaml
diff --git a/feature/builtin/playbooks/precheck.yaml b/feature/builtin/playbooks/precheck.yaml
new file mode 100644
index 000000000..2cebd8110
--- /dev/null
+++ b/feature/builtin/playbooks/precheck.yaml
@@ -0,0 +1,16 @@
+---
+- hosts:
+ - localhost
+ roles:
+ - role: precheck/artifact_check
+ when: and .artifact.artifact_file (ne .artifact.artifact_file "")
+
+- hosts:
+ - k8s_cluster
+ - etcd
+ - image_registry
+ - nfs
+ gather_facts: true
+ tags: ["always"]
+ roles:
+ - precheck/env_check
diff --git a/feature/builtin/playbooks/vars/certs_renew.yaml b/feature/builtin/playbooks/vars/certs_renew.yaml
new file mode 100644
index 000000000..5c0c99308
--- /dev/null
+++ b/feature/builtin/playbooks/vars/certs_renew.yaml
@@ -0,0 +1,10 @@
+renew_etcd: true
+renew_image_registry: true
+renew_kubernetes: true
+kubernetes:
+ etcd:
+ deployment_type: external
+cri:
+ container_manager: docker
+image_registry:
+ type: harbor
diff --git a/feature/builtin/playbooks/vars/create_cluster_kubernetes.yaml b/feature/builtin/playbooks/vars/create_cluster_kubernetes.yaml
new file mode 100644
index 000000000..200a34d51
--- /dev/null
+++ b/feature/builtin/playbooks/vars/create_cluster_kubernetes.yaml
@@ -0,0 +1,27 @@
+global_registry: ""
+dockerio_registry: |
+ {{- if ne .global_registry "" -}}
+ {{ .global_registry }}
+ {{- else -}}
+ docker.io
+ {{- end -}}
+quayio_registry: |
+ {{- if ne .global_registry "" -}}
+ {{ .global_registry }}
+ {{- else -}}
+ quay.io
+ {{- end -}}
+ghcrio_registry: |
+ {{- if ne .global_registry "" -}}
+ {{ .global_registry }}
+ {{- else -}}
+ ghcr.io
+ {{- end -}}
+k8s_registry: |
+ {{- if ne .global_registry "" -}}
+ {{ .global_registry }}
+ {{- else -}}
+ registry.k8s.io
+ {{- end -}}
+
+security_enhancement: false
diff --git a/feature/builtin/roles/addons/cni/defaults/main.yaml b/feature/builtin/roles/addons/cni/defaults/main.yaml
new file mode 100644
index 000000000..332f78ba5
--- /dev/null
+++ b/feature/builtin/roles/addons/cni/defaults/main.yaml
@@ -0,0 +1,114 @@
+cni:
+ kube_proxy: |
+ {{ .kubernetes.kube_proxy.enabled | default true }}
+  # apiVersion for policy may change across kubernetes versions (policy/v1 is available since v1.21). https://kube-api.ninja
+  api_version_policy: |
+    {{- if .kube_version | semverCompare "<v1.21.0" }}
+    policy/v1beta1
+    {{- else }}
+    policy/v1
+    {{- end }}
+  calico:
+    # enable typha when the cluster has more than 50 nodes; it defaults to true in that case.
+    typha: |
+ {{- if gt (.groups.k8s_cluster | default list | len) 50 }}
+ true
+ {{- else }}
+ false
+ {{- end }}
+ veth_mtu: 0
+ ipip_mode: Always
+ vxlan_mode: Never
+    # true means enabled
+    ipv4pool_nat_outgoing: true
+    # true means enabled
+    default_ip_pool: true
+ # image
+ cni_image: |
+ {{ .dockerio_registry }}/calico/cni:{{ .calico_version }}
+ node_image: |
+ {{ .dockerio_registry }}/calico/node:{{ .calico_version }}
+ kube_controller_image: |
+ {{ .dockerio_registry }}/calico/kube-controllers:{{ .calico_version }}
+ typha_image: |
+ {{ .dockerio_registry }}/calico/typha:{{ .calico_version }}
+ replicas: 1
+ node_selector: {}
+ flannel:
+ # https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md
+ backend: vxlan
+ cni_plugin_image: |
+ {{ .dockerio_registry }}/flannel/flannel-cni-plugin:v1.4.0-flannel1
+ flannel_image: |
+ {{ .dockerio_registry }}/flannel/flannel:{{ .flannel_version }}
+ cilium:
+ # image repo
+ cilium_repository: |
+ {{ .quayio_registry }}/cilium/cilium
+ certgen_repository: |
+ {{ .quayio_registry }}/cilium/certgen
+ hubble_relay_repository: |
+ {{ .quayio_registry }}/cilium/hubble-relay
+ hubble_ui_backend_repository: |
+ {{ .quayio_registry }}/cilium/hubble-ui-backend
+ hubble_ui_repository: |
+ {{ .quayio_registry }}/cilium/hubble-ui
+ cilium_envoy_repository: |
+ {{ .quayio_registry }}/cilium/cilium-envoy
+ cilium_etcd_operator_repository: |
+ {{ .quayio_registry }}/cilium/cilium-etcd-operator
+ operator_repository: |
+ {{ .quayio_registry }}/cilium/operator
+ startup_script_repository: |
+ {{ .quayio_registry }}/cilium/startup-script
+ clustermesh_apiserver_repository: |
+ {{ .quayio_registry }}/cilium/clustermesh-apiserver
+ busybox_repository: |
+ {{ .dockerio_registry }}/library/busybox
+ spire_agent_repository: |
+ {{ .ghcrio_registry }}/spiffe/spire-agent
+ spire_server_repository: |
+ {{ .ghcrio_registry }}/spiffe/spire-server
+ k8s_endpoint: |
+ {{- if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}
+ {{ .kubernetes.control_plane_endpoint }}
+ {{- else }}
+ {{ .groups.kube_control_plane | default list | first }}
+ {{- end }}
+ operator_replicas: |
+ 1
+ k8s_port: |
+ {{ .kubernetes.apiserver.port | default 6443 }}
+ kubeovn:
+ replica: 1
+ registry: |
+ {{ .dockerio_registry }}/kubeovn
+ hybridnet:
+ registry: |
+ {{ .dockerio_registry }}
+# hybridnet_image: hybridnetdev/hybridnet
+# hybridnet_tag: v0.8.8
diff --git a/feature/builtin/roles/addons/cni/tasks/calico.yaml b/feature/builtin/roles/addons/cni/tasks/calico.yaml
new file mode 100644
index 000000000..2b0826915
--- /dev/null
+++ b/feature/builtin/roles/addons/cni/tasks/calico.yaml
@@ -0,0 +1,11 @@
+---
+- name: Generate calico manifest
+ template:
+ src: |
+ calico/{{ slice (.calico_version | splitList ".") 0 2 | join "." }}.yaml
+ dest: |
+ /etc/kubernetes/cni/calico-{{ .calico_version }}.yaml
+
+- name: Apply calico
+ command: |
+ kubectl apply -f /etc/kubernetes/cni/calico-{{ .calico_version }}.yaml --force
diff --git a/feature/builtin/roles/addons/cni/tasks/cilium.yaml b/feature/builtin/roles/addons/cni/tasks/cilium.yaml
new file mode 100644
index 000000000..8f441d202
--- /dev/null
+++ b/feature/builtin/roles/addons/cni/tasks/cilium.yaml
@@ -0,0 +1,37 @@
+---
+- name: Sync cilium helm chart to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/cni/cilium-{{ .cilium_version }}.tgz
+ dest: |
+ /etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz
+
+# https://docs.cilium.io/en/stable/installation/k8s-install-helm/
+- name: Install cilium
+ command: |
+ helm install cilium /etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz --namespace kube-system \
+ --set image.repository={{ .cni.cilium.cilium_repository }} \
+ --set preflight.image.repository={{ .cni.cilium.cilium_repository }} \
+ --set certgen.image.repository={{ .cni.cilium.certgen_repository }} \
+ --set hubble.relay.image.repository={{ .cni.cilium.hubble_relay_repository }} \
+ --set hubble.ui.backend.image.repository={{ .cni.cilium.hubble_ui_backend_repository }} \
+ --set hubble.ui.frontend.image.repository={{ .cni.cilium.hubble_ui_repository }} \
+ --set envoy.image.repository={{ .cni.cilium.cilium_envoy_repository }} \
+ --set etcd.image.repository={{ .cni.cilium.cilium_etcd_operator_repository }} \
+ --set operator.image.repository={{ .cni.cilium.operator_repository }} \
+ --set nodeinit.image.repository={{ .cni.cilium.startup_script_repository }} \
+ --set clustermesh.apiserver.image.repository={{ .cni.cilium.clustermesh_apiserver_repository }} \
+ --set authentication.mutual.spire.install.initImage.image.repository={{ .cni.cilium.busybox_repository }} \
+ --set authentication.mutual.spire.install.agent.image.repository={{ .cni.cilium.spire_agent_repository }} \
+ --set authentication.mutual.spire.install.server.image.repository={{ .cni.cilium.spire_server_repository }} \
+ --set operator.replicas={{ .cni.cilium.operator_replicas }} \
+ --set ipv6.enabled={{ .cni.ipv6_support }} \
+    --set ipv4NativeRoutingCIDR={{ .cni.kube_pods_v4_cidr }} \
+    {{- if .cni.ipv6_support }}
+    --set ipv6NativeRoutingCIDR={{ .cni.kube_pods_v6_cidr }} \
+    {{- end }}
+    {{- if not .cni.kube_proxy }}
+ --set kubeProxyReplacement=strict \
+ --set k8sServiceHost={{ .cni.cilium.k8s_endpoint }} \
+ --set k8sServicePort={{ .cni.cilium.k8s_port }}
+ {{- end }}
diff --git a/feature/builtin/roles/addons/cni/tasks/flannel.yaml b/feature/builtin/roles/addons/cni/tasks/flannel.yaml
new file mode 100644
index 000000000..e51b180ff
--- /dev/null
+++ b/feature/builtin/roles/addons/cni/tasks/flannel.yaml
@@ -0,0 +1,11 @@
+---
+# https://github.com/flannel-io/flannel/blob/master/Documentation/kubernetes.md
+- name: Generate flannel manifest
+ template:
+ src: flannel/flannel.yaml
+ dest: |
+ /etc/kubernetes/cni/flannel-{{ .flannel_version }}.yaml
+
+- name: Apply flannel
+ command: |
+ kubectl apply -f /etc/kubernetes/cni/flannel-{{ .flannel_version }}.yaml
diff --git a/feature/builtin/roles/addons/cni/tasks/hybridnet.yaml b/feature/builtin/roles/addons/cni/tasks/hybridnet.yaml
new file mode 100644
index 000000000..21fcf402d
--- /dev/null
+++ b/feature/builtin/roles/addons/cni/tasks/hybridnet.yaml
@@ -0,0 +1,19 @@
+---
+- name: Sync hybridnet helm chart to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/cni/hybridnet-{{ .hybridnet_version }}.tgz
+ dest: |
+ /etc/kubernetes/cni/hybridnet-{{ .hybridnet_version }}.tgz
+
+# https://artifacthub.io/packages/helm/hybridnet/hybridnet
+- name: Install hybridnet
+ command: |
+    helm install hybridnet /etc/kubernetes/cni/hybridnet-{{ .hybridnet_version }}.tgz --namespace kube-system \
+    --set image.registryURL={{ .cni.hybridnet.registry }} \
+    {{- if .cni.hybridnet.hybridnet_image }}
+    --set images.hybridnet.image={{ .cni.hybridnet.hybridnet_image }} \
+    {{- end }}
+    {{- if .cni.hybridnet.hybridnet_tag }}
+    --set images.hybridnet.tag={{ .cni.hybridnet.hybridnet_tag }}
+    {{- end }}
diff --git a/feature/builtin/roles/addons/cni/tasks/kubeovn.yaml b/feature/builtin/roles/addons/cni/tasks/kubeovn.yaml
new file mode 100644
index 000000000..5c892188c
--- /dev/null
+++ b/feature/builtin/roles/addons/cni/tasks/kubeovn.yaml
@@ -0,0 +1,30 @@
+---
+- name: Add kubeovn label to node
+ command: |
+ kubectl label node -lbeta.kubernetes.io/os=linux kubernetes.io/os=linux --overwrite
+ kubectl label node -lnode-role.kubernetes.io/control-plane kube-ovn/role=master --overwrite
+
+# kubeovn-0.1.0.tgz is helm version not helm appVersion
+- name: Sync kubeovn helm chart to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/cni/kubeovn-{{ .kubeovn_version }}.tgz
+ dest: |
+ /etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz
+
+# https://kubeovn.github.io/docs/stable/start/one-step-install/#helm-chart
+- name: Install kubeovn
+ command: |
+ helm install kubeovn /etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz --set replicaCount={{ .cni.kubeovn.replica }} \
+ {{- $ips := list }}
+ {{- range .groups.kube_control_plane | default list }}
+ {{- $ips = append $ips (index $.inventory_hosts . "internal_ipv4") }}
+ {{- end }}
+ --set MASTER_NODES={{ $ips | join "," }} \
+ --set global.registry.address={{ .cni.kubeovn.registry }} \
+ --set ipv4.POD_CIDR={{ .cni.kubeovn.kube_pods_v4_cidr }} --set ipv4.SVC_CIDR={{ .cni.kubeovn.kube_svc_cidr }} \
+ {{- if .cni.ipv6_support }}
+ --set networking.NET_STACK=dual_stack \
+ --set dual_stack.POD_CIDR={{ .cni.kubeovn.kube_pods_v4_cidr }},{{ .cni.kubeovn.kube_pods_v6_cidr }} \
+ --set dual_stack.SVC_CIDR={{ .cni.kubeovn.kube_svc_cidr }} \
+ {{- end }}
diff --git a/feature/builtin/roles/addons/cni/tasks/main.yaml b/feature/builtin/roles/addons/cni/tasks/main.yaml
new file mode 100644
index 000000000..9c3be2782
--- /dev/null
+++ b/feature/builtin/roles/addons/cni/tasks/main.yaml
@@ -0,0 +1,18 @@
+---
+- include_tasks: calico.yaml
+ when: .cni.kube_network_plugin | eq "calico"
+
+- include_tasks: flannel.yaml
+ when: .cni.kube_network_plugin | eq "flannel"
+
+- include_tasks: cilium.yaml
+ when: .cni.kube_network_plugin | eq "cilium"
+
+- include_tasks: kubeovn.yaml
+ when: .cni.kube_network_plugin | eq "kubeovn"
+
+- include_tasks: hybridnet.yaml
+  when: .cni.kube_network_plugin | eq "hybridnet"
+
+- include_tasks: multus.yaml
+ when: .cni.multus.enabled
diff --git a/feature/builtin/roles/addons/cni/tasks/multus.yaml b/feature/builtin/roles/addons/cni/tasks/multus.yaml
new file mode 100644
index 000000000..b91f30b68
--- /dev/null
+++ b/feature/builtin/roles/addons/cni/tasks/multus.yaml
@@ -0,0 +1,9 @@
+---
+- name: Generate multus manifest
+  template:
+    src: multus/multus.yaml
+    dest: /etc/kubernetes/cni/multus.yaml
+
+- name: Apply multus
+  command: |
+    kubectl apply -f /etc/kubernetes/cni/multus.yaml
diff --git a/feature/builtin/roles/addons/cni/templates/calico/pdg.yaml b/feature/builtin/roles/addons/cni/templates/calico/pdg.yaml
new file mode 100644
index 000000000..51093eb87
--- /dev/null
+++ b/feature/builtin/roles/addons/cni/templates/calico/pdg.yaml
@@ -0,0 +1,35 @@
+---
+# Source: calico/templates/calico-kube-controllers.yaml
+# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
+
+apiVersion: {{ .cni.api_version_policy }}
+kind: PodDisruptionBudget
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ k8s-app: calico-kube-controllers
+
+{{- if .cni.calico.typha }}
+---
+# Source: calico/templates/calico-typha.yaml
+# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
+
+apiVersion: {{ .cni.api_version_policy }}
+kind: PodDisruptionBudget
+metadata:
+ name: calico-typha
+ namespace: kube-system
+ labels:
+ k8s-app: calico-typha
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ k8s-app: calico-typha
+{{- end }}
diff --git a/feature/builtin/roles/addons/cni/templates/calico/v3.27.yaml b/feature/builtin/roles/addons/cni/templates/calico/v3.27.yaml
new file mode 100644
index 000000000..9f1df95c8
--- /dev/null
+++ b/feature/builtin/roles/addons/cni/templates/calico/v3.27.yaml
@@ -0,0 +1,5342 @@
+---
+# Source: calico/templates/calico-kube-controllers.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+---
+# Source: calico/templates/calico-node.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-node
+ namespace: kube-system
+---
+# Source: calico/templates/calico-node.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-cni-plugin
+ namespace: kube-system
+---
+# Source: calico/templates/calico-config.yaml
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: calico-config
+ namespace: kube-system
+data:
+ # You must set a non-zero value for Typha replicas below.
+ typha_service_name: "{{ if .cni.calico.typha }}calico-typha{{ else }}none{{ end }}"
+ # Configure the backend to use.
+ calico_backend: "bird"
+
+ # Configure the MTU to use for workload interfaces and tunnels.
+ # By default, MTU is auto-detected, and explicitly setting this field should not be required.
+ # You can override auto-detection by providing a non-zero value.
+ veth_mtu: "{{ .cni.calico.veth_mtu }}"
+
+ # The CNI network configuration to install on each node. The special
+ # values in this config will be automatically populated.
+ cni_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.3.1",
+ "plugins": [
+ {
+ "type": "calico",
+ "log_level": "info",
+ "log_file_path": "/var/log/calico/cni/cni.log",
+ "datastore_type": "kubernetes",
+ "nodename": "__KUBERNETES_NODE_NAME__",
+ "mtu": __CNI_MTU__,
+ "ipam": {
+ "type": "calico-ipam"
+ },
+ "policy": {
+ "type": "k8s"
+ },
+ "kubernetes": {
+ "kubeconfig": "__KUBECONFIG_FILEPATH__"
+ }
+ },
+ {
+ "type": "portmap",
+ "snat": true,
+ "capabilities": {"portMappings": true}
+ },
+ {
+ "type": "bandwidth",
+ "capabilities": {"bandwidth": true}
+ }
+ ]
+ }
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: bgpconfigurations.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: BGPConfiguration
+ listKind: BGPConfigurationList
+ plural: bgpconfigurations
+ singular: bgpconfiguration
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: BGPConfiguration contains the configuration for any BGP routing.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BGPConfigurationSpec contains the values of the BGP configuration.
+ properties:
+ asNumber:
+ description: 'ASNumber is the default AS number used by a node. [Default:
+ 64512]'
+ format: int32
+ type: integer
+ bindMode:
+ description: BindMode indicates whether to listen for BGP connections
+ on all addresses (None) or only on the node's canonical IP address
+ Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen
+ for BGP connections on all addresses.
+ type: string
+ communities:
+ description: Communities is a list of BGP community values and their
+ arbitrary names for tagging routes.
+ items:
+ description: Community contains standard or large community value
+ and its name.
+ properties:
+ name:
+ description: Name given to community value.
+ type: string
+ value:
+ description: Value must be of format `aa:nn` or `aa:nn:mm`.
+ For standard community use `aa:nn` format, where `aa` and
+ `nn` are 16 bit number. For large community use `aa:nn:mm`
+ format, where `aa`, `nn` and `mm` are 32 bit number. Where,
+ `aa` is an AS Number, `nn` and `mm` are per-AS identifier.
+ pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$
+ type: string
+ type: object
+ type: array
+ ignoredInterfaces:
+ description: IgnoredInterfaces indicates the network interfaces that
+ needs to be excluded when reading device routes.
+ items:
+ type: string
+ type: array
+ listenPort:
+ description: ListenPort is the port where BGP protocol should listen.
+ Defaults to 179
+ maximum: 65535
+ minimum: 1
+ type: integer
+ logSeverityScreen:
+ description: 'LogSeverityScreen is the log severity above which logs
+ are sent to the stdout. [Default: INFO]'
+ type: string
+ nodeMeshMaxRestartTime:
+ description: Time to allow for software restart for node-to-mesh peerings. When
+ specified, this is configured as the graceful restart timeout. When
+ not specified, the BIRD default of 120s is used. This field can
+ only be set on the default BGPConfiguration instance and requires
+ that NodeMesh is enabled
+ type: string
+ nodeMeshPassword:
+ description: Optional BGP password for full node-to-mesh peerings.
+ This field can only be set on the default BGPConfiguration instance
+ and requires that NodeMesh is enabled
+ properties:
+ secretKeyRef:
+ description: Selects a key of a secret in the node pod's namespace.
+ properties:
+ key:
+ description: The key of the secret to select from. Must be
+ a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be
+ defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ nodeToNodeMeshEnabled:
+ description: 'NodeToNodeMeshEnabled sets whether full node to node
+ BGP mesh is enabled. [Default: true]'
+ type: boolean
+ prefixAdvertisements:
+ description: PrefixAdvertisements contains per-prefix advertisement
+ configuration.
+ items:
+ description: PrefixAdvertisement configures advertisement properties
+ for the specified CIDR.
+ properties:
+ cidr:
+ description: CIDR for which properties should be advertised.
+ type: string
+ communities:
+ description: Communities can be list of either community names
+ already defined in `Specs.Communities` or community value
+ of format `aa:nn` or `aa:nn:mm`. For standard community use
+ `aa:nn` format, where `aa` and `nn` are 16 bit number. For
+ large community use `aa:nn:mm` format, where `aa`, `nn` and
+ `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and
+ `mm` are per-AS identifier.
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ serviceClusterIPs:
+ description: ServiceClusterIPs are the CIDR blocks from which service
+ cluster IPs are allocated. If specified, Calico will advertise these
+ blocks, as well as any cluster IPs within them.
+ items:
+ description: ServiceClusterIPBlock represents a single allowed ClusterIP
+ CIDR block.
+ properties:
+ cidr:
+ type: string
+ type: object
+ type: array
+ serviceExternalIPs:
+ description: ServiceExternalIPs are the CIDR blocks for Kubernetes
+ Service External IPs. Kubernetes Service ExternalIPs will only be
+ advertised if they are within one of these blocks.
+ items:
+ description: ServiceExternalIPBlock represents a single allowed
+ External IP CIDR block.
+ properties:
+ cidr:
+ type: string
+ type: object
+ type: array
+ serviceLoadBalancerIPs:
+ description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes
+ Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress
+ IPs will only be advertised if they are within one of these blocks.
+ items:
+ description: ServiceLoadBalancerIPBlock represents a single allowed
+ LoadBalancer IP CIDR block.
+ properties:
+ cidr:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: (devel)
+ creationTimestamp: null
+ name: bgpfilters.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: BGPFilter
+ listKind: BGPFilterList
+ plural: bgpfilters
+ singular: bgpfilter
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BGPFilterSpec contains the IPv4 and IPv6 filter rules of
+ the BGP Filter.
+ properties:
+ exportV4:
+ description: The ordered set of IPv4 BGPFilter rules acting on exporting
+ routes to a peer.
+ items:
+ description: BGPFilterRuleV4 defines a BGP filter rule consisting
+ a single IPv4 CIDR block and a filter action for this CIDR.
+ properties:
+ action:
+ type: string
+ cidr:
+ type: string
+ interface:
+ type: string
+ matchOperator:
+ type: string
+ source:
+ type: string
+ required:
+ - action
+ type: object
+ type: array
+ exportV6:
+ description: The ordered set of IPv6 BGPFilter rules acting on exporting
+ routes to a peer.
+ items:
+ description: BGPFilterRuleV6 defines a BGP filter rule consisting
+ a single IPv6 CIDR block and a filter action for this CIDR.
+ properties:
+ action:
+ type: string
+ cidr:
+ type: string
+ interface:
+ type: string
+ matchOperator:
+ type: string
+ source:
+ type: string
+ required:
+ - action
+ type: object
+ type: array
+ importV4:
+ description: The ordered set of IPv4 BGPFilter rules acting on importing
+ routes from a peer.
+ items:
+ description: BGPFilterRuleV4 defines a BGP filter rule consisting
+ a single IPv4 CIDR block and a filter action for this CIDR.
+ properties:
+ action:
+ type: string
+ cidr:
+ type: string
+ interface:
+ type: string
+ matchOperator:
+ type: string
+ source:
+ type: string
+ required:
+ - action
+ type: object
+ type: array
+ importV6:
+ description: The ordered set of IPv6 BGPFilter rules acting on importing
+ routes from a peer.
+ items:
+ description: BGPFilterRuleV6 defines a BGP filter rule consisting
+ a single IPv6 CIDR block and a filter action for this CIDR.
+ properties:
+ action:
+ type: string
+ cidr:
+ type: string
+ interface:
+ type: string
+ matchOperator:
+ type: string
+ source:
+ type: string
+ required:
+ - action
+ type: object
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
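+# Illustrative sketch (commented out): a BGPFilter that exports only an assumed
+# pod CIDR and rejects everything else, matching the BGPFilterRuleV4 schema
+# above (only `action` is required per rule).
+#
+# apiVersion: crd.projectcalico.org/v1
+# kind: BGPFilter
+# metadata:
+#   name: export-pod-cidr-only
+# spec:
+#   exportV4:
+#     - action: Accept
+#       matchOperator: In
+#       cidr: 10.244.0.0/16
+#     - action: Reject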
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: bgppeers.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: BGPPeer
+ listKind: BGPPeerList
+ plural: bgppeers
+ singular: bgppeer
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BGPPeerSpec contains the specification for a BGPPeer resource.
+ properties:
+ asNumber:
+ description: The AS Number of the peer.
+ format: int32
+ type: integer
+ filters:
+ description: The ordered set of BGPFilters applied on this BGP peer.
+ items:
+ type: string
+ type: array
+ keepOriginalNextHop:
+ description: Option to keep the original nexthop field when routes
+ are sent to a BGP peer. Setting "true" configures the selected BGP
+ peer's node to use "next hop keep;" instead of the default "next
+ hop self;" in the corresponding branch of that node's "bird.cfg".
+ type: boolean
+ maxRestartTime:
+ description: Time to allow for software restart. When specified,
+ this is configured as the graceful restart timeout. When not specified,
+ the BIRD default of 120s is used.
+ type: string
+ node:
+ description: The node name identifying the Calico node instance that
+ is targeted by this peer. If this is not set, and no nodeSelector
+ is specified, then this BGP peer selects all nodes in the cluster.
+ type: string
+ nodeSelector:
+ description: Selector for the nodes that should have this peering. When
+ this is set, the Node field must be empty.
+ type: string
+ numAllowedLocalASNumbers:
+ description: Maximum number of local AS numbers that are allowed in
+ the AS path for received routes. This removes BGP loop prevention
+ and should only be used if absolutely necessary.
+ format: int32
+ type: integer
+ password:
+ description: Optional BGP password for the peerings generated by this
+ BGPPeer resource.
+ properties:
+ secretKeyRef:
+ description: Selects a key of a secret in the node pod's namespace.
+ properties:
+ key:
+ description: The key of the secret to select from. Must be
+ a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be
+ defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ peerIP:
+ description: The IP address of the peer followed by an optional port
+ number to peer with. If a port number is given, the format should be
+ `[<IPv6>]:port` for IPv6 or `<IPv4>:<port>` for IPv4. If the optional
+ port number is not set, and this peer IP and ASNumber belong to a
+ calico/node with ListenPort set in BGPConfiguration, then we use that
+ port to peer.
+ type: string
+ peerSelector:
+ description: Selector for the remote nodes to peer with. When this
+ is set, the PeerIP and ASNumber fields must be empty. For each
+ peering between the local node and selected remote nodes, we configure
+ an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified,
+ and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The
+ remote AS number comes from the remote node's NodeBGPSpec.ASNumber,
+ or the global default if that is not set.
+ type: string
+ reachableBy:
+ description: Add an exact, i.e. /32, static route toward peer IP in
+ order to prevent route flapping. ReachableBy contains the address
+ of the gateway through which the peer can be reached.
+ type: string
+ sourceAddress:
+ description: Specifies whether and how to configure a source address
+ for the peerings generated by this BGPPeer resource. Default value
+ "UseNodeIP" means to configure the node IP as the source address. "None"
+ means not to configure a source address.
+ type: string
+ ttlSecurity:
+ description: TTLSecurity enables the generalized TTL security mechanism
+ (GTSM) which protects against spoofed packets by ignoring received
+ packets with a smaller than expected TTL value. The provided value
+ is the number of hops (edges) between the peers.
+ type: integer
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
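+# Illustrative sketch (commented out): a global BGPPeer that peers every node
+# with an assumed top-of-rack router, using the peerIP/asNumber fields from
+# the schema above; the address and AS number are placeholders.
+#
+# apiVersion: crd.projectcalico.org/v1
+# kind: BGPPeer
+# metadata:
+#   name: rack1-tor
+# spec:
+#   peerIP: 192.168.0.1
+#   asNumber: 64512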
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: blockaffinities.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: BlockAffinity
+ listKind: BlockAffinityList
+ plural: blockaffinities
+ singular: blockaffinity
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BlockAffinitySpec contains the specification for a BlockAffinity
+ resource.
+ properties:
+ cidr:
+ type: string
+ deleted:
+ description: Deleted indicates that this block affinity is being deleted.
+ This field is a string for compatibility with older releases that
+ mistakenly treat this field as a string.
+ type: string
+ node:
+ type: string
+ state:
+ type: string
+ required:
+ - cidr
+ - deleted
+ - node
+ - state
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
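+# Note: BlockAffinity resources are written by Calico IPAM to record which
+# node an IPAM block is affine to; they are not intended to be created by
+# hand. A stored object simply carries the four required fields above, e.g.
+# (assumed illustrative values):
+#   cidr: 10.244.1.0/26, node: worker-1, state: confirmed, deleted: "false"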
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: (devel)
+ creationTimestamp: null
+ name: caliconodestatuses.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: CalicoNodeStatus
+ listKind: CalicoNodeStatusList
+ plural: caliconodestatuses
+ singular: caliconodestatus
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus
+ resource.
+ properties:
+ classes:
+ description: Classes declares the types of information to monitor
+ for this calico/node, and allows for selective status reporting
+ about certain subsets of information.
+ items:
+ type: string
+ type: array
+ node:
+ description: The node name identifies the Calico node instance for
+ node status.
+ type: string
+ updatePeriodSeconds:
+ description: UpdatePeriodSeconds is the period at which CalicoNodeStatus
+ should be updated. Set to 0 to disable CalicoNodeStatus refresh.
+ Maximum update period is one day.
+ format: int32
+ type: integer
+ type: object
+ status:
+ description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus.
+ No validation needed for status since it is updated by Calico.
+ properties:
+ agent:
+ description: Agent holds agent status on the node.
+ properties:
+ birdV4:
+ description: BIRDV4 represents the latest observed status of bird4.
+ properties:
+ lastBootTime:
+ description: LastBootTime holds the value of lastBootTime
+ from bird.ctl output.
+ type: string
+ lastReconfigurationTime:
+ description: LastReconfigurationTime holds the value of lastReconfigTime
+ from bird.ctl output.
+ type: string
+ routerID:
+ description: Router ID used by bird.
+ type: string
+ state:
+ description: The state of the BGP Daemon.
+ type: string
+ version:
+ description: Version of the BGP daemon
+ type: string
+ type: object
+ birdV6:
+ description: BIRDV6 represents the latest observed status of bird6.
+ properties:
+ lastBootTime:
+ description: LastBootTime holds the value of lastBootTime
+ from bird.ctl output.
+ type: string
+ lastReconfigurationTime:
+ description: LastReconfigurationTime holds the value of lastReconfigTime
+ from bird.ctl output.
+ type: string
+ routerID:
+ description: Router ID used by bird.
+ type: string
+ state:
+ description: The state of the BGP Daemon.
+ type: string
+ version:
+ description: Version of the BGP daemon
+ type: string
+ type: object
+ type: object
+ bgp:
+ description: BGP holds node BGP status.
+ properties:
+ numberEstablishedV4:
+ description: The total number of IPv4 established bgp sessions.
+ type: integer
+ numberEstablishedV6:
+ description: The total number of IPv6 established bgp sessions.
+ type: integer
+ numberNotEstablishedV4:
+ description: The total number of IPv4 non-established bgp sessions.
+ type: integer
+ numberNotEstablishedV6:
+ description: The total number of IPv6 non-established bgp sessions.
+ type: integer
+ peersV4:
+ description: PeersV4 represents IPv4 BGP peers status on the node.
+ items:
+ description: CalicoNodePeer contains the status of BGP peers
+ on the node.
+ properties:
+ peerIP:
+ description: IP address of the peer whose condition we are
+ reporting.
+ type: string
+ since:
+ description: Since the state or reason last changed.
+ type: string
+ state:
+ description: State is the BGP session state.
+ type: string
+ type:
+ description: Type indicates whether this peer is configured
+ via the node-to-node mesh, or via an explicit global or
+ per-node BGPPeer object.
+ type: string
+ type: object
+ type: array
+ peersV6:
+ description: PeersV6 represents IPv6 BGP peers status on the node.
+ items:
+ description: CalicoNodePeer contains the status of BGP peers
+ on the node.
+ properties:
+ peerIP:
+ description: IP address of the peer whose condition we are
+ reporting.
+ type: string
+ since:
+ description: Since the state or reason last changed.
+ type: string
+ state:
+ description: State is the BGP session state.
+ type: string
+ type:
+ description: Type indicates whether this peer is configured
+ via the node-to-node mesh, or via an explicit global or
+ per-node BGPPeer object.
+ type: string
+ type: object
+ type: array
+ required:
+ - numberEstablishedV4
+ - numberEstablishedV6
+ - numberNotEstablishedV4
+ - numberNotEstablishedV6
+ type: object
+ lastUpdated:
+ description: LastUpdated is a timestamp representing the server time
+ when the CalicoNodeStatus object was last updated. It is represented
+ in RFC3339 form and is in UTC.
+ format: date-time
+ nullable: true
+ type: string
+ routes:
+ description: Routes reports routes known to the Calico BGP daemon
+ on the node.
+ properties:
+ routesV4:
+ description: RoutesV4 represents IPv4 routes on the node.
+ items:
+ description: CalicoNodeRoute contains the status of BGP routes
+ on the node.
+ properties:
+ destination:
+ description: Destination of the route.
+ type: string
+ gateway:
+ description: Gateway for the destination.
+ type: string
+ interface:
+ description: Interface for the destination
+ type: string
+ learnedFrom:
+ description: LearnedFrom contains information regarding
+ where this route originated.
+ properties:
+ peerIP:
+ description: If sourceType is NodeMesh or BGPPeer, IP
+ address of the router that sent us this route.
+ type: string
+ sourceType:
+ description: Type of the source where a route is learned
+ from.
+ type: string
+ type: object
+ type:
+ description: Type indicates if the route is being used for
+ forwarding or not.
+ type: string
+ type: object
+ type: array
+ routesV6:
+ description: RoutesV6 represents IPv6 routes on the node.
+ items:
+ description: CalicoNodeRoute contains the status of BGP routes
+ on the node.
+ properties:
+ destination:
+ description: Destination of the route.
+ type: string
+ gateway:
+ description: Gateway for the destination.
+ type: string
+ interface:
+ description: Interface for the destination
+ type: string
+ learnedFrom:
+ description: LearnedFrom contains information regarding
+ where this route originated.
+ properties:
+ peerIP:
+ description: If sourceType is NodeMesh or BGPPeer, IP
+ address of the router that sent us this route.
+ type: string
+ sourceType:
+ description: Type of the source where a route is learned
+ from.
+ type: string
+ type: object
+ type:
+ description: Type indicates if the route is being used for
+ forwarding or not.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
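+# Illustrative sketch (commented out): requesting status reporting for an
+# assumed node name; Calico then populates the status fields described above
+# roughly every updatePeriodSeconds.
+#
+# apiVersion: crd.projectcalico.org/v1
+# kind: CalicoNodeStatus
+# metadata:
+#   name: node1-status
+# spec:
+#   node: node1
+#   classes:
+#     - Agent
+#     - BGP
+#     - Routes
+#   updatePeriodSeconds: 10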
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterinformations.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: ClusterInformation
+ listKind: ClusterInformationList
+ plural: clusterinformations
+ singular: clusterinformation
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: ClusterInformation contains the cluster specific information.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ClusterInformationSpec contains the values describing
+ the cluster.
+ properties:
+ calicoVersion:
+ description: CalicoVersion is the version of Calico that the cluster
+ is running
+ type: string
+ clusterGUID:
+ description: ClusterGUID is the GUID of the cluster
+ type: string
+ clusterType:
+ description: ClusterType describes the type of the cluster
+ type: string
+ datastoreReady:
+ description: DatastoreReady is used during significant datastore migrations
+ to signal to components such as Felix that it should wait before
+ accessing the datastore.
+ type: boolean
+ variant:
+ description: Variant declares which variant of Calico should be active.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
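+# Note: the singleton ClusterInformation resource (named "default") is created
+# and maintained by Calico's own components rather than by operators, who
+# normally only read it, e.g. to check calicoVersion or datastoreReady.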
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: felixconfigurations.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: FelixConfiguration
+ listKind: FelixConfigurationList
+ plural: felixconfigurations
+ singular: felixconfiguration
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: Felix Configuration contains the configuration for Felix.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: FelixConfigurationSpec contains the values of the Felix configuration.
+ properties:
+ allowIPIPPacketsFromWorkloads:
+ description: 'AllowIPIPPacketsFromWorkloads controls whether Felix
+ will add a rule to drop IPIP encapsulated traffic from workloads
+ [Default: false]'
+ type: boolean
+ allowVXLANPacketsFromWorkloads:
+ description: 'AllowVXLANPacketsFromWorkloads controls whether Felix
+ will add a rule to drop VXLAN encapsulated traffic from workloads
+ [Default: false]'
+ type: boolean
+ awsSrcDstCheck:
+ description: 'Set source-destination-check on AWS EC2 instances. Accepted
+ value must be one of "DoNothing", "Enable" or "Disable". [Default:
+ DoNothing]'
+ enum:
+ - DoNothing
+ - Enable
+ - Disable
+ type: string
+ bpfCTLBLogFilter:
+ description: 'BPFCTLBLogFilter specifies what is logged by the connect-time
+ load balancer when BPFLogLevel is debug. Currently it has to be
+ specified as ''all'' when BPFLogFilters is set in order to see CTLB logs.
+ [Default: unset - means logs are emitted when BPFLogLevel is debug
+ and BPFLogFilters is not set.]'
+ type: string
+ bpfConnectTimeLoadBalancing:
+ description: 'BPFConnectTimeLoadBalancing when in BPF mode, controls
+ whether Felix installs the connect-time load balancer. The connect-time
+ load balancer is required for the host to be able to reach Kubernetes
+ services and it improves the performance of pod-to-service connections. When
+ set to TCP, connect time load balancing is available only for services
+ with TCP ports. [Default: TCP]'
+ enum:
+ - TCP
+ - Enabled
+ - Disabled
+ type: string
+ bpfConnectTimeLoadBalancingEnabled:
+ description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode,
+ controls whether Felix installs the connection-time load balancer. The
+ connect-time load balancer is required for the host to be able to
+ reach Kubernetes services and it improves the performance of pod-to-service
+ connections. The only reason to disable it is for debugging purposes.
+ This will be deprecated. Use BPFConnectTimeLoadBalancing [Default:
+ true]'
+ type: boolean
+ bpfDSROptoutCIDRs:
+ description: BPFDSROptoutCIDRs is a list of CIDRs which are excluded
+ from DSR. That is, clients in those CIDRs will access nodeports
+ as if BPFExternalServiceMode were set to Tunnel.
+ items:
+ type: string
+ type: array
+ bpfDataIfacePattern:
+ description: BPFDataIfacePattern is a regular expression that controls
+ which interfaces Felix should attach BPF programs to in order to
+ catch traffic to/from the network. This needs to match the interfaces
+ that Calico workload traffic flows over as well as any interfaces
+ that handle incoming traffic to nodeports and services from outside
+ the cluster. It should not match the workload interfaces (usually
+ named cali...).
+ type: string
+ bpfDisableGROForIfaces:
+ description: BPFDisableGROForIfaces is a regular expression that controls
+ the interfaces for which Felix should disable the Generic Receive Offload
+ [GRO] option. It should not match the workload interfaces (usually
+ named cali...).
+ type: string
+ bpfDisableUnprivileged:
+ description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled
+ sysctl to disable unprivileged use of BPF. This ensures that unprivileged
+ users cannot access Calico''s BPF maps and cannot insert their own
+ BPF programs to interfere with Calico''s. [Default: true]'
+ type: boolean
+ bpfEnabled:
+ description: 'BPFEnabled, if enabled Felix will use the BPF dataplane.
+ [Default: false]'
+ type: boolean
+ bpfEnforceRPF:
+ description: 'BPFEnforceRPF enforces strict RPF on all host interfaces
+ with BPF programs regardless of the per-interface or global
+ setting. Possible values are Disabled, Strict or Loose. [Default:
+ Loose]'
+ pattern: ^(?i)(Disabled|Strict|Loose)?$
+ type: string
+ bpfExcludeCIDRsFromNAT:
+ description: BPFExcludeCIDRsFromNAT is a list of CIDRs that are to
+ be excluded from NAT resolution so that the host can handle them. A
+ typical use case is a node-local DNS cache.
+ items:
+ type: string
+ type: array
+ bpfExtToServiceConnmark:
+ description: 'BPFExtToServiceConnmark, in BPF mode, controls a 32bit
+ mark that is set on connections from an external client to a local
+ service. This mark allows us to control how packets of that connection
+ are routed within the host and how the routing is interpreted by
+ the RPF check. [Default: 0]'
+ type: integer
+ bpfExternalServiceMode:
+ description: 'BPFExternalServiceMode in BPF mode, controls how connections
+ from outside the cluster to services (node ports and cluster IPs)
+ are forwarded to remote workloads. If set to "Tunnel" then both
+ request and response traffic is tunneled to the remote node. If
+ set to "DSR", the request traffic is tunneled but the response traffic
+ is sent directly from the remote node. In "DSR" mode, the remote
+ node appears to use the IP of the ingress node; this requires a
+ permissive L2 network. [Default: Tunnel]'
+ pattern: ^(?i)(Tunnel|DSR)?$
+ type: string
+ bpfForceTrackPacketsFromIfaces:
+ description: 'BPFForceTrackPacketsFromIfaces in BPF mode, forces traffic
+ from these interfaces to skip Calico''s iptables NOTRACK rule, allowing
+ traffic from those interfaces to be tracked by Linux conntrack. Should
+ only be used for interfaces that are not used for the Calico fabric. For
+ example, a docker bridge device for non-Calico-networked containers.
+ [Default: docker+]'
+ items:
+ type: string
+ type: array
+ bpfHostConntrackBypass:
+ description: 'BPFHostConntrackBypass controls whether to bypass Linux
+ conntrack in BPF mode for workloads and services. [Default: true
+ - bypass Linux conntrack]'
+ type: boolean
+ bpfHostNetworkedNATWithoutCTLB:
+ description: 'BPFHostNetworkedNATWithoutCTLB when in BPF mode, controls
+ whether Felix does a NAT without CTLB. This along with BPFConnectTimeLoadBalancing
+ determines the CTLB behavior. [Default: Enabled]'
+ enum:
+ - Enabled
+ - Disabled
+ type: string
+ bpfKubeProxyEndpointSlicesEnabled:
+ description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls
+ whether Felix's embedded kube-proxy accepts EndpointSlices or not.
+ type: boolean
+ bpfKubeProxyIptablesCleanupEnabled:
+ description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF
+ mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s
+ iptables chains. Should only be enabled if kube-proxy is not running. [Default:
+ true]'
+ type: boolean
+ bpfKubeProxyMinSyncPeriod:
+ description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the
+ minimum time between updates to the dataplane for Felix''s embedded
+ kube-proxy. Lower values give reduced set-up latency. Higher values
+ reduce Felix CPU usage by batching up more work. [Default: 1s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ bpfL3IfacePattern:
+ description: BPFL3IfacePattern is a regular expression that allows tunnel
+ devices like wireguard or vxlan (i.e., L3 devices) to be listed
+ in addition to BPFDataIfacePattern. That is, tunnel interfaces not
+ created by Calico, that Calico workload traffic flows over as well
+ as any interfaces that handle incoming traffic to nodeports and
+ services from outside the cluster.
+ type: string
+ bpfLogFilters:
+ additionalProperties:
+ type: string
+ description: "BPFLogFilters is a map of key=values where the value
+ is a pcap filter expression and the key is an interface name with
+ 'all' denoting all interfaces, 'weps' all workload endpoints and
+ 'heps' all host endpoints. \n When specified as an env var, it accepts
+ a comma-separated list of key=values. [Default: unset - means all
+ debug logs are emitted]"
+ type: object
+ bpfLogLevel:
+ description: 'BPFLogLevel controls the log level of the BPF programs
+ when in BPF dataplane mode. One of "Off", "Info", or "Debug". The
+ logs are emitted to the BPF trace pipe, accessible with the command
+ `tc exec bpf debug`. [Default: Off].'
+ pattern: ^(?i)(Off|Info|Debug)?$
+ type: string
+ bpfMapSizeConntrack:
+ description: 'BPFMapSizeConntrack sets the size for the conntrack
+ map. This map must be large enough to hold an entry for each active
+ connection. Warning: changing the size of the conntrack map can
+ cause disruption.'
+ type: integer
+ bpfMapSizeIPSets:
+ description: BPFMapSizeIPSets sets the size for ipsets map. The IP
+ sets map must be large enough to hold an entry for each endpoint
+ matched by every selector in the source/destination matches in network
+ policy. Selectors such as "all()" can result in large numbers of
+ entries (one entry per endpoint in that case).
+ type: integer
+ bpfMapSizeIfState:
+ description: BPFMapSizeIfState sets the size for ifstate map. The
+ ifstate map must be large enough to hold an entry for each device
+ (host + workloads) on a host.
+ type: integer
+ bpfMapSizeNATAffinity:
+ type: integer
+ bpfMapSizeNATBackend:
+ description: BPFMapSizeNATBackend sets the size for the NAT backend map.
+ This is the total number of endpoints, which is typically larger
+ than the number of services.
+ type: integer
+ bpfMapSizeNATFrontend:
+ description: BPFMapSizeNATFrontend sets the size for nat front end
+ map. FrontendMap should be large enough to hold an entry for each
+ nodeport, external IP and each port in each service.
+ type: integer
+ bpfMapSizeRoute:
+ description: BPFMapSizeRoute sets the size for the routes map. The
+ routes map should be large enough to hold one entry per workload
+ and a handful of entries per host (enough to cover its own IPs and
+ tunnel IPs).
+ type: integer
+ bpfPSNATPorts:
+ anyOf:
+ - type: integer
+ - type: string
+ description: 'BPFPSNATPorts sets the range from which we randomly
+ pick a port if there is a source port collision. This should be
+ within the ephemeral range as defined by RFC 6056 (1024–65535) and
+ preferably outside the ephemeral ranges used by common operating
+ systems. Linux uses 32768–60999, while others mostly use the IANA
+ defined range 49152–65535. It is not necessarily a problem if this
+ range overlaps with the ranges used by the operating system. Both ends of the range
+ are inclusive. [Default: 20000:29999]'
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ bpfPolicyDebugEnabled:
+ description: BPFPolicyDebugEnabled when true, Felix records detailed
+ information about the BPF policy programs, which can be examined
+ with the calico-bpf command-line tool.
+ type: boolean
+ chainInsertMode:
+ description: 'ChainInsertMode controls whether Felix hooks the kernel''s
+ top-level iptables chains by inserting a rule at the top of the
+ chain or by appending a rule at the bottom. insert is the safe default
+ since it prevents Calico''s rules from being bypassed. If you switch
+ to append mode, be sure that the other rules in the chains signal
+ acceptance by falling through to the Calico rules, otherwise the
+ Calico policy will be bypassed. [Default: insert]'
+ pattern: ^(?i)(insert|append)?$
+ type: string
+ dataplaneDriver:
+ description: DataplaneDriver is the filename of the external dataplane
+ driver to use. Only used if UseInternalDataplaneDriver is set to false.
+ type: string
+ dataplaneWatchdogTimeout:
+ description: "DataplaneWatchdogTimeout is the readiness/liveness timeout
+ used for Felix's (internal) dataplane driver. Increase this value
+ if you experience spurious non-ready or non-live events when Felix
+ is under heavy load. Decrease the value to get felix to report non-live
+ or non-ready more quickly. [Default: 90s] \n Deprecated: replaced
+ by the generic HealthTimeoutOverrides."
+ type: string
+ debugDisableLogDropping:
+ type: boolean
+ debugMemoryProfilePath:
+ type: string
+ debugSimulateCalcGraphHangAfter:
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ debugSimulateDataplaneHangAfter:
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ defaultEndpointToHostAction:
+ description: 'DefaultEndpointToHostAction controls what happens to
+ traffic that goes from a workload endpoint to the host itself (after
+ the traffic hits the endpoint egress policy). By default Calico
+ blocks traffic from workload endpoints to the host itself with an
+ iptables "DROP" action. If you want to allow some or all traffic
+ from endpoint to host, set this parameter to RETURN or ACCEPT. Use
+ RETURN if you have your own rules in the iptables "INPUT" chain;
+ Calico will insert its rules at the top of that chain, then "RETURN"
+ packets to the "INPUT" chain once it has completed processing workload
+ endpoint egress policy. Use ACCEPT to unconditionally accept packets
+ from workloads after processing workload endpoint egress policy.
+ [Default: Drop]'
+ pattern: ^(?i)(Drop|Accept|Return)?$
+ type: string
+ deviceRouteProtocol:
+ description: This defines the route protocol added to programmed device
+ routes; by default this will be RTPROT_BOOT when left blank.
+ type: integer
+ deviceRouteSourceAddress:
+ description: This is the IPv4 source address to use on programmed
+ device routes. By default the source address is left blank, leaving
+ the kernel to choose the source address used.
+ type: string
+ deviceRouteSourceAddressIPv6:
+ description: This is the IPv6 source address to use on programmed
+ device routes. By default the source address is left blank, leaving
+ the kernel to choose the source address used.
+ type: string
+ disableConntrackInvalidCheck:
+ type: boolean
+ endpointReportingDelay:
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ endpointReportingEnabled:
+ type: boolean
+ externalNodesList:
+ description: ExternalNodesCIDRList is a list of CIDRs of external, non-Calico
+ nodes which may source tunnel traffic and have the tunneled traffic
+ accepted at Calico nodes.
+ items:
+ type: string
+ type: array
+ failsafeInboundHostPorts:
+ description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports
+ and CIDRs that Felix will allow incoming traffic to host endpoints
+ on irrespective of the security policy. This is useful to avoid
+ accidentally cutting off a host with incorrect configuration. For
+ back-compatibility, if the protocol is not specified, it defaults
+ to "tcp". If a CIDR is not specified, it will allow traffic from
+ all addresses. To disable all inbound host ports, use the value
+ none. The default value allows ssh access and DHCP. [Default: tcp:22,
+ udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]'
+ items:
+ description: ProtoPort is a combination of protocol, port, and CIDR.
+ Protocol and port must be specified.
+ properties:
+ net:
+ type: string
+ port:
+ type: integer
+ protocol:
+ type: string
+ required:
+ - port
+ - protocol
+ type: object
+ type: array
+ failsafeOutboundHostPorts:
+ description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports
+ and CIDRs that Felix will allow outgoing traffic from host endpoints
+ to irrespective of the security policy. This is useful to avoid
+ accidentally cutting off a host with incorrect configuration. For
+ back-compatibility, if the protocol is not specified, it defaults
+ to "tcp". If a CIDR is not specified, it will allow traffic from
+ all addresses. To disable all outbound host ports, use the value
+ none. The default value opens etcd''s standard ports to ensure that
+ Felix does not get cut off from etcd as well as allowing DHCP and
+ DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666,
+ tcp:6667, udp:53, udp:67]'
+ items:
+ description: ProtoPort is a combination of protocol, port, and CIDR.
+ Protocol and port must be specified.
+ properties:
+ net:
+ type: string
+ port:
+ type: integer
+ protocol:
+ type: string
+ required:
+ - port
+ - protocol
+ type: object
+ type: array
+ featureDetectOverride:
+ description: FeatureDetectOverride is used to override feature detection
+ based on auto-detected platform capabilities. Values are specified
+ in a comma separated list with no spaces, for example: "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true"
+ or "false" will force the feature; empty or omitted values are auto-detected.
+ pattern: ^([a-zA-Z0-9-_]+=(true|false|),)*([a-zA-Z0-9-_]+=(true|false|))?$
+ type: string
+ featureGates:
+ description: FeatureGates is used to enable or disable tech-preview
+ Calico features. Values are specified in a comma separated list
+ with no spaces, for example: "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false".
+ This is used to enable features that are not fully production ready.
+ pattern: ^([a-zA-Z0-9-_]+=([^=]+),)*([a-zA-Z0-9-_]+=([^=]+))?$
+ type: string
+ floatingIPs:
+ description: FloatingIPs configures whether or not Felix will program
+ non-OpenStack floating IP addresses. (OpenStack-derived floating
+ IPs are always programmed, regardless of this setting.)
+ enum:
+ - Enabled
+ - Disabled
+ type: string
+ genericXDPEnabled:
+ description: 'GenericXDPEnabled enables Generic XDP so network cards
+ that don''t support XDP offload or driver modes can use XDP. This
+ is not recommended since it doesn''t provide better performance
+ than iptables. [Default: false]'
+ type: boolean
+ healthEnabled:
+ type: boolean
+ healthHost:
+ type: string
+ healthPort:
+ type: integer
+ healthTimeoutOverrides:
+ description: HealthTimeoutOverrides allows the internal watchdog timeouts
+ of individual subcomponents to be overridden. This is useful for
+ working around "false positive" liveness timeouts that can occur
+ in particularly stressful workloads or if CPU is constrained. For
+ a list of active subcomponents, see Felix's logs.
+ items:
+ properties:
+ name:
+ type: string
+ timeout:
+ type: string
+ required:
+ - name
+ - timeout
+ type: object
+ type: array
+ interfaceExclude:
+ description: 'InterfaceExclude is a comma-separated list of interfaces
+ that Felix should exclude when monitoring for host endpoints. The
+ default value ensures that Felix ignores Kubernetes'' IPVS dummy
+ interface, which is used internally by kube-proxy. If you want to
+ exclude multiple interface names using a single value, the list
+ supports regular expressions. For regular expressions you must wrap
+ the value with ''/''. For example having values ''/^kube/,veth1''
+ will exclude all interfaces that begin with ''kube'' and also the
+ interface ''veth1''. [Default: kube-ipvs0]'
+ type: string
+ interfacePrefix:
+ description: 'InterfacePrefix is the interface name prefix that identifies
+ workload endpoints and so distinguishes them from host endpoint
+ interfaces. Note: in environments other than bare metal, the orchestrators
+ configure this appropriately. For example our Kubernetes and Docker
+ integrations set the ''cali'' value, and our OpenStack integration
+ sets the ''tap'' value. [Default: cali]'
+ type: string
+ interfaceRefreshInterval:
+ description: InterfaceRefreshInterval is the period at which Felix
+ rescans local interfaces to verify their state. The rescan can be
+ disabled by setting the interval to 0.
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ ipipEnabled:
+ description: 'IPIPEnabled overrides whether Felix should configure
+ an IPIP interface on the host. Optional as Felix determines this
+ based on the existing IP pools. [Default: nil (unset)]'
+ type: boolean
+ ipipMTU:
+ description: 'IPIPMTU is the MTU to set on the tunnel device. See
+ Configuring MTU [Default: 1440]'
+ type: integer
+ ipsetsRefreshInterval:
+ description: 'IpsetsRefreshInterval is the period at which Felix re-checks
+ all iptables state to ensure that no other process has accidentally
+ broken Calico''s rules. Set to 0 to disable iptables refresh. [Default:
+ 90s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ iptablesBackend:
+ description: IptablesBackend specifies which backend of iptables will
+ be used. The default is Auto.
+ pattern: ^(?i)(Auto|Legacy|NFT)?$
+ type: string
+ iptablesFilterAllowAction:
+ pattern: ^(?i)(Accept|Return)?$
+ type: string
+ iptablesFilterDenyAction:
+ description: IptablesFilterDenyAction controls what happens to traffic
+ that is denied by network policy. By default Calico blocks traffic
+ with an iptables "DROP" action. If you want to use "REJECT" action
+ instead, you can configure it here.
+ pattern: ^(?i)(Drop|Reject)?$
+ type: string
+ iptablesLockFilePath:
+ description: 'IptablesLockFilePath is the location of the iptables
+ lock file. You may need to change this if the lock file is not in
+ its standard location (for example if you have mapped it into Felix''s
+ container at a different path). [Default: /run/xtables.lock]'
+ type: string
+ iptablesLockProbeInterval:
+ description: 'IptablesLockProbeInterval is the time that Felix will
+ wait between attempts to acquire the iptables lock if it is not
+ available. Lower values make Felix more responsive when the lock
+ is contended, but use more CPU. [Default: 50ms]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ iptablesLockTimeout:
+ description: 'IptablesLockTimeout is the time that Felix will wait
+ for the iptables lock, or 0, to disable. To use this feature, Felix
+ must share the iptables lock file with all other processes that
+ also take the lock. When running Felix inside a container, this
+ requires the /run directory of the host to be mounted into the calico/node
+ or calico/felix container. [Default: 0s disabled]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ iptablesMangleAllowAction:
+ pattern: ^(?i)(Accept|Return)?$
+ type: string
+ iptablesMarkMask:
+ description: 'IptablesMarkMask is the mask that Felix selects its
+ IPTables Mark bits from. Should be a 32 bit hexadecimal number with
+ at least 8 bits set, none of which clash with any other mark bits
+ in use on the system. [Default: 0xff000000]'
+ format: int32
+ type: integer
+ iptablesNATOutgoingInterfaceFilter:
+ type: string
+ iptablesPostWriteCheckInterval:
+ description: 'IptablesPostWriteCheckInterval is the period after Felix
+ has done a write to the dataplane that it schedules an extra read
+ back in order to check the write was not clobbered by another process.
+ This should only occur if another application on the system doesn''t
+ respect the iptables lock. [Default: 1s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ iptablesRefreshInterval:
+ description: 'IptablesRefreshInterval is the period at which Felix
+ re-checks the IP sets in the dataplane to ensure that no other process
+ has accidentally broken Calico''s rules. Set to 0 to disable IP
+ sets refresh. Note: the default for this value is lower than the
+ other refresh intervals as a workaround for a Linux kernel bug that
+ was fixed in kernel version 4.11. If you are using v4.11 or greater
+ you may want to set this to a higher value to reduce Felix CPU
+ usage. [Default: 10s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ ipv6Support:
+ description: IPv6Support controls whether Felix enables support for
+ IPv6 (if supported by the in-use dataplane).
+ type: boolean
+ kubeNodePortRanges:
+ description: 'KubeNodePortRanges holds list of port ranges used for
+ service node ports. Only used if felix detects kube-proxy running
+ in ipvs mode. Felix uses these ranges to separate host and workload
+ traffic. [Default: 30000:32767].'
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ logDebugFilenameRegex:
+ description: LogDebugFilenameRegex controls which source code files
+ have their Debug log output included in the logs. Only logs from
+ files with names that match the given regular expression are included. The
+ filter only applies to Debug level logs.
+ type: string
+ logFilePath:
+ description: 'LogFilePath is the full path to the Felix log. Set to
+ none to disable file logging. [Default: /var/log/calico/felix.log]'
+ type: string
+ logPrefix:
+ description: 'LogPrefix is the log prefix that Felix uses when rendering
+ LOG rules. [Default: calico-packet]'
+ type: string
+ logSeverityFile:
+ description: 'LogSeverityFile is the log severity above which logs
+ are sent to the log file. [Default: Info]'
+ pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$
+ type: string
+ logSeverityScreen:
+ description: 'LogSeverityScreen is the log severity above which logs
+ are sent to stdout. [Default: Info]'
+ pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$
+ type: string
+ logSeveritySys:
+ description: 'LogSeveritySys is the log severity above which logs
+ are sent to the syslog. Set to None for no logging to syslog. [Default:
+ Info]'
+ pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$
+ type: string
+ maxIpsetSize:
+ type: integer
+ metadataAddr:
+ description: 'MetadataAddr is the IP address or domain name of the
+ server that can answer VM queries for cloud-init metadata. In OpenStack,
+ this corresponds to the machine running nova-api (or in Ubuntu,
+ nova-api-metadata). A value of none (case insensitive) means that
+ Felix should not set up any NAT rule for the metadata path. [Default:
+ 127.0.0.1]'
+ type: string
+ metadataPort:
+ description: 'MetadataPort is the port of the metadata server. This,
+ combined with global.MetadataAddr (if not ''None''), is used to
+ set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort.
+ In most cases this should not need to be changed [Default: 8775].'
+ type: integer
+ mtuIfacePattern:
+ description: MTUIfacePattern is a regular expression that controls
+ which interfaces Felix should scan in order to calculate the host's
+ MTU. This should not match workload interfaces (usually named cali...).
+ type: string
+ natOutgoingAddress:
+ description: NATOutgoingAddress specifies an address to use when performing
+ source NAT for traffic in a natOutgoing pool that is leaving the
+ network. By default the address used is an address on the interface
+ the traffic is leaving on (i.e. it uses the iptables MASQUERADE target).
+ type: string
+ natPortRange:
+ anyOf:
+ - type: integer
+ - type: string
+ description: NATPortRange specifies the range of ports that is used
+ for port mapping when doing outgoing NAT. When unset the default
+ behavior of the network stack is used.
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ netlinkTimeout:
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ openstackRegion:
+ description: 'OpenstackRegion is the name of the region that a particular
+ Felix belongs to. In a multi-region Calico/OpenStack deployment,
+ this must be configured somehow for each Felix (here in the datamodel,
+ or in felix.cfg or the environment on each compute node), and must
+ match the [calico] openstack_region value configured in neutron.conf
+ on each node. [Default: Empty]'
+ type: string
+ policySyncPathPrefix:
+ description: 'PolicySyncPathPrefix is used by Felix to communicate
+ policy changes to external services, like Application layer policy.
+ [Default: Empty]'
+ type: string
+ prometheusGoMetricsEnabled:
+ description: 'PrometheusGoMetricsEnabled disables Go runtime metrics
+ collection, which the Prometheus client does by default, when set
+ to false. This reduces the number of metrics reported, reducing
+ Prometheus load. [Default: true]'
+ type: boolean
+ prometheusMetricsEnabled:
+ description: 'PrometheusMetricsEnabled enables the Prometheus metrics
+ server in Felix if set to true. [Default: false]'
+ type: boolean
+ prometheusMetricsHost:
+ description: 'PrometheusMetricsHost is the host that the Prometheus
+ metrics server should bind to. [Default: empty]'
+ type: string
+ prometheusMetricsPort:
+ description: 'PrometheusMetricsPort is the TCP port that the Prometheus
+ metrics server should bind to. [Default: 9091]'
+ type: integer
+ prometheusProcessMetricsEnabled:
+ description: 'PrometheusProcessMetricsEnabled disables process metrics
+ collection, which the Prometheus client does by default, when set
+ to false. This reduces the number of metrics reported, reducing
+ Prometheus load. [Default: true]'
+ type: boolean
+ prometheusWireGuardMetricsEnabled:
+ description: 'PrometheusWireGuardMetricsEnabled disables wireguard
+ metrics collection, which the Prometheus client does by default,
+ when set to false. This reduces the number of metrics reported,
+ reducing Prometheus load. [Default: true]'
+ type: boolean
+ removeExternalRoutes:
+ description: Whether or not to remove device routes that have not
+ been programmed by Felix. Disabling this will allow external applications
+ to also add device routes. This is enabled by default which means
+ we will remove externally added routes.
+ type: boolean
+ reportingInterval:
+ description: 'ReportingInterval is the interval at which Felix reports
+ its status into the datastore or 0 to disable. Must be non-zero
+ in OpenStack deployments. [Default: 30s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ reportingTTL:
+ description: 'ReportingTTL is the time-to-live setting for process-wide
+ status reports. [Default: 90s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ routeRefreshInterval:
+ description: 'RouteRefreshInterval is the period at which Felix re-checks
+ the routes in the dataplane to ensure that no other process has
+ accidentally broken Calico''s rules. Set to 0 to disable route refresh.
+ [Default: 90s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ routeSource:
+ description: 'RouteSource configures where Felix gets its routing
+ information. - WorkloadIPs: use workload endpoints to construct
+ routes. - CalicoIPAM: the default - use IPAM data to construct routes.'
+ pattern: ^(?i)(WorkloadIPs|CalicoIPAM)?$
+ type: string
+ routeSyncDisabled:
+ description: RouteSyncDisabled will disable all operations performed
+ on the route table. Set to true to run in network-policy mode only.
+ type: boolean
+ routeTableRange:
+ description: Deprecated in favor of RouteTableRanges. Calico programs
+ additional Linux route tables for various purposes. RouteTableRange
+ specifies the indices of the route tables that Calico should use.
+ properties:
+ max:
+ type: integer
+ min:
+ type: integer
+ required:
+ - max
+ - min
+ type: object
+ routeTableRanges:
+ description: Calico programs additional Linux route tables for various
+ purposes. RouteTableRanges specifies a set of table index ranges
+ that Calico should use. Deprecates `RouteTableRange`; if set, it overrides `RouteTableRange`.
+ items:
+ properties:
+ max:
+ type: integer
+ min:
+ type: integer
+ required:
+ - max
+ - min
+ type: object
+ type: array
+ serviceLoopPrevention:
+ description: 'When service IP advertisement is enabled, prevent routing
+ loops to service IPs that are not in use, by dropping or rejecting
+ packets that do not get DNAT''d by kube-proxy, unless set to "Disabled",
+ in which case such routing loops continue to be allowed. [Default:
+ Drop]'
+ pattern: ^(?i)(Drop|Reject|Disabled)?$
+ type: string
+ sidecarAccelerationEnabled:
+ description: 'SidecarAccelerationEnabled enables experimental sidecar
+ acceleration [Default: false]'
+ type: boolean
+ usageReportingEnabled:
+ description: 'UsageReportingEnabled reports anonymous Calico version
+ number and cluster size to projectcalico.org. Logs warnings returned
+ by the usage server. For example, if a significant security vulnerability
+ has been discovered in the version of Calico being used. [Default:
+ true]'
+ type: boolean
+ usageReportingInitialDelay:
+ description: 'UsageReportingInitialDelay controls the minimum delay
+ before Felix makes a report. [Default: 300s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ usageReportingInterval:
+ description: 'UsageReportingInterval controls the interval at which
+ Felix makes reports. [Default: 86400s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ useInternalDataplaneDriver:
+ description: UseInternalDataplaneDriver, if true, Felix will use its
+ internal dataplane programming logic. If false, it will launch
+ an external dataplane driver and communicate with it over protobuf.
+ type: boolean
+ vxlanEnabled:
+ description: 'VXLANEnabled overrides whether Felix should create the
+ VXLAN tunnel device for IPv4 VXLAN networking. Optional as Felix
+ determines this based on the existing IP pools. [Default: nil (unset)]'
+ type: boolean
+ vxlanMTU:
+ description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel
+ device. See Configuring MTU [Default: 1410]'
+ type: integer
+ vxlanMTUV6:
+ description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel
+ device. See Configuring MTU [Default: 1390]'
+ type: integer
+ vxlanPort:
+ type: integer
+ vxlanVNI:
+ type: integer
+ windowsManageFirewallRules:
+ description: 'WindowsManageFirewallRules configures whether or not
+ Felix will program Windows Firewall rules (to allow inbound access
+ to its own metrics ports). [Default: Disabled]'
+ enum:
+ - Enabled
+ - Disabled
+ type: string
+ wireguardEnabled:
+ description: 'WireguardEnabled controls whether Wireguard is enabled
+ for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network).
+ [Default: false]'
+ type: boolean
+ wireguardEnabledV6:
+ description: 'WireguardEnabledV6 controls whether Wireguard is enabled
+ for IPv6 (encapsulating IPv6 traffic over an IPv6 underlay network).
+ [Default: false]'
+ type: boolean
+ wireguardHostEncryptionEnabled:
+ description: 'WireguardHostEncryptionEnabled controls whether Wireguard
+ host-to-host encryption is enabled. [Default: false]'
+ type: boolean
+ wireguardInterfaceName:
+ description: 'WireguardInterfaceName specifies the name to use for
+ the IPv4 Wireguard interface. [Default: wireguard.cali]'
+ type: string
+ wireguardInterfaceNameV6:
+ description: 'WireguardInterfaceNameV6 specifies the name to use for
+ the IPv6 Wireguard interface. [Default: wg-v6.cali]'
+ type: string
+ wireguardKeepAlive:
+ description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive
+ option. Set 0 to disable. [Default: 0]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ wireguardListeningPort:
+ description: 'WireguardListeningPort controls the listening port used
+ by IPv4 Wireguard. [Default: 51820]'
+ type: integer
+ wireguardListeningPortV6:
+ description: 'WireguardListeningPortV6 controls the listening port
+ used by IPv6 Wireguard. [Default: 51821]'
+ type: integer
+ wireguardMTU:
+ description: 'WireguardMTU controls the MTU on the IPv4 Wireguard
+ interface. See Configuring MTU [Default: 1440]'
+ type: integer
+ wireguardMTUV6:
+ description: 'WireguardMTUV6 controls the MTU on the IPv6 Wireguard
+ interface. See Configuring MTU [Default: 1420]'
+ type: integer
+ wireguardRoutingRulePriority:
+ description: 'WireguardRoutingRulePriority controls the priority value
+ to use for the Wireguard routing rule. [Default: 99]'
+ type: integer
+ workloadSourceSpoofing:
+ description: WorkloadSourceSpoofing controls whether pods can use
+ the allowedSourcePrefixes annotation to send traffic with a source
+ IP address that is not theirs. This is disabled by default. When
+ set to "Any", pods can request any prefix.
+ pattern: ^(?i)(Disabled|Any)?$
+ type: string
+ xdpEnabled:
+ description: 'XDPEnabled enables XDP acceleration for suitable untracked
+ incoming deny rules. [Default: true]'
+ type: boolean
+ xdpRefreshInterval:
+ description: 'XDPRefreshInterval is the period at which Felix re-checks
+ all XDP state to ensure that no other process has accidentally broken
+ Calico''s BPF maps or attached programs. Set to 0 to disable XDP
+ refresh. [Default: 90s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
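+# Illustrative sketch (commented out): a default FelixConfiguration that pins
+# a few of the knobs documented above to their stated defaults; the values are
+# shown only to illustrate the schema, not recommended settings.
+#
+# apiVersion: crd.projectcalico.org/v1
+# kind: FelixConfiguration
+# metadata:
+#   name: default
+# spec:
+#   bpfEnabled: false
+#   logSeverityScreen: Info
+#   reportingInterval: 30s
+#   prometheusMetricsEnabled: false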
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworkpolicies.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: GlobalNetworkPolicy
+ listKind: GlobalNetworkPolicyList
+ plural: globalnetworkpolicies
+ singular: globalnetworkpolicy
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ applyOnForward:
+ description: ApplyOnForward indicates to apply the rules in this policy
+ on forward traffic.
+ type: boolean
+ doNotTrack:
+ description: DoNotTrack indicates whether packets matched by the rules
+ in this policy should go through the data plane's connection tracking,
+ such as Linux conntrack. If True, the rules in this policy are
+ applied before any data plane connection tracking, and packets allowed
+ by this policy are marked as not to be tracked.
+ type: boolean
+ egress:
+ description: The ordered set of egress rules. Each rule contains
+ a set of packet match criteria and a corresponding action to apply.
+ items:
+ description: "A Rule encapsulates a set of match criteria and an
+ action. Both selector-based security Policy and security Profiles
+ reference rules - separated out as a list of rules for both ingress
+ and egress packet matching. \n Each positive match criteria has
+ a negated version, prefixed with \"Not\". All the match criteria
+ within a rule must be satisfied for a packet to match. A single
+ rule can contain the positive and negative version of a match
+ and both must be satisfied for the rule to match."
+ properties:
+ action:
+ type: string
+ destination:
+ description: Destination contains the match criteria that apply
+ to destination entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
+ contains a selector expression. Only traffic that originates
+ from (or terminates at) endpoints within the selected
+ namespaces will be matched. When both NamespaceSelector
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
+ type: string
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
+ type: string
+ ports:
+ description: "Ports is an optional field that restricts
+ the rule to only apply to traffic that has a source (destination)
+ port that matches one of these ranges/values. This value
+ is a list of integers or strings that represent ranges
+ of ports. \n Since only some protocols have ports, if
+ any ports are specified it requires the Protocol match
+ in the Rule to be set to \"TCP\" or \"UDP\"."
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
+ a selector expression (see Policy for sample syntax).
+ \ Only traffic that originates from (terminates at) endpoints
+ matching the selector will be matched. \n Note that: in
+ addition to the negated version of the Selector (see NotSelector
+ below), the selector expression syntax itself supports
+ negation. The two types of negation are subtly different.
+ One negates the set of matched endpoints, the other negates
+ the whole match: \n \tSelector = \"!has(my_label)\" matches
+ packets that are from other Calico-controlled \tendpoints
+ that do not have the label \"my_label\". \n \tNotSelector
+ = \"has(my_label)\" matches packets that are not from
+ Calico-controlled \tendpoints that do have the label \"my_label\".
+ \n The effect is that the latter will accept packets from
+ non-Calico sources whereas the former is limited to packets
+ from Calico-controlled endpoints."
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
+ type: string
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ http:
+ description: HTTP contains match criteria that apply to HTTP
+ requests.
+ properties:
+ methods:
+ description: Methods is an optional field that restricts
+ the rule to apply only to HTTP requests that use one of
+ the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
+ methods are OR'd together.
+ items:
+ type: string
+ type: array
+ paths:
+ description: 'Paths is an optional field that restricts
+ the rule to apply to HTTP requests that use one of the
+ listed HTTP Paths. Multiple paths are OR''d together.
+ e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
+ ONLY specify either a `exact` or a `prefix` match. The
+ validator will check for it.'
+ items:
+                          description: 'HTTPPath specifies an HTTP path to match.
+                            It may be either of the form: exact: <path>: which matches
+                            the path exactly or prefix: <path-prefix>: which matches
+                            the path prefix'
+ properties:
+ exact:
+ type: string
+ prefix:
+ type: string
+ type: object
+ type: array
+ type: object
+ icmp:
+ description: ICMP is an optional field that restricts the rule
+ to apply to a specific type and code of ICMP traffic. This
+ should only be specified if the Protocol field is set to "ICMP"
+ or "ICMPv6".
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ ipVersion:
+ description: IPVersion is an optional field that restricts the
+ rule to only match a specific IP version.
+ type: integer
+ metadata:
+ description: Metadata contains additional information for this
+ rule
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations is a set of key value pairs that
+ give extra information about the rule
+ type: object
+ type: object
+ notICMP:
+ description: NotICMP is the negated version of the ICMP field.
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ notProtocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: NotProtocol is the negated version of the Protocol
+ field.
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ protocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "Protocol is an optional field that restricts the
+ rule to only apply to traffic of a specific IP protocol. Required
+ if any of the EntityRules contain Ports (because ports only
+ apply to certain protocols). \n Must be one of these string
+ values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
+ \"UDPLite\" or an integer in the range 1-255."
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ source:
+ description: Source contains the match criteria that apply to
+ source entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
+ contains a selector expression. Only traffic that originates
+ from (or terminates at) endpoints within the selected
+ namespaces will be matched. When both NamespaceSelector
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
+ type: string
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
+ type: string
+ ports:
+ description: "Ports is an optional field that restricts
+ the rule to only apply to traffic that has a source (destination)
+ port that matches one of these ranges/values. This value
+ is a list of integers or strings that represent ranges
+ of ports. \n Since only some protocols have ports, if
+ any ports are specified it requires the Protocol match
+ in the Rule to be set to \"TCP\" or \"UDP\"."
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
+ a selector expression (see Policy for sample syntax).
+ \ Only traffic that originates from (terminates at) endpoints
+ matching the selector will be matched. \n Note that: in
+ addition to the negated version of the Selector (see NotSelector
+ below), the selector expression syntax itself supports
+ negation. The two types of negation are subtly different.
+ One negates the set of matched endpoints, the other negates
+ the whole match: \n \tSelector = \"!has(my_label)\" matches
+ packets that are from other Calico-controlled \tendpoints
+ that do not have the label \"my_label\". \n \tNotSelector
+ = \"has(my_label)\" matches packets that are not from
+ Calico-controlled \tendpoints that do have the label \"my_label\".
+ \n The effect is that the latter will accept packets from
+ non-Calico sources whereas the former is limited to packets
+ from Calico-controlled endpoints."
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
+ type: string
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ required:
+ - action
+ type: object
+ type: array
+ ingress:
+ description: The ordered set of ingress rules. Each rule contains
+ a set of packet match criteria and a corresponding action to apply.
+ items:
+ description: "A Rule encapsulates a set of match criteria and an
+ action. Both selector-based security Policy and security Profiles
+ reference rules - separated out as a list of rules for both ingress
+ and egress packet matching. \n Each positive match criteria has
+ a negated version, prefixed with \"Not\". All the match criteria
+ within a rule must be satisfied for a packet to match. A single
+ rule can contain the positive and negative version of a match
+ and both must be satisfied for the rule to match."
+ properties:
+ action:
+ type: string
+ destination:
+ description: Destination contains the match criteria that apply
+ to destination entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
+ contains a selector expression. Only traffic that originates
+ from (or terminates at) endpoints within the selected
+ namespaces will be matched. When both NamespaceSelector
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
+ type: string
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
+ type: string
+ ports:
+ description: "Ports is an optional field that restricts
+ the rule to only apply to traffic that has a source (destination)
+ port that matches one of these ranges/values. This value
+ is a list of integers or strings that represent ranges
+ of ports. \n Since only some protocols have ports, if
+ any ports are specified it requires the Protocol match
+ in the Rule to be set to \"TCP\" or \"UDP\"."
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
+ a selector expression (see Policy for sample syntax).
+ \ Only traffic that originates from (terminates at) endpoints
+ matching the selector will be matched. \n Note that: in
+ addition to the negated version of the Selector (see NotSelector
+ below), the selector expression syntax itself supports
+ negation. The two types of negation are subtly different.
+ One negates the set of matched endpoints, the other negates
+ the whole match: \n \tSelector = \"!has(my_label)\" matches
+ packets that are from other Calico-controlled \tendpoints
+ that do not have the label \"my_label\". \n \tNotSelector
+ = \"has(my_label)\" matches packets that are not from
+ Calico-controlled \tendpoints that do have the label \"my_label\".
+ \n The effect is that the latter will accept packets from
+ non-Calico sources whereas the former is limited to packets
+ from Calico-controlled endpoints."
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
+ type: string
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ http:
+ description: HTTP contains match criteria that apply to HTTP
+ requests.
+ properties:
+ methods:
+ description: Methods is an optional field that restricts
+ the rule to apply only to HTTP requests that use one of
+ the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
+ methods are OR'd together.
+ items:
+ type: string
+ type: array
+ paths:
+ description: 'Paths is an optional field that restricts
+ the rule to apply to HTTP requests that use one of the
+ listed HTTP Paths. Multiple paths are OR''d together.
+ e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
+ ONLY specify either a `exact` or a `prefix` match. The
+ validator will check for it.'
+ items:
+                          description: 'HTTPPath specifies an HTTP path to match.
+                            It may be either of the form: exact: <path>: which matches
+                            the path exactly or prefix: <path-prefix>: which matches
+                            the path prefix'
+ properties:
+ exact:
+ type: string
+ prefix:
+ type: string
+ type: object
+ type: array
+ type: object
+ icmp:
+ description: ICMP is an optional field that restricts the rule
+ to apply to a specific type and code of ICMP traffic. This
+ should only be specified if the Protocol field is set to "ICMP"
+ or "ICMPv6".
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ ipVersion:
+ description: IPVersion is an optional field that restricts the
+ rule to only match a specific IP version.
+ type: integer
+ metadata:
+ description: Metadata contains additional information for this
+ rule
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations is a set of key value pairs that
+ give extra information about the rule
+ type: object
+ type: object
+ notICMP:
+ description: NotICMP is the negated version of the ICMP field.
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ notProtocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: NotProtocol is the negated version of the Protocol
+ field.
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ protocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "Protocol is an optional field that restricts the
+ rule to only apply to traffic of a specific IP protocol. Required
+ if any of the EntityRules contain Ports (because ports only
+ apply to certain protocols). \n Must be one of these string
+ values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
+ \"UDPLite\" or an integer in the range 1-255."
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ source:
+ description: Source contains the match criteria that apply to
+ source entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
+ contains a selector expression. Only traffic that originates
+ from (or terminates at) endpoints within the selected
+ namespaces will be matched. When both NamespaceSelector
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
+ type: string
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
+ type: string
+ ports:
+ description: "Ports is an optional field that restricts
+ the rule to only apply to traffic that has a source (destination)
+ port that matches one of these ranges/values. This value
+ is a list of integers or strings that represent ranges
+ of ports. \n Since only some protocols have ports, if
+ any ports are specified it requires the Protocol match
+ in the Rule to be set to \"TCP\" or \"UDP\"."
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
+ a selector expression (see Policy for sample syntax).
+ \ Only traffic that originates from (terminates at) endpoints
+ matching the selector will be matched. \n Note that: in
+ addition to the negated version of the Selector (see NotSelector
+ below), the selector expression syntax itself supports
+ negation. The two types of negation are subtly different.
+ One negates the set of matched endpoints, the other negates
+ the whole match: \n \tSelector = \"!has(my_label)\" matches
+ packets that are from other Calico-controlled \tendpoints
+ that do not have the label \"my_label\". \n \tNotSelector
+ = \"has(my_label)\" matches packets that are not from
+ Calico-controlled \tendpoints that do have the label \"my_label\".
+ \n The effect is that the latter will accept packets from
+ non-Calico sources whereas the former is limited to packets
+ from Calico-controlled endpoints."
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
+ type: string
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ required:
+ - action
+ type: object
+ type: array
+ namespaceSelector:
+ description: NamespaceSelector is an optional field for an expression
+ used to select a pod based on namespaces.
+ type: string
+ order:
+ description: Order is an optional field that specifies the order in
+ which the policy is applied. Policies with higher "order" are applied
+ after those with lower order. If the order is omitted, it may be
+ considered to be "infinite" - i.e. the policy will be applied last. Policies
+ with identical order will be applied in alphanumerical order based
+ on the Policy "Name".
+ type: number
+ performanceHints:
+ description: "PerformanceHints contains a list of hints to Calico's
+ policy engine to help process the policy more efficiently. Hints
+ never change the enforcement behaviour of the policy. \n Currently,
+ the only available hint is \"AssumeNeededOnEveryNode\". When that
+ hint is set on a policy, Felix will act as if the policy matches
+ a local endpoint even if it does not. This is useful for \"preloading\"
+ any large static policies that are known to be used on every node.
+ If the policy is _not_ used on a particular node then the work done
+ to preload the policy (and to maintain it) is wasted."
+ items:
+ type: string
+ type: array
+ preDNAT:
+ description: PreDNAT indicates to apply the rules in this policy before
+ any DNAT.
+ type: boolean
+ selector:
+ description: "The selector is an expression used to pick pick out
+ the endpoints that the policy should be applied to. \n Selector
+ expressions follow this syntax: \n \tlabel == \"string_literal\"
+ \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
+ \ -> not equal; also matches if label is not present \tlabel in
+ { \"a\", \"b\", \"c\", ... } -> true if the value of label X is
+ one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\",
+ ... } -> true if the value of label X is not one of \"a\", \"b\",
+ \"c\" \thas(label_name) -> True if that label is present \t! expr
+ -> negation of expr \texpr && expr -> Short-circuit and \texpr
+ || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall()
+ or the empty selector -> matches all endpoints. \n Label names are
+ allowed to contain alphanumerics, -, _ and /. String literals are
+ more permissive but they do not support escape characters. \n Examples
+ (with made-up labels): \n \ttype == \"webserver\" && deployment
+ == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
+ \"dev\" \t! has(label_name)"
+ type: string
+ serviceAccountSelector:
+ description: ServiceAccountSelector is an optional field for an expression
+ used to select a pod based on service accounts.
+ type: string
+ types:
+ description: "Types indicates whether this policy applies to ingress,
+ or to egress, or to both. When not explicitly specified (and so
+ the value on creation is empty or nil), Calico defaults Types according
+ to what Ingress and Egress rules are present in the policy. The
+ default is: \n - [ PolicyTypeIngress ], if there are no Egress rules
+ (including the case where there are also no Ingress rules) \n
+ - [ PolicyTypeEgress ], if there are Egress rules but no Ingress
+ rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are
+ both Ingress and Egress rules. \n When the policy is read back again,
+ Types will always be one of these values, never empty or nil."
+ items:
+ description: PolicyType enumerates the possible values of the PolicySpec
+ Types field.
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
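+# NOTE: The GlobalNetworkPolicy schema above combines a policy-level
+# selector (syntax documented in spec.selector) with ordered ingress and
+# egress rules. A hedged, commented-out illustration, not part of this
+# manifest (the names, labels and port are assumptions): allow HTTPS into
+# endpoints labelled role == "webserver" from trusted sources only.
+#
+# apiVersion: crd.projectcalico.org/v1
+# kind: GlobalNetworkPolicy
+# metadata:
+#   name: allow-https-to-webservers
+# spec:
+#   order: 100                        # lower order is enforced first
+#   selector: role == "webserver"
+#   types: ["Ingress"]
+#   ingress:
+#     - action: Allow
+#       protocol: TCP                 # required because ports are listed
+#       source:
+#         selector: trusted == "true"
+#       destination:
+#         ports: [443]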
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworksets.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: GlobalNetworkSet
+ listKind: GlobalNetworkSetList
+ plural: globalnetworksets
+ singular: globalnetworkset
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs
+ that share labels to allow rules to refer to them via selectors. The labels
+ of GlobalNetworkSet are not namespaced.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+      description: GlobalNetworkSetSpec contains the specification for a GlobalNetworkSet
+        resource.
+ properties:
+ nets:
+ description: The list of IP networks that belong to this set.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
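+# NOTE: A GlobalNetworkSet carries only labels and a list of CIDRs, so
+# that rule selectors can match external (non-Calico) networks. Hedged,
+# commented-out illustration; the name, label and documentation-range
+# CIDRs are assumptions:
+#
+# apiVersion: crd.projectcalico.org/v1
+# kind: GlobalNetworkSet
+# metadata:
+#   name: office-networks
+#   labels:
+#     trusted: "true"                 # matched by e.g. selector: trusted == "true"
+# spec:
+#   nets:
+#     - 192.0.2.0/24
+#     - 198.51.100.0/24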
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: hostendpoints.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: HostEndpoint
+ listKind: HostEndpointList
+ plural: hostendpoints
+ singular: hostendpoint
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: HostEndpointSpec contains the specification for a HostEndpoint
+ resource.
+ properties:
+ expectedIPs:
+ description: "The expected IP addresses (IPv4 and IPv6) of the endpoint.
+ If \"InterfaceName\" is not present, Calico will look for an interface
+ matching any of the IPs in the list and apply policy to that. Note:
+ \tWhen using the selector match criteria in an ingress or egress
+ security Policy \tor Profile, Calico converts the selector into
+ a set of IP addresses. For host \tendpoints, the ExpectedIPs field
+ is used for that purpose. (If only the interface \tname is specified,
+ Calico does not learn the IPs of the interface for use in match
+ \tcriteria.)"
+ items:
+ type: string
+ type: array
+ interfaceName:
+ description: "Either \"*\", or the name of a specific Linux interface
+ to apply policy to; or empty. \"*\" indicates that this HostEndpoint
+ governs all traffic to, from or through the default network namespace
+ of the host named by the \"Node\" field; entering and leaving that
+ namespace via any interface, including those from/to non-host-networked
+ local workloads. \n If InterfaceName is not \"*\", this HostEndpoint
+ only governs traffic that enters or leaves the host through the
+ specific interface named by InterfaceName, or - when InterfaceName
+ is empty - through the specific interface that has one of the IPs
+ in ExpectedIPs. Therefore, when InterfaceName is empty, at least
+ one expected IP must be specified. Only external interfaces (such
+ as \"eth0\") are supported here; it isn't possible for a HostEndpoint
+ to protect traffic through a specific local workload interface.
+ \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints;
+ initially just pre-DNAT policy. Please check Calico documentation
+ for the latest position."
+ type: string
+ node:
+ description: The node name identifying the Calico node instance.
+ type: string
+ ports:
+ description: Ports contains the endpoint's named ports, which may
+ be referenced in security policy rules.
+ items:
+ properties:
+ name:
+ type: string
+ port:
+ type: integer
+ protocol:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ required:
+ - name
+ - port
+ - protocol
+ type: object
+ type: array
+ profiles:
+ description: A list of identifiers of security Profile objects that
+ apply to this endpoint. Each profile is applied in the order that
+ they appear in this list. Profile rules are applied after the selector-based
+ security policy.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
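+# NOTE: A HostEndpoint attaches policy to a host interface rather than a
+# workload. Hedged, commented-out sketch (node name, interface and IP are
+# assumptions); expectedIPs lets selector-based rules resolve this
+# endpoint to concrete addresses, as described in the schema above.
+#
+# apiVersion: crd.projectcalico.org/v1
+# kind: HostEndpoint
+# metadata:
+#   name: node-1-eth0
+#   labels:
+#     role: bastion
+# spec:
+#   node: node-1
+#   interfaceName: eth0               # "*" would govern all host traffic
+#   expectedIPs:
+#     - 192.0.2.10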
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamblocks.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: IPAMBlock
+ listKind: IPAMBlockList
+ plural: ipamblocks
+ singular: ipamblock
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: IPAMBlockSpec contains the specification for an IPAMBlock
+ resource.
+ properties:
+ affinity:
+                description: Affinity of the block, if this block has one. If set,
+                  it will be of the form "host:<hostname>". If not set, this block
+                  is not affine to a host.
+ type: string
+ allocations:
+ description: Array of allocations in-use within this block. nil entries
+ mean the allocation is free. For non-nil entries at index i, the
+ index is the ordinal of the allocation within this block and the
+ value is the index of the associated attributes in the Attributes
+ array.
+ items:
+ type: integer
+ # TODO: This nullable is manually added in. We should update controller-gen
+ # to handle []*int properly itself.
+ nullable: true
+ type: array
+ attributes:
+ description: Attributes is an array of arbitrary metadata associated
+ with allocations in the block. To find attributes for a given allocation,
+ use the value of the allocation's entry in the Allocations array
+ as the index of the element in this array.
+ items:
+ properties:
+ handle_id:
+ type: string
+ secondary:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ type: array
+ cidr:
+ description: The block's CIDR.
+ type: string
+ deleted:
+ description: Deleted is an internal boolean used to workaround a limitation
+ in the Kubernetes API whereby deletion will not return a conflict
+ error if the block has been updated. It should not be set manually.
+ type: boolean
+ sequenceNumber:
+ default: 0
+ description: We store a sequence number that is updated each time
+ the block is written. Each allocation will also store the sequence
+ number of the block at the time of its creation. When releasing
+ an IP, passing the sequence number associated with the allocation
+ allows us to protect against a race condition and ensure the IP
+ hasn't been released and re-allocated since the release request.
+ format: int64
+ type: integer
+ sequenceNumberForAllocation:
+ additionalProperties:
+ format: int64
+ type: integer
+ description: Map of allocated ordinal within the block to sequence
+ number of the block at the time of allocation. Kubernetes does not
+ allow numerical keys for maps, so the key is cast to a string.
+ type: object
+ strictAffinity:
+ description: StrictAffinity on the IPAMBlock is deprecated and no
+ longer used by the code. Use IPAMConfig StrictAffinity instead.
+ type: boolean
+ unallocated:
+ description: Unallocated is an ordered list of allocations which are
+ free in the block.
+ items:
+ type: integer
+ type: array
+ required:
+ - allocations
+ - attributes
+ - cidr
+ - strictAffinity
+ - unallocated
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
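+# NOTE: IPAMBlock is internal bookkeeping written by Calico IPAM, not a
+# resource users create. To make the parallel-array layout described
+# above concrete (illustrative, truncated values): allocations[i] is
+# either null (ordinal i is free) or an index into attributes.
+#
+# spec:
+#   cidr: 10.244.0.0/26
+#   affinity: host:node-1             # the "host:<hostname>" form
+#   allocations: [0, null, 1, null]   # ordinals 0 and 2 are allocated
+#   attributes:                       # allocations[0] -> attributes[0], etc.
+#     - handle_id: k8s-pod-network.abc123
+#     - handle_id: k8s-pod-network.def456
+#   unallocated: [1, 3]               # free ordinals (truncated here)
+#   strictAffinity: false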
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamconfigs.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: IPAMConfig
+ listKind: IPAMConfigList
+ plural: ipamconfigs
+ singular: ipamconfig
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: IPAMConfigSpec contains the specification for an IPAMConfig
+ resource.
+ properties:
+ autoAllocateBlocks:
+ type: boolean
+ maxBlocksPerHost:
+ description: MaxBlocksPerHost, if non-zero, is the max number of blocks
+ that can be affine to each host.
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ strictAffinity:
+ type: boolean
+ required:
+ - autoAllocateBlocks
+ - strictAffinity
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
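+# NOTE: IPAMConfig is a cluster singleton tuning IPAM behaviour. Hedged,
+# commented-out sketch (the values are assumptions): require block
+# affinity and cap how many blocks a node may claim.
+#
+# apiVersion: crd.projectcalico.org/v1
+# kind: IPAMConfig
+# metadata:
+#   name: default
+# spec:
+#   strictAffinity: true              # only allocate from affine blocks
+#   autoAllocateBlocks: true
+#   maxBlocksPerHost: 4               # 0 means no limit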
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamhandles.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: IPAMHandle
+ listKind: IPAMHandleList
+ plural: ipamhandles
+ singular: ipamhandle
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: IPAMHandleSpec contains the specification for an IPAMHandle
+ resource.
+ properties:
+ block:
+ additionalProperties:
+ type: integer
+ type: object
+ deleted:
+ type: boolean
+ handleID:
+ type: string
+ required:
+ - block
+ - handleID
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
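+# NOTE: IPAMHandle is likewise internal: it records, per allocation
+# handle, how many addresses are held in each block. Illustrative shape
+# only (the handle and CIDR are assumptions):
+#
+# spec:
+#   handleID: k8s-pod-network.abc123
+#   block:
+#     10.244.0.0/26: 1                # one address held in this block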
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: ippools.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: IPPool
+ listKind: IPPoolList
+ plural: ippools
+ singular: ippool
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: IPPoolSpec contains the specification for an IPPool resource.
+ properties:
+ allowedUses:
+                description: AllowedUses controls what the IP pool will be used for.
+                  If not specified or empty, defaults to ["Tunnel", "Workload"] for
+                  backwards compatibility.
+ items:
+ type: string
+ type: array
+ blockSize:
+ description: The block size to use for IP address assignments from
+ this pool. Defaults to 26 for IPv4 and 122 for IPv6.
+ type: integer
+ cidr:
+ description: The pool CIDR.
+ type: string
+ disableBGPExport:
+ description: 'Disable exporting routes from this IP Pool''s CIDR over
+ BGP. [Default: false]'
+ type: boolean
+ disabled:
+ description: When disabled is true, Calico IPAM will not assign addresses
+ from this pool.
+ type: boolean
+ ipip:
+ description: 'Deprecated: this field is only used for APIv1 backwards
+ compatibility. Setting this field is not allowed, this field is
+ for internal use only.'
+ properties:
+ enabled:
+ description: When enabled is true, ipip tunneling will be used
+ to deliver packets to destinations within this pool.
+ type: boolean
+ mode:
+ description: The IPIP mode. This can be one of "always" or "cross-subnet". A
+ mode of "always" will also use IPIP tunneling for routing to
+ destination IP addresses within this pool. A mode of "cross-subnet"
+ will only use IPIP tunneling when the destination node is on
+ a different subnet to the originating node. The default value
+ (if not specified) is "always".
+ type: string
+ type: object
+ ipipMode:
+ description: Contains configuration for IPIP tunneling for this pool.
+ If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling
+ is disabled).
+ type: string
+ nat-outgoing:
+ description: 'Deprecated: this field is only used for APIv1 backwards
+ compatibility. Setting this field is not allowed, this field is
+ for internal use only.'
+ type: boolean
+ natOutgoing:
+ description: When natOutgoing is true, packets sent from Calico networked
+ containers in this pool to destinations outside of this pool will
+ be masqueraded.
+ type: boolean
+ nodeSelector:
+ description: Allows IPPool to allocate for a specific node by label
+ selector.
+ type: string
+ vxlanMode:
+ description: Contains configuration for VXLAN tunneling for this pool.
+ If not specified, then this is defaulted to "Never" (i.e. VXLAN
+ tunneling is disabled).
+ type: string
+ required:
+ - cidr
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
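+# NOTE: IPPool is the main user-facing IPAM resource in this file. A
+# hedged, commented-out sketch (the CIDR and mode choices are
+# assumptions; only spec.cidr is required by the schema above):
+#
+# apiVersion: crd.projectcalico.org/v1
+# kind: IPPool
+# metadata:
+#   name: default-ipv4-ippool
+# spec:
+#   cidr: 10.244.0.0/16
+#   blockSize: 26                     # schema default for IPv4
+#   ipipMode: CrossSubnet             # tunnel only between subnets
+#   vxlanMode: Never
+#   natOutgoing: true                 # masquerade traffic leaving the pool
+#   nodeSelector: all()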
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: (devel)
+ creationTimestamp: null
+ name: ipreservations.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: IPReservation
+ listKind: IPReservationList
+ plural: ipreservations
+ singular: ipreservation
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: IPReservationSpec contains the specification for an IPReservation
+ resource.
+ properties:
+ reservedCIDRs:
+ description: ReservedCIDRs is a list of CIDRs and/or IP addresses
+ that Calico IPAM will exclude from new allocations.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
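+# NOTE: IPReservation simply lists addresses Calico IPAM must not hand
+# out to new allocations. Hedged, commented-out illustration (the
+# addresses are assumptions):
+#
+# apiVersion: crd.projectcalico.org/v1
+# kind: IPReservation
+# metadata:
+#   name: infra-reservations
+# spec:
+#   reservedCIDRs:
+#     - 10.244.0.1/32                 # a single address
+#     - 10.244.10.0/24                # a whole range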
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: kubecontrollersconfigurations.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: KubeControllersConfiguration
+ listKind: KubeControllersConfigurationList
+ plural: kubecontrollersconfigurations
+ singular: kubecontrollersconfiguration
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: KubeControllersConfigurationSpec contains the values of the
+ Kubernetes controllers configuration.
+ properties:
+ controllers:
+ description: Controllers enables and configures individual Kubernetes
+ controllers
+ properties:
+ namespace:
+ description: Namespace enables and configures the namespace controller.
+ Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform reconciliation
+ with the Calico datastore. [Default: 5m]'
+ type: string
+ type: object
+ node:
+ description: Node enables and configures the node controller.
+ Enabled by default, set to nil to disable.
+ properties:
+ hostEndpoint:
+ description: HostEndpoint controls syncing nodes to host endpoints.
+ Disabled by default, set to nil to disable.
+ properties:
+ autoCreate:
+ description: 'AutoCreate enables automatic creation of
+ host endpoints for every node. [Default: Disabled]'
+ type: string
+ type: object
+ leakGracePeriod:
+ description: 'LeakGracePeriod is the period used by the controller
+ to determine if an IP address has been leaked. Set to 0
+ to disable IP garbage collection. [Default: 15m]'
+ type: string
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform reconciliation
+ with the Calico datastore. [Default: 5m]'
+ type: string
+ syncLabels:
+ description: 'SyncLabels controls whether to copy Kubernetes
+ node labels to Calico nodes. [Default: Enabled]'
+ type: string
+ type: object
+ policy:
+ description: Policy enables and configures the policy controller.
+ Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform reconciliation
+ with the Calico datastore. [Default: 5m]'
+ type: string
+ type: object
+ serviceAccount:
+ description: ServiceAccount enables and configures the service
+ account controller. Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform reconciliation
+ with the Calico datastore. [Default: 5m]'
+ type: string
+ type: object
+ workloadEndpoint:
+ description: WorkloadEndpoint enables and configures the workload
+ endpoint controller. Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform reconciliation
+ with the Calico datastore. [Default: 5m]'
+ type: string
+ type: object
+ type: object
+ debugProfilePort:
+ description: DebugProfilePort configures the port to serve memory
+ and cpu profiles on. If not specified, profiling is disabled.
+ format: int32
+ type: integer
+ etcdV3CompactionPeriod:
+ description: 'EtcdV3CompactionPeriod is the period between etcdv3
+ compaction requests. Set to 0 to disable. [Default: 10m]'
+ type: string
+ healthChecks:
+ description: 'HealthChecks enables or disables support for health
+ checks [Default: Enabled]'
+ type: string
+ logSeverityScreen:
+ description: 'LogSeverityScreen is the log severity above which logs
+ are sent to the stdout. [Default: Info]'
+ type: string
+ prometheusMetricsPort:
+ description: 'PrometheusMetricsPort is the TCP port that the Prometheus
+ metrics server should bind to. Set to 0 to disable. [Default: 9094]'
+ type: integer
+ required:
+ - controllers
+ type: object
+ status:
+ description: KubeControllersConfigurationStatus represents the status
+ of the configuration. It's useful for admins to be able to see the actual
+ config that was applied, which can be modified by environment variables
+ on the kube-controllers process.
+ properties:
+ environmentVars:
+ additionalProperties:
+ type: string
+ description: EnvironmentVars contains the environment variables on
+ the kube-controllers that influenced the RunningConfig.
+ type: object
+ runningConfig:
+ description: RunningConfig contains the effective config that is running
+ in the kube-controllers pod, after merging the API resource with
+ any environment variables.
+ properties:
+ controllers:
+ description: Controllers enables and configures individual Kubernetes
+ controllers
+ properties:
+ namespace:
+ description: Namespace enables and configures the namespace
+ controller. Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform
+ reconciliation with the Calico datastore. [Default:
+ 5m]'
+ type: string
+ type: object
+ node:
+ description: Node enables and configures the node controller.
+ Enabled by default, set to nil to disable.
+ properties:
+ hostEndpoint:
+ description: HostEndpoint controls syncing nodes to host
+ endpoints. Disabled by default, set to nil to disable.
+ properties:
+ autoCreate:
+ description: 'AutoCreate enables automatic creation
+ of host endpoints for every node. [Default: Disabled]'
+ type: string
+ type: object
+ leakGracePeriod:
+ description: 'LeakGracePeriod is the period used by the
+ controller to determine if an IP address has been leaked.
+ Set to 0 to disable IP garbage collection. [Default:
+ 15m]'
+ type: string
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform
+ reconciliation with the Calico datastore. [Default:
+ 5m]'
+ type: string
+ syncLabels:
+ description: 'SyncLabels controls whether to copy Kubernetes
+ node labels to Calico nodes. [Default: Enabled]'
+ type: string
+ type: object
+ policy:
+ description: Policy enables and configures the policy controller.
+ Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform
+ reconciliation with the Calico datastore. [Default:
+ 5m]'
+ type: string
+ type: object
+ serviceAccount:
+ description: ServiceAccount enables and configures the service
+ account controller. Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform
+ reconciliation with the Calico datastore. [Default:
+ 5m]'
+ type: string
+ type: object
+ workloadEndpoint:
+ description: WorkloadEndpoint enables and configures the workload
+ endpoint controller. Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform
+ reconciliation with the Calico datastore. [Default:
+ 5m]'
+ type: string
+ type: object
+ type: object
+ debugProfilePort:
+ description: DebugProfilePort configures the port to serve memory
+ and cpu profiles on. If not specified, profiling is disabled.
+ format: int32
+ type: integer
+ etcdV3CompactionPeriod:
+ description: 'EtcdV3CompactionPeriod is the period between etcdv3
+ compaction requests. Set to 0 to disable. [Default: 10m]'
+ type: string
+ healthChecks:
+ description: 'HealthChecks enables or disables support for health
+ checks [Default: Enabled]'
+ type: string
+ logSeverityScreen:
+ description: 'LogSeverityScreen is the log severity above which
+ logs are sent to the stdout. [Default: Info]'
+ type: string
+ prometheusMetricsPort:
+ description: 'PrometheusMetricsPort is the TCP port that the Prometheus
+ metrics server should bind to. Set to 0 to disable. [Default:
+ 9094]'
+ type: integer
+ required:
+ - controllers
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
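+# For reference, a minimal KubeControllersConfiguration instance matching the
+# schema above might look as follows (illustrative values; Calico expects the
+# resource to be named "default", and this template does not create one):
+#
+#   apiVersion: crd.projectcalico.org/v1
+#   kind: KubeControllersConfiguration
+#   metadata:
+#     name: default
+#   spec:
+#     controllers:
+#       node:
+#         reconcilerPeriod: 5m
+#         syncLabels: Enabled
+#     etcdV3CompactionPeriod: 10m
+#     healthChecks: Enabled
+#     logSeverityScreen: Info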
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: networkpolicies.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: NetworkPolicy
+ listKind: NetworkPolicyList
+ plural: networkpolicies
+ singular: networkpolicy
+ preserveUnknownFields: false
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ egress:
+ description: The ordered set of egress rules. Each rule contains
+ a set of packet match criteria and a corresponding action to apply.
+ items:
+ description: "A Rule encapsulates a set of match criteria and an
+ action. Both selector-based security Policy and security Profiles
+ reference rules - separated out as a list of rules for both ingress
+ and egress packet matching. \n Each positive match criteria has
+ a negated version, prefixed with \"Not\". All the match criteria
+ within a rule must be satisfied for a packet to match. A single
+ rule can contain the positive and negative version of a match
+ and both must be satisfied for the rule to match."
+ properties:
+ action:
+ type: string
+ destination:
+ description: Destination contains the match criteria that apply
+ to destination entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
+ contains a selector expression. Only traffic that originates
+ from (or terminates at) endpoints within the selected
+ namespaces will be matched. When both NamespaceSelector
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
+ type: string
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
+ type: string
+ ports:
+ description: "Ports is an optional field that restricts
+ the rule to only apply to traffic that has a source (destination)
+ port that matches one of these ranges/values. This value
+ is a list of integers or strings that represent ranges
+ of ports. \n Since only some protocols have ports, if
+ any ports are specified it requires the Protocol match
+ in the Rule to be set to \"TCP\" or \"UDP\"."
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
+ a selector expression (see Policy for sample syntax).
+ \ Only traffic that originates from (terminates at) endpoints
+ matching the selector will be matched. \n Note that: in
+ addition to the negated version of the Selector (see NotSelector
+ below), the selector expression syntax itself supports
+ negation. The two types of negation are subtly different.
+ One negates the set of matched endpoints, the other negates
+ the whole match: \n \tSelector = \"!has(my_label)\" matches
+ packets that are from other Calico-controlled \tendpoints
+ that do not have the label \"my_label\". \n \tNotSelector
+ = \"has(my_label)\" matches packets that are not from
+ Calico-controlled \tendpoints that do have the label \"my_label\".
+ \n The effect is that the latter will accept packets from
+ non-Calico sources whereas the former is limited to packets
+ from Calico-controlled endpoints."
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
+ type: string
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ http:
+ description: HTTP contains match criteria that apply to HTTP
+ requests.
+ properties:
+ methods:
+ description: Methods is an optional field that restricts
+ the rule to apply only to HTTP requests that use one of
+ the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
+ methods are OR'd together.
+ items:
+ type: string
+ type: array
+ paths:
+ description: 'Paths is an optional field that restricts
+ the rule to apply to HTTP requests that use one of the
+ listed HTTP Paths. Multiple paths are OR''d together.
+ e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
+ ONLY specify either a `exact` or a `prefix` match. The
+ validator will check for it.'
+ items:
+ description: 'HTTPPath specifies an HTTP path to match.
+ It may be either of the form "exact: <path>", which matches
+ the path exactly, or "prefix: <path-prefix>", which matches
+ the path prefix'
+ properties:
+ exact:
+ type: string
+ prefix:
+ type: string
+ type: object
+ type: array
+ type: object
+ icmp:
+ description: ICMP is an optional field that restricts the rule
+ to apply to a specific type and code of ICMP traffic. This
+ should only be specified if the Protocol field is set to "ICMP"
+ or "ICMPv6".
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ ipVersion:
+ description: IPVersion is an optional field that restricts the
+ rule to only match a specific IP version.
+ type: integer
+ metadata:
+ description: Metadata contains additional information for this
+ rule
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations is a set of key value pairs that
+ give extra information about the rule
+ type: object
+ type: object
+ notICMP:
+ description: NotICMP is the negated version of the ICMP field.
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ notProtocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: NotProtocol is the negated version of the Protocol
+ field.
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ protocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "Protocol is an optional field that restricts the
+ rule to only apply to traffic of a specific IP protocol. Required
+ if any of the EntityRules contain Ports (because ports only
+ apply to certain protocols). \n Must be one of these string
+ values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
+ \"UDPLite\" or an integer in the range 1-255."
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ source:
+ description: Source contains the match criteria that apply to
+ source entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
+ contains a selector expression. Only traffic that originates
+ from (or terminates at) endpoints within the selected
+ namespaces will be matched. When both NamespaceSelector
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
+ type: string
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
+ type: string
+ ports:
+ description: "Ports is an optional field that restricts
+ the rule to only apply to traffic that has a source (destination)
+ port that matches one of these ranges/values. This value
+ is a list of integers or strings that represent ranges
+ of ports. \n Since only some protocols have ports, if
+ any ports are specified it requires the Protocol match
+ in the Rule to be set to \"TCP\" or \"UDP\"."
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
+ a selector expression (see Policy for sample syntax).
+ \ Only traffic that originates from (terminates at) endpoints
+ matching the selector will be matched. \n Note that: in
+ addition to the negated version of the Selector (see NotSelector
+ below), the selector expression syntax itself supports
+ negation. The two types of negation are subtly different.
+ One negates the set of matched endpoints, the other negates
+ the whole match: \n \tSelector = \"!has(my_label)\" matches
+ packets that are from other Calico-controlled \tendpoints
+ that do not have the label \"my_label\". \n \tNotSelector
+ = \"has(my_label)\" matches packets that are not from
+ Calico-controlled \tendpoints that do have the label \"my_label\".
+ \n The effect is that the latter will accept packets from
+ non-Calico sources whereas the former is limited to packets
+ from Calico-controlled endpoints."
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
+ type: string
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ required:
+ - action
+ type: object
+ type: array
+ ingress:
+ description: The ordered set of ingress rules. Each rule contains
+ a set of packet match criteria and a corresponding action to apply.
+ items:
+ description: "A Rule encapsulates a set of match criteria and an
+ action. Both selector-based security Policy and security Profiles
+ reference rules - separated out as a list of rules for both ingress
+ and egress packet matching. \n Each positive match criteria has
+ a negated version, prefixed with \"Not\". All the match criteria
+ within a rule must be satisfied for a packet to match. A single
+ rule can contain the positive and negative version of a match
+ and both must be satisfied for the rule to match."
+ properties:
+ action:
+ type: string
+ destination:
+ description: Destination contains the match criteria that apply
+ to destination entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
+ contains a selector expression. Only traffic that originates
+ from (or terminates at) endpoints within the selected
+ namespaces will be matched. When both NamespaceSelector
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
+ type: string
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
+ type: string
+ ports:
+ description: "Ports is an optional field that restricts
+ the rule to only apply to traffic that has a source (destination)
+ port that matches one of these ranges/values. This value
+ is a list of integers or strings that represent ranges
+ of ports. \n Since only some protocols have ports, if
+ any ports are specified it requires the Protocol match
+ in the Rule to be set to \"TCP\" or \"UDP\"."
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
+ a selector expression (see Policy for sample syntax).
+ \ Only traffic that originates from (terminates at) endpoints
+ matching the selector will be matched. \n Note that: in
+ addition to the negated version of the Selector (see NotSelector
+ below), the selector expression syntax itself supports
+ negation. The two types of negation are subtly different.
+ One negates the set of matched endpoints, the other negates
+ the whole match: \n \tSelector = \"!has(my_label)\" matches
+ packets that are from other Calico-controlled \tendpoints
+ that do not have the label \"my_label\". \n \tNotSelector
+ = \"has(my_label)\" matches packets that are not from
+ Calico-controlled \tendpoints that do have the label \"my_label\".
+ \n The effect is that the latter will accept packets from
+ non-Calico sources whereas the former is limited to packets
+ from Calico-controlled endpoints."
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
+ type: string
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ http:
+ description: HTTP contains match criteria that apply to HTTP
+ requests.
+ properties:
+ methods:
+ description: Methods is an optional field that restricts
+ the rule to apply only to HTTP requests that use one of
+ the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
+ methods are OR'd together.
+ items:
+ type: string
+ type: array
+ paths:
+ description: 'Paths is an optional field that restricts
+ the rule to apply to HTTP requests that use one of the
+ listed HTTP Paths. Multiple paths are OR''d together.
+ e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
+ ONLY specify either a `exact` or a `prefix` match. The
+ validator will check for it.'
+ items:
+ description: 'HTTPPath specifies an HTTP path to match.
+ It may be either of the form "exact: <path>", which matches
+ the path exactly, or "prefix: <path-prefix>", which matches
+ the path prefix'
+ properties:
+ exact:
+ type: string
+ prefix:
+ type: string
+ type: object
+ type: array
+ type: object
+ icmp:
+ description: ICMP is an optional field that restricts the rule
+ to apply to a specific type and code of ICMP traffic. This
+ should only be specified if the Protocol field is set to "ICMP"
+ or "ICMPv6".
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ ipVersion:
+ description: IPVersion is an optional field that restricts the
+ rule to only match a specific IP version.
+ type: integer
+ metadata:
+ description: Metadata contains additional information for this
+ rule
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations is a set of key value pairs that
+ give extra information about the rule
+ type: object
+ type: object
+ notICMP:
+ description: NotICMP is the negated version of the ICMP field.
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ notProtocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: NotProtocol is the negated version of the Protocol
+ field.
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ protocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "Protocol is an optional field that restricts the
+ rule to only apply to traffic of a specific IP protocol. Required
+ if any of the EntityRules contain Ports (because ports only
+ apply to certain protocols). \n Must be one of these string
+ values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
+ \"UDPLite\" or an integer in the range 1-255."
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ source:
+ description: Source contains the match criteria that apply to
+ source entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
+ contains a selector expression. Only traffic that originates
+ from (or terminates at) endpoints within the selected
+ namespaces will be matched. When both NamespaceSelector
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
+ type: string
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
+ type: string
+ ports:
+ description: "Ports is an optional field that restricts
+ the rule to only apply to traffic that has a source (destination)
+ port that matches one of these ranges/values. This value
+ is a list of integers or strings that represent ranges
+ of ports. \n Since only some protocols have ports, if
+ any ports are specified it requires the Protocol match
+ in the Rule to be set to \"TCP\" or \"UDP\"."
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
+ a selector expression (see Policy for sample syntax).
+ \ Only traffic that originates from (terminates at) endpoints
+ matching the selector will be matched. \n Note that: in
+ addition to the negated version of the Selector (see NotSelector
+ below), the selector expression syntax itself supports
+ negation. The two types of negation are subtly different.
+ One negates the set of matched endpoints, the other negates
+ the whole match: \n \tSelector = \"!has(my_label)\" matches
+ packets that are from other Calico-controlled \tendpoints
+ that do not have the label \"my_label\". \n \tNotSelector
+ = \"has(my_label)\" matches packets that are not from
+ Calico-controlled \tendpoints that do have the label \"my_label\".
+ \n The effect is that the latter will accept packets from
+ non-Calico sources whereas the former is limited to packets
+ from Calico-controlled endpoints."
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
+ type: string
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ required:
+ - action
+ type: object
+ type: array
+ order:
+ description: Order is an optional field that specifies the order in
+ which the policy is applied. Policies with higher "order" are applied
+ after those with lower order. If the order is omitted, it may be
+ considered to be "infinite" - i.e. the policy will be applied last. Policies
+ with identical order will be applied in alphanumerical order based
+ on the Policy "Name".
+ type: number
+ performanceHints:
+ description: "PerformanceHints contains a list of hints to Calico's
+ policy engine to help process the policy more efficiently. Hints
+ never change the enforcement behaviour of the policy. \n Currently,
+ the only available hint is \"AssumeNeededOnEveryNode\". When that
+ hint is set on a policy, Felix will act as if the policy matches
+ a local endpoint even if it does not. This is useful for \"preloading\"
+ any large static policies that are known to be used on every node.
+ If the policy is _not_ used on a particular node then the work done
+ to preload the policy (and to maintain it) is wasted."
+ items:
+ type: string
+ type: array
+ selector:
+ description: "The selector is an expression used to pick pick out
+ the endpoints that the policy should be applied to. \n Selector
+ expressions follow this syntax: \n \tlabel == \"string_literal\"
+ \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
+ \ -> not equal; also matches if label is not present \tlabel in
+ { \"a\", \"b\", \"c\", ... } -> true if the value of label X is
+ one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\",
+ ... } -> true if the value of label X is not one of \"a\", \"b\",
+ \"c\" \thas(label_name) -> True if that label is present \t! expr
+ -> negation of expr \texpr && expr -> Short-circuit and \texpr
+ || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall()
+ or the empty selector -> matches all endpoints. \n Label names are
+ allowed to contain alphanumerics, -, _ and /. String literals are
+ more permissive but they do not support escape characters. \n Examples
+ (with made-up labels): \n \ttype == \"webserver\" && deployment
+ == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
+ \"dev\" \t! has(label_name)"
+ type: string
+ serviceAccountSelector:
+ description: ServiceAccountSelector is an optional field for an expression
+ used to select a pod based on service accounts.
+ type: string
+ types:
+ description: "Types indicates whether this policy applies to ingress,
+ or to egress, or to both. When not explicitly specified (and so
+ the value on creation is empty or nil), Calico defaults Types according
+ to what Ingress and Egress are present in the policy. The default
+ is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including
+ the case where there are also no Ingress rules) \n - [ PolicyTypeEgress
+ ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress,
+ PolicyTypeEgress ], if there are both Ingress and Egress rules.
+ \n When the policy is read back again, Types will always be one
+ of these values, never empty or nil."
+ items:
+ description: PolicyType enumerates the possible values of the PolicySpec
+ Types field.
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
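+# For reference, a minimal NetworkPolicy instance against the schema above
+# (illustrative names, labels and port; not created by this template): it
+# allows TCP ingress to endpoints matching role == 'db' from endpoints
+# matching role == 'frontend', on port 6379 only.
+#
+#   apiVersion: crd.projectcalico.org/v1
+#   kind: NetworkPolicy
+#   metadata:
+#     name: allow-frontend-to-db
+#     namespace: default
+#   spec:
+#     selector: role == 'db'
+#     types:
+#       - Ingress
+#     ingress:
+#       - action: Allow
+#         protocol: TCP
+#         source:
+#           selector: role == 'frontend'
+#         destination:
+#           ports:
+#             - 6379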
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: networksets.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: NetworkSet
+ listKind: NetworkSetList
+ plural: networksets
+ singular: networkset
+ preserveUnknownFields: false
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: NetworkSetSpec contains the specification for a NetworkSet
+ resource.
+ properties:
+ nets:
+ description: The list of IP networks that belong to this set.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
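+# For reference, a minimal NetworkSet instance (illustrative name, label and
+# CIDRs; not created by this template). Its labels can then be matched by
+# policy selectors just like workload endpoint labels:
+#
+#   apiVersion: crd.projectcalico.org/v1
+#   kind: NetworkSet
+#   metadata:
+#     name: external-services
+#     namespace: default
+#     labels:
+#       role: external
+#   spec:
+#     nets:
+#       - 198.51.100.0/24
+#       - 203.0.113.0/24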
+---
+# Source: calico/templates/calico-kube-controllers-rbac.yaml
+# Include a clusterrole for the kube-controllers component,
+# and bind it to the calico-kube-controllers serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-kube-controllers
+rules:
+ # Nodes are watched to monitor for deletions.
+ - apiGroups: [""]
+ resources:
+ - nodes
+ verbs:
+ - watch
+ - list
+ - get
+ # Pods are watched to check for existence as part of IPAM controller.
+ - apiGroups: [""]
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ # IPAM resources are manipulated in response to node and block updates, as well as periodic triggers.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ipreservations
+ verbs:
+ - list
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ - ipamblocks
+ - ipamhandles
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - watch
+ # Pools are watched to maintain a mapping of blocks to IP pools.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ippools
+ verbs:
+ - list
+ - watch
+ # kube-controllers manages hostendpoints.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - hostendpoints
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ # Needs access to update clusterinformations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - clusterinformations
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - watch
+ # KubeControllersConfiguration is where kube-controllers gets its config
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - kubecontrollersconfigurations
+ verbs:
+ # read its own config
+ - get
+ # create a default if none exists
+ - create
+ # update status
+ - update
+ # watch for changes
+ - watch
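+# Once the matching ClusterRoleBinding below is applied, the grants can be
+# checked by impersonating the service account, e.g. (illustrative):
+#   kubectl auth can-i watch nodes \
+#     --as=system:serviceaccount:kube-system:calico-kube-controllers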
+---
+# Source: calico/templates/calico-node-rbac.yaml
+# Include a clusterrole for the calico-node DaemonSet,
+# and bind it to the calico-node serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-node
+rules:
+ # Used for creating service account tokens to be used by the CNI plugin
+ - apiGroups: [""]
+ resources:
+ - serviceaccounts/token
+ resourceNames:
+ - calico-cni-plugin
+ verbs:
+ - create
+ # The CNI plugin needs to get pods, nodes, and namespaces.
+ - apiGroups: [""]
+ resources:
+ - pods
+ - nodes
+ - namespaces
+ verbs:
+ - get
+ # EndpointSlices are used for Service-based network policy rule
+ # enforcement.
+ - apiGroups: ["discovery.k8s.io"]
+ resources:
+ - endpointslices
+ verbs:
+ - watch
+ - list
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ - services
+ verbs:
+ # Used to discover service IPs for advertisement.
+ - watch
+ - list
+ # Used to discover Typhas.
+ - get
+ # Pod CIDR auto-detection on kubeadm needs access to config maps.
+ - apiGroups: [""]
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - nodes/status
+ verbs:
+ # Needed for clearing NodeNetworkUnavailable flag.
+ - patch
+ # Calico stores some configuration information in node annotations.
+ - update
+ # Watch for changes to Kubernetes NetworkPolicies.
+ - apiGroups: ["networking.k8s.io"]
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+ # Used by Calico for policy information.
+ - apiGroups: [""]
+ resources:
+ - pods
+ - namespaces
+ - serviceaccounts
+ verbs:
+ - list
+ - watch
+ # The CNI plugin patches pods/status.
+ - apiGroups: [""]
+ resources:
+ - pods/status
+ verbs:
+ - patch
+ # Calico monitors various CRDs for config.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - globalfelixconfigs
+ - felixconfigurations
+ - bgppeers
+ - bgpfilters
+ - globalbgpconfigs
+ - bgpconfigurations
+ - ippools
+ - ipreservations
+ - ipamblocks
+ - globalnetworkpolicies
+ - globalnetworksets
+ - networkpolicies
+ - networksets
+ - clusterinformations
+ - hostendpoints
+ - blockaffinities
+ - caliconodestatuses
+ verbs:
+ - get
+ - list
+ - watch
+ # Calico must create and update some CRDs on startup.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ippools
+ - felixconfigurations
+ - clusterinformations
+ verbs:
+ - create
+ - update
+ # Calico must update some CRDs.
+ - apiGroups: [ "crd.projectcalico.org" ]
+ resources:
+ - caliconodestatuses
+ verbs:
+ - update
+ # Calico stores some configuration information on the node.
+ - apiGroups: [""]
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ # These permissions are only required for upgrade from v2.6, and can
+ # be removed after upgrade or on fresh installations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - bgpconfigurations
+ - bgppeers
+ verbs:
+ - create
+ - update
+ # These permissions are required for Calico CNI to perform IPAM allocations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ - ipamblocks
+ - ipamhandles
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ # The CNI plugin and calico/node need to be able to create a default
+ # IPAMConfiguration
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ipamconfigs
+ verbs:
+ - get
+ - create
+ # Block affinities must also be watchable by confd for route aggregation.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ verbs:
+ - watch
+ # The Calico IPAM migration needs to get daemonsets. These permissions can be
+ # removed if not upgrading from an installation using host-local IPAM.
+ - apiGroups: ["apps"]
+ resources:
+ - daemonsets
+ verbs:
+ - get
+---
+# Source: calico/templates/calico-node-rbac.yaml
+# CNI cluster role
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-cni-plugin
+rules:
+ - apiGroups: [""]
+ resources:
+ - pods
+ - nodes
+ - namespaces
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - pods/status
+ verbs:
+ - patch
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ - ipamblocks
+ - ipamhandles
+ - clusterinformations
+ - ippools
+ - ipreservations
+ - ipamconfigs
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+---
+# Source: calico/templates/calico-kube-controllers-rbac.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-kube-controllers
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-kube-controllers
+subjects:
+ - kind: ServiceAccount
+ name: calico-kube-controllers
+ namespace: kube-system
+---
+# Source: calico/templates/calico-node-rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-node
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-node
+subjects:
+ - kind: ServiceAccount
+ name: calico-node
+ namespace: kube-system
+---
+# Source: calico/templates/calico-node-rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-cni-plugin
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-cni-plugin
+subjects:
+ - kind: ServiceAccount
+ name: calico-cni-plugin
+ namespace: kube-system
+
+{{- if .cni.calico.typha }}
+---
+# Source: calico/templates/calico-typha.yaml
+# This manifest creates a Service, which will be backed by Calico's Typha daemon.
+# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: calico-typha
+ namespace: kube-system
+ labels:
+ k8s-app: calico-typha
+spec:
+ ports:
+ - port: 5473
+ protocol: TCP
+ targetPort: calico-typha
+ name: calico-typha
+ selector:
+ k8s-app: calico-typha
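+ # Felix on each node discovers Typha through this Service; see the
+ # FELIX_TYPHAK8SSERVICENAME env var on the calico-node DaemonSet below.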
+{{- end }}
+---
+# Source: calico/templates/calico-node.yaml
+# This manifest installs the calico-node container, as well
+# as the CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+ name: calico-node
+ namespace: kube-system
+ labels:
+ k8s-app: calico-node
+spec:
+ selector:
+ matchLabels:
+ k8s-app: calico-node
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-node
+ spec:
+ nodeSelector:
+ kubernetes.io/os: linux
+ hostNetwork: true
+ tolerations:
+ # Make sure calico-node gets scheduled on all nodes.
+ - effect: NoSchedule
+ operator: Exists
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ serviceAccountName: calico-node
+ # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+ # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
+ priorityClassName: system-node-critical
+ initContainers:
+ # This container performs upgrade from host-local IPAM to calico-ipam.
+ # It can be deleted if this is a fresh installation, or if you have already
+ # upgraded to use calico-ipam.
+ - name: upgrade-ipam
+ image: {{ .cni.calico.cni_image }}
+ imagePullPolicy: IfNotPresent
+ command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
+ envFrom:
+ - configMapRef:
+ # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
+ name: kubernetes-services-endpoint
+ optional: true
+ env:
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ volumeMounts:
+ - mountPath: /var/lib/cni/networks
+ name: host-local-net-dir
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ securityContext:
+ privileged: true
+ # This container installs the CNI binaries
+ # and CNI network config file on each node.
+ - name: install-cni
+ image: {{ .cni.calico.cni_image }}
+ imagePullPolicy: IfNotPresent
+ command: ["/opt/cni/bin/install"]
+ envFrom:
+ - configMapRef:
+ # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
+ name: kubernetes-services-endpoint
+ optional: true
+ env:
+ # Name of the CNI config file to create.
+ - name: CNI_CONF_NAME
+ value: "10-calico.conflist"
+ # The CNI network config to install on each node.
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: cni_network_config
+ # Set the hostname based on the k8s node name.
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # CNI MTU Config variable
+ - name: CNI_MTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # Prevents the container from sleeping forever.
+ - name: SLEEP
+ value: "false"
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ securityContext:
+ privileged: true
+ # This init container mounts the necessary filesystems needed by the BPF data plane
+ # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed
+ # in best-effort fashion, i.e. errors are non-fatal, so that pod creation is not disrupted in iptables mode.
+ - name: "mount-bpffs"
+ image: {{ .cni.calico.node_image }}
+ imagePullPolicy: IfNotPresent
+ command: ["calico-node", "-init", "-best-effort"]
+ volumeMounts:
+ - mountPath: /sys/fs
+ name: sys-fs
+ # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host
+ # so that it outlives the init container.
+ mountPropagation: Bidirectional
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host
+ # so that it outlives the init container.
+ mountPropagation: Bidirectional
+ # Mount the host's /proc at /nodeproc. It's needed by the mountns binary,
+ # executed by calico-node, to mount the root cgroup2 fs at /run/calico/cgroup so that CTLB programs attach correctly.
+ - mountPath: /nodeproc
+ name: nodeproc
+ readOnly: true
+ securityContext:
+ privileged: true
+ containers:
+ # Runs calico-node container on each Kubernetes node. This
+ # container programs network policy and routes on each
+ # host.
+ - name: calico-node
+ image: {{ .cni.calico.node_image }}
+ imagePullPolicy: IfNotPresent
+ envFrom:
+ - configMapRef:
+ # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
+ name: kubernetes-services-endpoint
+ optional: true
+ env:
+ # Use Kubernetes API as the backing datastore.
+ - name: DATASTORE_TYPE
+ value: "kubernetes"
+{{- if .cni.calico.typha }}
+ # Typha support: controlled by the ConfigMap.
+ - name: FELIX_TYPHAK8SSERVICENAME
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: typha_service_name
+{{- end }}
+ # Wait for the datastore.
+ - name: WAIT_FOR_DATASTORE
+ value: "true"
+ # Set based on the k8s node name.
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Choose the backend to use.
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ # Cluster type to identify the deployment type
+ - name: CLUSTER_TYPE
+ value: "k8s,bgp"
+ # Auto-detect the BGP IP address.
+ - name: NODEIP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: IP_AUTODETECTION_METHOD
+ value: "can-reach=$(NODEIP)"
+ - name: IP
+ value: "autodetect"
+{{- if .cni.ipv6_support }}
+ - name: IP6
+ value: "autodetect"
+{{- end }}
+ # IPIP mode for the default IPv4 pool (Always, CrossSubnet or Never).
+ - name: CALICO_IPV4POOL_IPIP
+ value: "{{ .cni.calico.ipip_mode }}"
+ # Enable or Disable VXLAN on the default IP pool.
+ - name: CALICO_IPV4POOL_VXLAN
+ value: "{{ .cni.calico.vxlan_mode }}"
+{{- if .cni.calico.ipv4pool_nat_outgoing }}
+ - name: CALICO_IPV4POOL_NAT_OUTGOING
+ value: "true"
+{{- else }}
+ - name: CALICO_IPV4POOL_NAT_OUTGOING
+ value: "false"
+{{- end }}
+{{- if .cni.ipv6_support }}
+ # Enable or Disable VXLAN on the default IPv6 IP pool.
+ - name: CALICO_IPV6POOL_VXLAN
+ value: "Always"
+ - name: CALICO_IPV6POOL_NAT_OUTGOING
+ value: "true"
+{{- else }}
+ # Enable or Disable VXLAN on the default IPv6 IP pool.
+ - name: CALICO_IPV6POOL_VXLAN
+ value: "Never"
+ - name: CALICO_IPV6POOL_NAT_OUTGOING
+ value: "false"
+{{- end }}
+ # Set MTU for tunnel device used if ipip is enabled
+ - name: FELIX_IPINIPMTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # Set MTU for the VXLAN tunnel device.
+ - name: FELIX_VXLANMTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # Set MTU for the Wireguard tunnel device.
+ - name: FELIX_WIREGUARDMTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+{{- if .cni.calico.default_ip_pool }}
+ # The default IPv4 pool to create on startup if none exists. Pod IPs will be
+ # chosen from this range. Changing this value after installation will have
+ # no effect.
+ - name: CALICO_IPV4POOL_CIDR
+ value: "{{ .cni.kube_pods_v4_cidr }}"
+ - name: CALICO_IPV4POOL_BLOCK_SIZE
+ value: "{{ .cni.node_cidr_mask_size }}"
+ {{- if .cni.ipv6_support }}
+ - name: CALICO_IPV6POOL_CIDR
+ value: "{{ .cni.kube_pods_v6_cidr }}"
+ - name: CALICO_IPV6POOL_BLOCK_SIZE
+ value: "120"
+ {{- end }}
+{{- else }}
+ - name: NO_DEFAULT_POOLS
+ value: "true"
+ - name: CALICO_IPV4POOL_CIDR
+ value: ""
+ {{- if .cni.ipv6_support }}
+ - name: CALICO_IPV6POOL_CIDR
+ value: ""
+ {{- end }}
+{{- end }}
+ - name: CALICO_DISABLE_FILE_LOGGING
+ value: "true"
+ # Set Felix endpoint to host default action to ACCEPT.
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+ value: "ACCEPT"
+ # Enable or disable IPv6 support in Felix, based on .cni.ipv6_support.
+{{- if .cni.ipv6_support }}
+ - name: FELIX_IPV6SUPPORT
+ value: "true"
+{{- else }}
+ - name: FELIX_IPV6SUPPORT
+ value: "false"
+{{- end }}
+ - name: FELIX_HEALTHENABLED
+ value: "true"
+ - name: FELIX_DEVICEROUTESOURCEADDRESS
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: 250m
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/calico-node
+ - -shutdown
+ livenessProbe:
+ exec:
+ command:
+ - /bin/calico-node
+ - -felix-live
+ - -bird-live
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ timeoutSeconds: 10
+ readinessProbe:
+ exec:
+ command:
+ - /bin/calico-node
+ - -felix-ready
+ - -bird-ready
+ periodSeconds: 10
+ timeoutSeconds: 10
+ volumeMounts:
+ # For maintaining CNI plugin API credentials.
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ readOnly: false
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /run/xtables.lock
+ name: xtables-lock
+ readOnly: false
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ - mountPath: /var/lib/calico
+ name: var-lib-calico
+ readOnly: false
+ - name: policysync
+ mountPath: /var/run/nodeagent
+ # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
+ # parent directory.
+ - name: bpffs
+ mountPath: /sys/fs/bpf
+ - name: cni-log-dir
+ mountPath: /var/log/calico/cni
+ readOnly: true
+ volumes:
+ # Used by calico-node.
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: var-run-calico
+ hostPath:
+ path: /var/run/calico
+ - name: var-lib-calico
+ hostPath:
+ path: /var/lib/calico
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ - name: sys-fs
+ hostPath:
+ path: /sys/fs/
+ type: DirectoryOrCreate
+ - name: bpffs
+ hostPath:
+ path: /sys/fs/bpf
+ type: Directory
+ # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs.
+ - name: nodeproc
+ hostPath:
+ path: /proc
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+ # Used to access CNI logs.
+ - name: cni-log-dir
+ hostPath:
+ path: /var/log/calico/cni
+ # Mount in the directory for host-local IPAM allocations. This is
+ # used when upgrading from host-local to calico-ipam, and can be removed
+ # if not using the upgrade-ipam init container.
+ - name: host-local-net-dir
+ hostPath:
+ path: /var/lib/cni/networks
+ # Used to create per-pod Unix Domain Sockets
+ - name: policysync
+ hostPath:
+ type: DirectoryOrCreate
+ path: /var/run/nodeagent
+---
+# Source: calico/templates/calico-kube-controllers.yaml
+# See https://github.com/projectcalico/kube-controllers
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+spec:
+ # The controllers can only have a single active instance.
+ replicas: {{ .cni.calico.replicas }}
+ selector:
+ matchLabels:
+ k8s-app: calico-kube-controllers
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ spec:
+ nodeSelector:
+ kubernetes.io/os: linux
+{{- if .cni.calico.node_selector }}
+{{ .cni.calico.node_selector | toYaml | indent 8 }}
+{{- end }}
+ tolerations:
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: k8s-app
+ operator: In
+ values:
+ - calico-kube-controllers
+ topologyKey: kubernetes.io/hostname
+ serviceAccountName: calico-kube-controllers
+ priorityClassName: system-cluster-critical
+ containers:
+ - name: calico-kube-controllers
+ image: {{ .cni.calico.kube_controller_image }}
+ imagePullPolicy: IfNotPresent
+ env:
+ # Choose which controllers to run.
+ - name: ENABLED_CONTROLLERS
+ value: node
+ - name: DATASTORE_TYPE
+ value: kubernetes
+ livenessProbe:
+ exec:
+ command:
+ - /usr/bin/check-status
+ - -l
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ timeoutSeconds: 10
+ readinessProbe:
+ exec:
+ command:
+ - /usr/bin/check-status
+ - -r
+ periodSeconds: 10
+
+{{- if .cni.calico.typha }}
+---
+# Source: calico/templates/calico-typha.yaml
+# This manifest creates a Deployment of Typha to back the above service.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: calico-typha
+ namespace: kube-system
+ labels:
+ k8s-app: calico-typha
+spec:
+ # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
+ # typha_service_name variable in the calico-config ConfigMap above.
+ #
+ # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
+ # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
+ # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
+ replicas: {{ .cni.calico.replicas }}
+ revisionHistoryLimit: 2
+ selector:
+ matchLabels:
+ k8s-app: calico-typha
+ strategy:
+ rollingUpdate:
+ # 100% surge allows a complete up-level set of typha instances to start and become ready,
+ # which in turn allows all the back-level typha instances to start shutting down. This
+ # means that connections tend to bounce directly from a back-level instance to an up-level
+ # instance.
+ maxSurge: 100%
+ # In case the cluster is unable to schedule extra surge instances, allow at most one instance
+ # to shut down to make room. You can set this to 0 if you're sure there'll always be enough room to
+ # schedule extra typha instances during an upgrade (because setting it to 0 blocks shutdown until
+ # up-level typha instances are online and ready).
+ maxUnavailable: 1
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-typha
+ annotations:
+ cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
+ spec:
+ nodeSelector:
+ kubernetes.io/os: linux
+{{- if .cni.calico.node_selector }}
+{{ .cni.calico.node_selector | toYaml | indent 8 }}
+{{- end }}
+ hostNetwork: true
+ # Typha supports graceful shut down, disconnecting clients slowly during the grace period.
+ # The TYPHA_SHUTDOWNTIMEOUTSECS env var should be kept in sync with this value.
+ terminationGracePeriodSeconds: 300
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: k8s-app
+ operator: In
+ values:
+ - calico-typha
+ topologyKey: kubernetes.io/hostname
+ tolerations:
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Make sure Typha can get scheduled on any nodes.
+ - effect: NoSchedule
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ # Since Calico can't network a pod until Typha is up, we need to run Typha itself
+ # as a host-networked pod.
+ serviceAccountName: calico-node
+ priorityClassName: system-cluster-critical
+ # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573
+ securityContext:
+ fsGroup: 65534
+ containers:
+ - image: {{ .cni.calico.typha_image }}
+ imagePullPolicy: IfNotPresent
+ name: calico-typha
+ ports:
+ - containerPort: 5473
+ name: calico-typha
+ protocol: TCP
+ envFrom:
+ - configMapRef:
+ # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
+ name: kubernetes-services-endpoint
+ optional: true
+ env:
+ # Enable "info" logging by default. Can be set to "debug" to increase verbosity.
+ - name: TYPHA_LOGSEVERITYSCREEN
+ value: "info"
+ # Disable logging to file and syslog since those don't make sense in Kubernetes.
+ - name: TYPHA_LOGFILEPATH
+ value: "none"
+ - name: TYPHA_LOGSEVERITYSYS
+ value: "none"
+ # Monitor the Kubernetes API to find the number of running instances and rebalance
+ # connections.
+ - name: TYPHA_CONNECTIONREBALANCINGMODE
+ value: "kubernetes"
+ - name: TYPHA_DATASTORETYPE
+ value: "kubernetes"
+ - name: TYPHA_HEALTHENABLED
+ value: "true"
+ # Set this to the same value as terminationGracePeriodSeconds; it tells Typha how much time
+ # it has to shut down.
+ - name: TYPHA_SHUTDOWNTIMEOUTSECS
+ value: "300"
+ # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked,
+ # this opens a port on the host, which may need to be secured.
+ #- name: TYPHA_PROMETHEUSMETRICSENABLED
+ # value: "true"
+ #- name: TYPHA_PROMETHEUSMETRICSPORT
+ # value: "9093"
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9098
+ host: localhost
+ periodSeconds: 30
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ securityContext:
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9098
+ host: localhost
+ periodSeconds: 10
+ timeoutSeconds: 10
+{{- end }}
diff --git a/feature/builtin/roles/addons/cni/templates/flannel/flannel.yaml b/feature/builtin/roles/addons/cni/templates/flannel/flannel.yaml
new file mode 100644
index 000000000..814be8428
--- /dev/null
+++ b/feature/builtin/roles/addons/cni/templates/flannel/flannel.yaml
@@ -0,0 +1,213 @@
+---
+kind: Namespace
+apiVersion: v1
+metadata:
+ name: kube-flannel
+ labels:
+ k8s-app: flannel
+ pod-security.kubernetes.io/enforce: privileged
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ labels:
+ k8s-app: flannel
+ name: flannel
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ labels:
+ k8s-app: flannel
+ name: flannel
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: flannel
+subjects:
+ - kind: ServiceAccount
+ name: flannel
+ namespace: kube-flannel
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: flannel
+ name: flannel
+ namespace: kube-flannel
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: kube-flannel-cfg
+ namespace: kube-flannel
+ labels:
+ tier: node
+ k8s-app: flannel
+ app: flannel
+data:
+ cni-conf.json: |
+ {
+ "name": "cbr0",
+ "cniVersion": "0.3.1",
+ "plugins": [
+ {
+ "type": "flannel",
+ "delegate": {
+ "hairpinMode": true,
+ "isDefaultGateway": true
+ }
+ },
+ {
+ "type": "portmap",
+ "capabilities": {
+ "portMappings": true
+ }
+ }
+ ]
+ }
+ net-conf.json: |
+ {
+ "Network": "{{ .cni.kube_pods_v4_cidr }}",
+ {{- if .cni.ipv6_support }}
+ "EnableIPv6": true,
+ "IPv6Network":"{{ .cni.kube_pods_v6_cidr }}",
+ {{- end }}
+ "EnableNFTables": {{ .cni.kube_proxy }},
+ "Backend": {
+ "Type": "{{ .cni.flannel.backend }}"
+ }
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds
+ namespace: kube-flannel
+ labels:
+ tier: node
+ app: flannel
+ k8s-app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ hostNetwork: true
+ priorityClassName: system-node-critical
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni-plugin
+ image: {{ .cni.flannel.cni_plugin_image }}
+ command:
+ - cp
+ args:
+ - -f
+ - /flannel
+ - /opt/cni/bin/flannel
+ volumeMounts:
+ - name: cni-plugin
+ mountPath: /opt/cni/bin
+ - name: install-cni
+ image: {{ .cni.flannel.flannel_image }}
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: {{ .cni.flannel.flannel_image }}
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN", "NET_RAW"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: EVENT_QUEUE_DEPTH
+ value: "5000"
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni-plugin
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
diff --git a/feature/builtin/roles/addons/cni/templates/multus/multus.yaml b/feature/builtin/roles/addons/cni/templates/multus/multus.yaml
new file mode 100644
index 000000000..913487341
--- /dev/null
+++ b/feature/builtin/roles/addons/cni/templates/multus/multus.yaml
@@ -0,0 +1,206 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: network-attachment-definitions.k8s.cni.cncf.io
+spec:
+ group: k8s.cni.cncf.io
+ scope: Namespaced
+ names:
+ plural: network-attachment-definitions
+ singular: network-attachment-definition
+ kind: NetworkAttachmentDefinition
+ shortNames:
+ - net-attach-def
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ schema:
+ openAPIV3Schema:
+ description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
+ Working Group to express the intent for attaching pods to one or more logical or physical
+ networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
+ type: object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the
+ latest internal value, and may reject unrecognized values. More info:
+ https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
+ type: object
+ properties:
+ config:
+ description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
+ type: string
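+# This template does not create NetworkAttachmentDefinition instances. A
+# hypothetical example, assuming a macvlan secondary network on eth0, would be:
+#
+#   apiVersion: k8s.cni.cncf.io/v1
+#   kind: NetworkAttachmentDefinition
+#   metadata:
+#     name: macvlan-conf
+#   spec:
+#     config: '{ "cniVersion": "0.3.1", "type": "macvlan", "master": "eth0",
+#                "ipam": { "type": "host-local", "subnet": "192.168.1.0/24" } }'
+#
+# Pods attach to it through the k8s.v1.cni.cncf.io/networks annotation.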
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: multus
+rules:
+ - apiGroups: ["k8s.cni.cncf.io"]
+ resources:
+ - '*'
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - pods/status
+ verbs:
+ - get
+ - update
+ - apiGroups:
+ - ""
+ - events.k8s.io
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: multus
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: multus
+subjects:
+ - kind: ServiceAccount
+ name: multus
+ namespace: kube-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: multus
+ namespace: kube-system
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: multus-cni-config
+ namespace: kube-system
+ labels:
+ tier: node
+ app: multus
+data:
+ # NOTE: If you'd prefer to manually apply a configuration file, you may create one here.
+ # In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod;
+ # change the "args" line below from
+ # - "--multus-conf-file=auto"
+ # to:
+ # "--multus-conf-file=/tmp/multus-conf/70-multus.conf"
+ # Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the
+ # /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet.
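+ # As a concrete illustration of that edit (hypothetical, not rendered here),
+ # the DaemonSet args below would change from:
+ #   args:
+ #     - "--multus-conf-file=auto"
+ # to:
+ #   args:
+ #     - "--multus-conf-file=/tmp/multus-conf/70-multus.conf"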
+ cni-conf.json: |
+ {
+ "name": "multus-cni-network",
+ "type": "multus",
+ "capabilities": {
+ "portMappings": true
+ },
+ "delegates": [
+ {
+ "cniVersion": "0.3.1",
+ "name": "default-cni-network",
+ "plugins": [
+ {
+ "type": "flannel",
+ "name": "flannel.1",
+ "delegate": {
+ "isDefaultGateway": true,
+ "hairpinMode": true
+ }
+ },
+ {
+ "type": "portmap",
+ "capabilities": {
+ "portMappings": true
+ }
+ }
+ ]
+ }
+ ],
+ "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-multus-ds
+ namespace: kube-system
+ labels:
+ tier: node
+ app: multus
+ name: multus
+spec:
+ selector:
+ matchLabels:
+ name: multus
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: multus
+ name: multus
+ spec:
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: multus
+ containers:
+ - name: kube-multus
+ image: {{ .cni.multus.image }}
+ command: ["/entrypoint.sh"]
+ args:
+ - "--multus-conf-file=auto"
+ - "--cni-version=0.3.1"
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cni
+ mountPath: /host/etc/cni/net.d
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ - name: multus-cfg
+ mountPath: /tmp/multus-conf
+ terminationGracePeriodSeconds: 10
+ volumes:
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+ - name: multus-cfg
+ configMap:
+ name: multus-cni-config
+ items:
+ - key: cni-conf.json
+ path: 70-multus.conf
diff --git a/feature/builtin/roles/addons/kata/defaults/main.yaml b/feature/builtin/roles/addons/kata/defaults/main.yaml
new file mode 100644
index 000000000..5b158dd42
--- /dev/null
+++ b/feature/builtin/roles/addons/kata/defaults/main.yaml
@@ -0,0 +1,4 @@
+kata:
+ enabled: false
+ image: |
+ {{ .dockerio_registry }}/kubesphere/kata-deploy:stable
diff --git a/feature/builtin/roles/addons/kata/tasks/main.yaml b/feature/builtin/roles/addons/kata/tasks/main.yaml
new file mode 100644
index 000000000..fa6690068
--- /dev/null
+++ b/feature/builtin/roles/addons/kata/tasks/main.yaml
@@ -0,0 +1,11 @@
+---
+- name: Generate kata deploy file
+ template:
+ src: kata-deploy.yaml
+ dest: /etc/kubernetes/addons/kata-deploy.yaml
+ when: .kata.enabled
+
+- name: Deploy kata
+ command: |
+ kubectl apply -f /etc/kubernetes/addons/kata-deploy.yaml
+ when: .kata.enabled
diff --git a/feature/builtin/roles/addons/kata/templates/kata-deploy.yaml b/feature/builtin/roles/addons/kata/templates/kata-deploy.yaml
new file mode 100644
index 000000000..f2afd42cd
--- /dev/null
+++ b/feature/builtin/roles/addons/kata/templates/kata-deploy.yaml
@@ -0,0 +1,127 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: kata-label-node
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: node-labeler
+rules:
+ - apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kata-label-node-rb
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: node-labeler
+subjects:
+ - kind: ServiceAccount
+ name: kata-label-node
+ namespace: kube-system
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kata-deploy
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ name: kata-deploy
+ template:
+ metadata:
+ labels:
+ name: kata-deploy
+ spec:
+ serviceAccountName: kata-label-node
+ containers:
+ - name: kube-kata
+ image: {{ .kata.image }}
+ imagePullPolicy: IfNotPresent
+ lifecycle:
+ preStop:
+ exec:
+ command: ["bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh cleanup"]
+ command: [ "bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh install" ]
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ securityContext:
+ privileged: false
+ volumeMounts:
+ - name: crio-conf
+ mountPath: /etc/crio/
+ - name: containerd-conf
+ mountPath: /etc/containerd/
+ - name: kata-artifacts
+ mountPath: /opt/kata/
+ - name: dbus
+ mountPath: /var/run/dbus
+ - name: systemd
+ mountPath: /run/systemd
+ - name: local-bin
+ mountPath: /usr/local/bin/
+ volumes:
+ - name: crio-conf
+ hostPath:
+ path: /etc/crio/
+ - name: containerd-conf
+ hostPath:
+ path: /etc/containerd/
+ - name: kata-artifacts
+ hostPath:
+ path: /opt/kata/
+ type: DirectoryOrCreate
+ - name: dbus
+ hostPath:
+ path: /var/run/dbus
+ - name: systemd
+ hostPath:
+ path: /run/systemd
+ - name: local-bin
+ hostPath:
+ path: /usr/local/bin/
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ type: RollingUpdate
+---
+kind: RuntimeClass
+apiVersion: node.k8s.io/v1
+metadata:
+ name: kata-qemu
+handler: kata-qemu
+overhead:
+ podFixed:
+ memory: "160Mi"
+ cpu: "250m"
+---
+kind: RuntimeClass
+apiVersion: node.k8s.io/v1
+metadata:
+ name: kata-clh
+handler: kata-clh
+overhead:
+ podFixed:
+ memory: "130Mi"
+ cpu: "250m"
+---
+kind: RuntimeClass
+apiVersion: node.k8s.io/v1
+metadata:
+ name: kata-fc
+handler: kata-fc
+overhead:
+ podFixed:
+ memory: "130Mi"
+ cpu: "250m"
diff --git a/feature/builtin/roles/addons/nfd/defaults/main.yaml b/feature/builtin/roles/addons/nfd/defaults/main.yaml
new file mode 100644
index 000000000..aebb39547
--- /dev/null
+++ b/feature/builtin/roles/addons/nfd/defaults/main.yaml
@@ -0,0 +1,4 @@
+nfd:
+ enabled: false
+ image: |
+ {{ .dockerio_registry }}/kubesphere/node-feature-discovery:v0.10.0
diff --git a/feature/builtin/roles/addons/nfd/tasks/main.yaml b/feature/builtin/roles/addons/nfd/tasks/main.yaml
new file mode 100644
index 000000000..d472d9899
--- /dev/null
+++ b/feature/builtin/roles/addons/nfd/tasks/main.yaml
@@ -0,0 +1,11 @@
+---
+- name: Generate nfd deploy file
+ template:
+ src: nfd-deploy.yaml
+ dest: /etc/kubernetes/addons/nfd-deploy.yaml
+ when: .nfd.enabled
+
+- name: Deploy nfd
+ command: |
+ kubectl apply -f /etc/kubernetes/addons/nfd-deploy.yaml
+ when: .nfd.enabled
diff --git a/feature/builtin/roles/addons/nfd/templates/nfd-deploy.yaml b/feature/builtin/roles/addons/nfd/templates/nfd-deploy.yaml
new file mode 100644
index 000000000..189c28baa
--- /dev/null
+++ b/feature/builtin/roles/addons/nfd/templates/nfd-deploy.yaml
@@ -0,0 +1,620 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: node-feature-discovery
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.7.0
+ name: nodefeaturerules.nfd.k8s-sigs.io
+spec:
+ group: nfd.k8s-sigs.io
+ names:
+ kind: NodeFeatureRule
+ listKind: NodeFeatureRuleList
+ plural: nodefeaturerules
+ singular: nodefeaturerule
+ scope: Cluster
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: NodeFeatureRule resource specifies a configuration for feature-based customization of node objects, such as node labeling.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: NodeFeatureRuleSpec describes a NodeFeatureRule.
+ properties:
+ rules:
+ description: Rules is a list of node customization rules.
+ items:
+ description: Rule defines a rule for node customization such as labeling.
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels to create if the rule matches.
+ type: object
+ labelsTemplate:
+ description: LabelsTemplate specifies a template to expand for dynamically generating multiple labels. Data (after template expansion) must be keys with an optional value ([=]) separated by newlines.
+ type: string
+ matchAny:
+ description: MatchAny specifies a list of matchers one of which must match.
+ items:
+ description: MatchAnyElem specifies one sub-matcher of MatchAny.
+ properties:
+ matchFeatures:
+ description: MatchFeatures specifies a set of matcher terms all of which must match.
+ items:
+ description: FeatureMatcherTerm defines requirements against one feature set. All requirements (specified as MatchExpressions) are evaluated against each element in the feature set.
+ properties:
+ feature:
+ type: string
+ matchExpressions:
+ additionalProperties:
+ description: "MatchExpression specifies an expression to evaluate against a set of input values. It contains an operator that is applied when matching the input and an array of values that the operator evaluates the input against. \n NB: CreateMatchExpression or MustCreateMatchExpression() should be used for creating new instances. NB: Validate() must be called if Op or Value fields are modified or if a new instance is created from scratch without using the helper functions."
+ properties:
+ op:
+ description: Op is the operator to be applied.
+ enum:
+ - In
+ - NotIn
+ - InRegexp
+ - Exists
+ - DoesNotExist
+ - Gt
+ - Lt
+ - GtLt
+ - IsTrue
+ - IsFalse
+ type: string
+ value:
+ description: Value is the list of values that the operand evaluates the input against. Value should be empty if the operator is Exists, DoesNotExist, IsTrue or IsFalse. Value should contain exactly one element if the operator is Gt or Lt and exactly two elements if the operator is GtLt. In other cases Value should contain at least one element.
+ items:
+ type: string
+ type: array
+ required:
+ - op
+ type: object
+ description: MatchExpressionSet contains a set of MatchExpressions, each of which is evaluated against a set of input values.
+ type: object
+ required:
+ - feature
+ - matchExpressions
+ type: object
+ type: array
+ required:
+ - matchFeatures
+ type: object
+ type: array
+ matchFeatures:
+ description: MatchFeatures specifies a set of matcher terms all of which must match.
+ items:
+ description: FeatureMatcherTerm defines requirements against one feature set. All requirements (specified as MatchExpressions) are evaluated against each element in the feature set.
+ properties:
+ feature:
+ type: string
+ matchExpressions:
+ additionalProperties:
+ description: "MatchExpression specifies an expression to evaluate against a set of input values. It contains an operator that is applied when matching the input and an array of values that the operator evaluates the input against. \n NB: CreateMatchExpression or MustCreateMatchExpression() should be used for creating new instances. NB: Validate() must be called if Op or Value fields are modified or if a new instance is created from scratch without using the helper functions."
+ properties:
+ op:
+ description: Op is the operator to be applied.
+ enum:
+ - In
+ - NotIn
+ - InRegexp
+ - Exists
+ - DoesNotExist
+ - Gt
+ - Lt
+ - GtLt
+ - IsTrue
+ - IsFalse
+ type: string
+ value:
+ description: Value is the list of values that the operand evaluates the input against. Value should be empty if the operator is Exists, DoesNotExist, IsTrue or IsFalse. Value should contain exactly one element if the operator is Gt or Lt and exactly two elements if the operator is GtLt. In other cases Value should contain at least one element.
+ items:
+ type: string
+ type: array
+ required:
+ - op
+ type: object
+ description: MatchExpressionSet contains a set of MatchExpressions, each of which is evaluated against a set of input values.
+ type: object
+ required:
+ - feature
+ - matchExpressions
+ type: object
+ type: array
+ name:
+ description: Name of the rule.
+ type: string
+ vars:
+ additionalProperties:
+ type: string
+ description: Vars is the variables to store if the rule matches. Variables do not directly inflict any changes in the node object. However, they can be referenced from other rules enabling more complex rule hierarchies, without exposing intermediary output values as labels.
+ type: object
+ varsTemplate:
+ description: VarsTemplate specifies a template to expand for dynamically generating multiple variables. Data (after template expansion) must be keys with an optional value ([=]) separated by newlines.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ required:
+ - rules
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: nfd-master
+ namespace: node-feature-discovery
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: nfd-master
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - patch
+ - update
+ - list
+ - apiGroups:
+ - topology.node.k8s.io
+ resources:
+ - noderesourcetopologies
+ verbs:
+ - create
+ - get
+ - update
+ - apiGroups:
+ - nfd.k8s-sigs.io
+ resources:
+ - nodefeaturerules
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: nfd-master
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: nfd-master
+subjects:
+ - kind: ServiceAccount
+ name: nfd-master
+ namespace: node-feature-discovery
+---
+apiVersion: v1
+data:
+ nfd-worker.conf: |
+ #core:
+ # labelWhiteList:
+ # noPublish: false
+ # sleepInterval: 60s
+ # featureSources: [all]
+ # labelSources: [all]
+ # klog:
+ # addDirHeader: false
+ # alsologtostderr: false
+ # logBacktraceAt:
+ # logtostderr: true
+ # skipHeaders: false
+ # stderrthreshold: 2
+ # v: 0
+ # vmodule:
+ ## NOTE: the following options are not dynamically run-time configurable
+ ## and require a nfd-worker restart to take effect after being changed
+ # logDir:
+ # logFile:
+ # logFileMaxSize: 1800
+ # skipLogHeaders: false
+ #sources:
+ # cpu:
+ # cpuid:
+ ## NOTE: whitelist has priority over blacklist
+ # attributeBlacklist:
+ # - "BMI1"
+ # - "BMI2"
+ # - "CLMUL"
+ # - "CMOV"
+ # - "CX16"
+ # - "ERMS"
+ # - "F16C"
+ # - "HTT"
+ # - "LZCNT"
+ # - "MMX"
+ # - "MMXEXT"
+ # - "NX"
+ # - "POPCNT"
+ # - "RDRAND"
+ # - "RDSEED"
+ # - "RDTSCP"
+ # - "SGX"
+ # - "SSE"
+ # - "SSE2"
+ # - "SSE3"
+ # - "SSE4"
+ # - "SSE42"
+ # - "SSSE3"
+ # attributeWhitelist:
+ # kernel:
+ # kconfigFile: "/path/to/kconfig"
+ # configOpts:
+ # - "NO_HZ"
+ # - "X86"
+ # - "DMI"
+ # pci:
+ # deviceClassWhitelist:
+ # - "0200"
+ # - "03"
+ # - "12"
+ # deviceLabelFields:
+ # - "class"
+ # - "vendor"
+ # - "device"
+ # - "subsystem_vendor"
+ # - "subsystem_device"
+ # usb:
+ # deviceClassWhitelist:
+ # - "0e"
+ # - "ef"
+ # - "fe"
+ # - "ff"
+ # deviceLabelFields:
+ # - "class"
+ # - "vendor"
+ # - "device"
+ # custom:
+ # # The following feature demonstrates the capabilities of the matchFeatures
+ # - name: "my custom rule"
+ # labels:
+ # my-ng-feature: "true"
+ # # matchFeatures implements a logical AND over all matcher terms in the
+ # # list (i.e. all of the terms, or per-feature matchers, must match)
+ # matchFeatures:
+ # - feature: cpu.cpuid
+ # matchExpressions:
+ # AVX512F: {op: Exists}
+ # - feature: cpu.cstate
+ # matchExpressions:
+ # enabled: {op: IsTrue}
+ # - feature: cpu.pstate
+ # matchExpressions:
+ # no_turbo: {op: IsFalse}
+ # scaling_governor: {op: In, value: ["performance"]}
+ # - feature: cpu.rdt
+ # matchExpressions:
+ # RDTL3CA: {op: Exists}
+ # - feature: cpu.sst
+ # matchExpressions:
+ # bf.enabled: {op: IsTrue}
+ # - feature: cpu.topology
+ # matchExpressions:
+ # hardware_multithreading: {op: IsFalse}
+ #
+ # - feature: kernel.config
+ # matchExpressions:
+ # X86: {op: Exists}
+ # LSM: {op: InRegexp, value: ["apparmor"]}
+ # - feature: kernel.loadedmodule
+ # matchExpressions:
+ # e1000e: {op: Exists}
+ # - feature: kernel.selinux
+ # matchExpressions:
+ # enabled: {op: IsFalse}
+ # - feature: kernel.version
+ # matchExpressions:
+ # major: {op: In, value: ["5"]}
+ # minor: {op: Gt, value: ["10"]}
+ #
+ # - feature: storage.block
+ # matchExpressions:
+ # rotational: {op: In, value: ["0"]}
+ # dax: {op: In, value: ["0"]}
+ #
+ # - feature: network.device
+ # matchExpressions:
+ # operstate: {op: In, value: ["up"]}
+ # speed: {op: Gt, value: ["100"]}
+ #
+ # - feature: memory.numa
+ # matchExpressions:
+ # node_count: {op: Gt, value: ["2"]}
+ # - feature: memory.nv
+ # matchExpressions:
+ # devtype: {op: In, value: ["nd_dax"]}
+ # mode: {op: In, value: ["memory"]}
+ #
+ # - feature: system.osrelease
+ # matchExpressions:
+ # ID: {op: In, value: ["fedora", "centos"]}
+ # - feature: system.name
+ # matchExpressions:
+ # nodename: {op: InRegexp, value: ["^worker-X"]}
+ #
+ # - feature: local.label
+ # matchExpressions:
+ # custom-feature-knob: {op: Gt, value: ["100"]}
+ #
+ # # The following feature demonstrates the capabilities of the matchAny
+ # - name: "my matchAny rule"
+ # labels:
+ # my-ng-feature-2: "my-value"
+ # # matchAny implements a logical OR over all elements (sub-matchers) in
+ # # the list (i.e. at least one feature matcher must match)
+ # matchAny:
+ # - matchFeatures:
+ # - feature: kernel.loadedmodule
+ # matchExpressions:
+ # driver-module-X: {op: Exists}
+ # - feature: pci.device
+ # matchExpressions:
+ # vendor: {op: In, value: ["8086"]}
+ # class: {op: In, value: ["0200"]}
+ # - matchFeatures:
+ # - feature: kernel.loadedmodule
+ # matchExpressions:
+ # driver-module-Y: {op: Exists}
+ # - feature: usb.device
+ # matchExpressions:
+ # vendor: {op: In, value: ["8086"]}
+ # class: {op: In, value: ["02"]}
+ #
+ # # The following feature demonstrates label templating capabilities
+ # - name: "my template rule"
+ # labelsTemplate: |
+ # matchFeatures:
+ # - feature: system.osrelease
+ # matchExpressions:
+ # ID: {op: InRegexp, value: ["^open.*"]}
+ # VERSION_ID.major: {op: In, value: ["13", "15"]}
+ #
+ # - name: "my template rule 2"
+ # matchFeatures:
+ # - feature: pci.device
+ # matchExpressions:
+ # class: {op: InRegexp, value: ["^06"]}
+ # vendor: ["8086"]
+ # - feature: cpu.cpuid
+ # matchExpressions:
+ # AVX: {op: Exists}
+ #
+ # # The following examples demonstrate vars field and back-referencing
+ # # previous labels and vars
+ # - name: "my dummy kernel rule"
+ # labels:
+ # "my.kernel.feature": "true"
+ # matchFeatures:
+ # - feature: kernel.version
+ # matchExpressions:
+ # major: {op: Gt, value: ["2"]}
+ #
+ # - name: "my dummy rule with no labels"
+ # vars:
+ # "my.dummy.var": "1"
+ # matchFeatures:
+ # - feature: cpu.cpuid
+ # matchExpressions: {}
+ #
+ # - name: "my rule using backrefs"
+ # labels:
+ # "my.backref.feature": "true"
+ # matchFeatures:
+ # - feature: rule.matched
+ # matchExpressions:
+ # my.kernel.feature: {op: IsTrue}
+ # my.dummy.var: {op: Gt, value: ["0"]}
+ #
+kind: ConfigMap
+metadata:
+ name: nfd-worker-conf
+ namespace: node-feature-discovery
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: nfd-master
+ namespace: node-feature-discovery
+spec:
+ ports:
+ - port: 8080
+ protocol: TCP
+ selector:
+ app: nfd-master
+ type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: nfd
+ name: nfd-master
+ namespace: node-feature-discovery
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nfd-master
+ template:
+ metadata:
+ labels:
+ app: nfd-master
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: In
+ values:
+ - ""
+ weight: 1
+ - preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: In
+ values:
+ - ""
+ weight: 1
+ containers:
+ - args: []
+ command:
+ - nfd-master
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ image: {{ .nfd.image }}
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ exec:
+ command:
+ - /usr/bin/grpc_health_probe
+ - -addr=:8080
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ name: nfd-master
+ readinessProbe:
+ exec:
+ command:
+ - /usr/bin/grpc_health_probe
+ - -addr=:8080
+ failureThreshold: 10
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ volumeMounts: []
+ serviceAccountName: nfd-master
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Equal
+ value: ""
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ operator: Equal
+ value: ""
+ volumes: []
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels:
+ app: nfd
+ name: nfd-worker
+ namespace: node-feature-discovery
+spec:
+ selector:
+ matchLabels:
+ app: nfd-worker
+ template:
+ metadata:
+ labels:
+ app: nfd-worker
+ spec:
+ containers:
+ - args:
+ - -server=nfd-master:8080
+ command:
+ - nfd-worker
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ image: {{ .nfd.image }}
+ imagePullPolicy: IfNotPresent
+ name: nfd-worker
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ volumeMounts:
+ - mountPath: /host-boot
+ name: host-boot
+ readOnly: true
+ - mountPath: /host-etc/os-release
+ name: host-os-release
+ readOnly: true
+ - mountPath: /host-sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /host-usr/lib
+ name: host-usr-lib
+ readOnly: true
+ - mountPath: /etc/kubernetes/node-feature-discovery/source.d/
+ name: source-d
+ readOnly: true
+ - mountPath: /etc/kubernetes/node-feature-discovery/features.d/
+ name: features-d
+ readOnly: true
+ - mountPath: /etc/kubernetes/node-feature-discovery
+ name: nfd-worker-conf
+ readOnly: true
+ dnsPolicy: ClusterFirstWithHostNet
+ volumes:
+ - hostPath:
+ path: /boot
+ name: host-boot
+ - hostPath:
+ path: /etc/os-release
+ name: host-os-release
+ - hostPath:
+ path: /sys
+ name: host-sys
+ - hostPath:
+ path: /usr/lib
+ name: host-usr-lib
+ - hostPath:
+ path: /etc/kubernetes/node-feature-discovery/source.d/
+ name: source-d
+ - hostPath:
+ path: /etc/kubernetes/node-feature-discovery/features.d/
+ name: features-d
+ - configMap:
+ name: nfd-worker-conf
+ name: nfd-worker-conf
diff --git a/feature/builtin/roles/addons/sc/defaults/main.yaml b/feature/builtin/roles/addons/sc/defaults/main.yaml
new file mode 100644
index 000000000..a1234abd6
--- /dev/null
+++ b/feature/builtin/roles/addons/sc/defaults/main.yaml
@@ -0,0 +1,15 @@
+sc:
+ local:
+ enabled: true
+ default: true
+ provisioner_image: |
+ {{ .dockerio_registry }}/openebs/provisioner-localpv:3.3.0
+ linux_utils_image: |
+ {{ .dockerio_registry }}/openebs/linux-utils:3.3.0
+ path: /var/openebs/local
+ nfs: # each k8s_cluster node should install nfs-utils
+ enabled: false
+ default: false
+ server: |
+ {{ .groups.nfs | default list | first }}
+ path: /share/kubernetes
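+# Note: `.groups.nfs | default list | first` resolves to the first host in the
+# "nfs" inventory group, or renders empty when that group is not defined.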
diff --git a/feature/builtin/roles/addons/sc/tasks/local.yaml b/feature/builtin/roles/addons/sc/tasks/local.yaml
new file mode 100644
index 000000000..4b09e706d
--- /dev/null
+++ b/feature/builtin/roles/addons/sc/tasks/local.yaml
@@ -0,0 +1,9 @@
+---
+- name: Generate local manifest
+ template:
+ src: local-volume.yaml
+ dest: /etc/kubernetes/addons/local-volume.yaml
+
+- name: deploy local
+ command: |
+ kubectl apply -f /etc/kubernetes/addons/local-volume.yaml
diff --git a/feature/builtin/roles/addons/sc/tasks/main.yaml b/feature/builtin/roles/addons/sc/tasks/main.yaml
new file mode 100644
index 000000000..59bd771d4
--- /dev/null
+++ b/feature/builtin/roles/addons/sc/tasks/main.yaml
@@ -0,0 +1,6 @@
+---
+- include_tasks: local.yaml
+ when: .sc.local.enabled
+
+- include_tasks: nfs.yaml
+ when: .sc.nfs.enabled
diff --git a/feature/builtin/roles/addons/sc/tasks/nfs.yaml b/feature/builtin/roles/addons/sc/tasks/nfs.yaml
new file mode 100644
index 000000000..53dfe317e
--- /dev/null
+++ b/feature/builtin/roles/addons/sc/tasks/nfs.yaml
@@ -0,0 +1,13 @@
+---
+- name: Sync nfs provisioner helm to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/sc/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz
+ dest: |
+ /etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz
+
+- name: Deploy nfs provisioner
+ command: |
+ helm upgrade --install nfs-subdir-external-provisioner /etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz --namespace kube-system \
+ --set nfs.server={{ .sc.nfs.server }} --set nfs.path={{ .sc.nfs.path }} \
+ --set storageClass.defaultClass={{ if .sc.nfs.default }}true{{ else }}false{{ end }}
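+# Rendered example of the command above, assuming hypothetical values
+# .nfs_provisioner_version=4.0.18 and an nfs group host at 192.168.0.10:
+#   helm upgrade --install nfs-subdir-external-provisioner \
+#     /etc/kubernetes/addons/nfs-subdir-external-provisioner-4.0.18.tgz \
+#     --namespace kube-system --set nfs.server=192.168.0.10 \
+#     --set nfs.path=/share/kubernetes --set storageClass.defaultClass=false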
diff --git a/feature/builtin/roles/addons/sc/templates/local-volume.yaml b/feature/builtin/roles/addons/sc/templates/local-volume.yaml
new file mode 100644
index 000000000..2b12dcb01
--- /dev/null
+++ b/feature/builtin/roles/addons/sc/templates/local-volume.yaml
@@ -0,0 +1,150 @@
+---
+#Sample storage classes for OpenEBS Local PV
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: local
+ annotations:
+ storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce"]'
+ storageclass.beta.kubernetes.io/is-default-class: "{{ if .sc.local.default }}true{{ else }}false{{ end }}"
+ openebs.io/cas-type: local
+ cas.openebs.io/config: |
+ - name: StorageType
+ value: "hostpath"
+ - name: BasePath
+ value: "{{ .sc.local.path }}"
+provisioner: openebs.io/local
+volumeBindingMode: WaitForFirstConsumer
+reclaimPolicy: Delete
+---
+# Create Maya Service Account
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: openebs-maya-operator
+ namespace: kube-system
+---
+# Define Role that allows operations on K8s pods/deployments
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: openebs-maya-operator
+rules:
+ - apiGroups: ["*"]
+ resources: ["nodes", "nodes/proxy"]
+ verbs: ["*"]
+ - apiGroups: ["*"]
+ resources: ["namespaces", "services", "pods", "pods/exec", "deployments", "deployments/finalizers", "replicationcontrollers", "replicasets", "events", "endpoints", "configmaps", "secrets", "jobs", "cronjobs"]
+ verbs: ["*"]
+ - apiGroups: ["*"]
+ resources: ["statefulsets", "daemonsets"]
+ verbs: ["*"]
+ - apiGroups: ["*"]
+ resources: ["resourcequotas", "limitranges"]
+ verbs: ["list", "watch"]
+ - apiGroups: ["*"]
+ resources: ["ingresses", "horizontalpodautoscalers", "verticalpodautoscalers", "poddisruptionbudgets", "certificatesigningrequests"]
+ verbs: ["list", "watch"]
+ - apiGroups: ["*"]
+ resources: ["storageclasses", "persistentvolumeclaims", "persistentvolumes"]
+ verbs: ["*"]
+ - apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: [ "get", "list", "create", "update", "delete", "patch"]
+ - apiGroups: ["openebs.io"]
+ resources: [ "*"]
+ verbs: ["*"]
+ - nonResourceURLs: ["/metrics"]
+ verbs: ["get"]
+---
+# Bind the Service Account with the Role Privileges.
+# TODO: Check if default account also needs to be there
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: openebs-maya-operator
+subjects:
+ - kind: ServiceAccount
+ name: openebs-maya-operator
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+ name: openebs-maya-operator
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: openebs-localpv-provisioner
+ namespace: kube-system
+ labels:
+ name: openebs-localpv-provisioner
+ openebs.io/component-name: openebs-localpv-provisioner
+ openebs.io/version: 3.3.0
+spec:
+ selector:
+ matchLabels:
+ name: openebs-localpv-provisioner
+ openebs.io/component-name: openebs-localpv-provisioner
+ replicas: 1
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: openebs-localpv-provisioner
+ openebs.io/component-name: openebs-localpv-provisioner
+ openebs.io/version: 3.3.0
+ spec:
+ serviceAccountName: openebs-maya-operator
+ containers:
+ - name: openebs-provisioner-hostpath
+ imagePullPolicy: IfNotPresent
+ image: {{ .sc.local.provisioner_image }}
+ env:
+ # OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s
+ # based on this address. This is ignored if empty.
+ # This is supported for openebs provisioner version 0.5.2 onwards
+ #- name: OPENEBS_IO_K8S_MASTER
+ # value: "http://10.128.0.12:8080"
+ # OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s
+ # based on this config. This is ignored if empty.
+ # This is supported for openebs provisioner version 0.5.2 onwards
+ #- name: OPENEBS_IO_KUBE_CONFIG
+ # value: "/home/ubuntu/.kube/config"
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: OPENEBS_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ # OPENEBS_SERVICE_ACCOUNT provides the service account of this pod as
+ # environment variable
+ - name: OPENEBS_SERVICE_ACCOUNT
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.serviceAccountName
+ - name: OPENEBS_IO_ENABLE_ANALYTICS
+ value: "true"
+ - name: OPENEBS_IO_INSTALLER_TYPE
+ value: "openebs-operator-lite"
+ - name: OPENEBS_IO_HELPER_IMAGE
+ value: "{{ .sc.local.linux_utils_image }}"
+ # LEADER_ELECTION_ENABLED is used to enable/disable leader election. By default
+ # leader election is enabled.
+ #- name: LEADER_ELECTION_ENABLED
+ # value: "true"
+ # OPENEBS_IO_IMAGE_PULL_SECRETS environment variable is used to pass the image pull secrets
+ # to the helper pod launched by local-pv hostpath provisioner
+ #- name: OPENEBS_IO_IMAGE_PULL_SECRETS
+ # value: ""
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ - test $(pgrep -c "^provisioner-loc.*") = 1
+ initialDelaySeconds: 30
+ periodSeconds: 60
diff --git a/feature/builtin/roles/certs/renew-etcd/tasks/main.yaml b/feature/builtin/roles/certs/renew-etcd/tasks/main.yaml
new file mode 100644
index 000000000..e7467368c
--- /dev/null
+++ b/feature/builtin/roles/certs/renew-etcd/tasks/main.yaml
@@ -0,0 +1,26 @@
+---
+- name: Sync ca file to remote
+ tags: ["certs"]
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/root.crt
+ dest: /etc/ssl/etcd/ssl/ca.crt
+
+- name: Sync etcd cert file to remote
+ tags: ["certs"]
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.crt
+ dest: /etc/ssl/etcd/ssl/server.crt
+
+- name: Sync etcd key file to remote
+ tags: ["certs"]
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.key
+ dest: |
+ /etc/ssl/etcd/ssl/server.key
+
+- name: Restart etcd service
+ tags: ["certs"]
+ command: systemctl restart etcd
diff --git a/feature/builtin/roles/certs/renew-kubernetes/tasks/etcd.yaml b/feature/builtin/roles/certs/renew-kubernetes/tasks/etcd.yaml
new file mode 100644
index 000000000..4399caf8c
--- /dev/null
+++ b/feature/builtin/roles/certs/renew-kubernetes/tasks/etcd.yaml
@@ -0,0 +1,19 @@
+---
+- name: Sync etcd ca file to remote
+ tags: ["certs"]
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/root.crt
+ dest: /etc/kubernetes/pki/etcd/ca.crt
+- name: Sync etcd cert files to remote
+ tags: ["certs"]
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.crt
+ dest: /etc/kubernetes/pki/etcd/client.crt
+- name: Sync etcd key files to remote
+ tags: ["certs"]
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.key
+ dest: /etc/kubernetes/pki/etcd/client.key
diff --git a/feature/builtin/roles/certs/renew-kubernetes/tasks/kube.yaml b/feature/builtin/roles/certs/renew-kubernetes/tasks/kube.yaml
new file mode 100644
index 000000000..564a8b029
--- /dev/null
+++ b/feature/builtin/roles/certs/renew-kubernetes/tasks/kube.yaml
@@ -0,0 +1,51 @@
+---
+- name: Check kubeadm version
+ tags: ["certs"]
+ run_once: true
+ command: kubeadm version -o short
+ register: kubeadm_install_version
+
+- name: Renew cert by kubeadm
+ tags: ["certs"]
+ run_once: true
+ command: |
+ {{- if .kubeadm_install_version.stdout | semverCompare "> $chronyConfigFile
+ # delete local
+ sed -i '/^local/d' $chronyConfigFile
+ # add local
+ echo "local stratum 10" >> $chronyConfigFile
+ # add server
+ {{- range $server := .ntp_servers }}
+ {{- range $.inventory_hosts }}
+ {{- if eq .hostname $server }}
+ {{- $server = .internal_ipv4 }}
+ {{- end }}
+ {{- end }}
+ grep -q '^server {{ $server }} iburst' $chronyConfigFile || sed '1a server {{ $server }} iburst' -i $chronyConfigFile
+ {{- end }}
+
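+# The `when` conditions on the next two tasks hold when ntp_servers is
+# non-empty or a timezone is configured.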
+- name: Set timezone
+ command: |
+ timedatectl set-timezone {{ .timezone }}
+ timedatectl set-ntp true
+ when: or (.ntp_servers | len | lt 0) (.timezone | ne "")
+
+- name: Restart ntp server
+ command: |
+ {{- if or (.os.release.ID | eq "ubuntu") (.os.release.ID_LIKE | eq "debian") }}
+ systemctl restart chrony.service
+ {{- end }}
+ systemctl restart chronyd.service
+ when: or (.ntp_servers | len | lt 0) (.timezone | ne "")
diff --git a/feature/builtin/roles/init/init-os/tasks/init_repository.yaml b/feature/builtin/roles/init/init-os/tasks/init_repository.yaml
new file mode 100644
index 000000000..ea80cd0fb
--- /dev/null
+++ b/feature/builtin/roles/init/init-os/tasks/init_repository.yaml
@@ -0,0 +1,78 @@
+---
+- name: Sync repository
+ block:
+ - name: Sync repository file
+ ignore_errors: true
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/repository/{{ .os.release.ID_LIKE }}-{{ .os.release.VERSION_ID }}-{{ .binary_type.stdout }}.iso
+ dest: /tmp/kubekey/repository.iso
+ - name: Mount iso file
+ command: |
+ if [ -f "/tmp/kubekey/repository.iso" ]; then
+ mount -t iso9660 -o loop /tmp/kubekey/repository.iso /tmp/kubekey/iso
+ fi
+ rescue:
+ - name: Unmount iso file
+ command: |
+ if [ -f "/tmp/kubekey/repository.iso" ]; then
+ umount /tmp/kubekey/iso
+ fi
+
+- name: Init repository
+ block:
+ - name: Init debian repository
+ command: |
+ now=$(date +"%Y-%m-%d %H:%M:%S")
+ if [ -f "/tmp/kubekey/repository.iso" ];then
+ # backup
+ mv /etc/apt/sources.list /etc/apt/sources.list.kubekey-$now.bak
+ mv /etc/apt/sources.list.d /etc/apt/sources.list.d.kubekey-$now.bak
+ mkdir -p /etc/apt/sources.list.d
+ # add repository
+ rm -rf /etc/apt/sources.list.d/*
+ echo 'deb [trusted=yes] file:///tmp/kubekey/iso /' > /etc/apt/sources.list.d/kubekey.list
+ # update repository
+ apt-get update
+ # install
+ apt install -y socat conntrack ipset ebtables chrony ipvsadm
+ # reset repository
+ rm -rf /etc/apt/sources.list.d
+ mv /etc/apt/sources.list.kubekey-$now.bak /etc/apt/sources.list
+ mv /etc/apt/sources.list.d.kubekey-$now.bak /etc/apt/sources.list.d
+ else
+ apt-get update && apt install -y socat conntrack ipset ebtables chrony ipvsadm
+ fi
+ when: .os.release.ID_LIKE | eq "debian"
+ - name: Init rhel repository
+ command: |
+ now=$(date +"%Y-%m-%d %H:%M:%S")
+ if [ -f "/tmp/kubekey/repository.iso" ];then
+ # backup
+ mv /etc/yum.repos.d /etc/yum.repos.d.kubekey-$now.bak
+ mkdir -p /etc/yum.repos.d
+ # add repository
+ rm -rf /etc/yum.repos.d/*
+ cat << EOF > /etc/yum.repos.d/CentOS-local.repo
+ [base-local]
+ name=rpms-local
+ baseurl=file:///tmp/kubekey/iso
+ enabled=1
+ gpgcheck=0
+ EOF
+ # update repository
+ yum clean all && yum makecache
+ # install
+ yum install -y openssl socat conntrack ipset ebtables chrony ipvsadm
+ # reset repository
+ rm -rf /etc/yum.repos.d
+ mv /etc/yum.repos.d.kubekey-$now.bak /etc/yum.repos.d
+ else
+ # install
+ yum install -y openssl socat conntrack ipset ebtables chrony ipvsadm
+ fi
+ when: .os.release.ID_LIKE | eq "\"rhel fedora\""
diff --git a/feature/builtin/roles/init/init-os/tasks/main.yaml b/feature/builtin/roles/init/init-os/tasks/main.yaml
new file mode 100644
index 000000000..58bdf1087
--- /dev/null
+++ b/feature/builtin/roles/init/init-os/tasks/main.yaml
@@ -0,0 +1,27 @@
+---
+- include_tasks: init_repository.yaml
+
+- include_tasks: init_ntpserver.yaml
+
+- name: Reset tmp dir
+ command: |
+ if [ -d /tmp/kubekey ]; then
+ rm -rf /tmp/kubekey
+ fi
+ mkdir -m 777 -p /tmp/kubekey
+
+- name: Set hostname
+ command: |
+ hostnamectl set-hostname {{ .inventory_name }} \
+ && sed -i '/^127.0.1.1/s/.*/127.0.1.1 {{ .inventory_name }}/g' /etc/hosts
+ when: .inventory_name | ne "localhost"
+
+- name: Sync init os to remote
+ template:
+ src: init-os.sh
+ dest: /etc/kubekey/scripts/init-os.sh
+ mode: 0755
+
+- name: Execute init os script
+ command: |
+ chmod +x /etc/kubekey/scripts/init-os.sh && /etc/kubekey/scripts/init-os.sh
diff --git a/feature/builtin/roles/init/init-os/templates/init-os.sh b/feature/builtin/roles/init/init-os/templates/init-os.sh
new file mode 100644
index 000000000..e863aad82
--- /dev/null
+++ b/feature/builtin/roles/init/init-os/templates/init-os.sh
@@ -0,0 +1,229 @@
+#!/usr/bin/env bash
+
+# Copyright 2020 The KubeSphere Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+swapoff -a
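+# comment out swap entries in /etc/fstab so swap stays disabled across reboots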
+sed -i '/^[^#]*swap/s/^/#/' /etc/fstab
+
+# See https://github.com/kubernetes/website/issues/14457
+if [ -f /etc/selinux/config ]; then
+ sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
+fi
+# for ubuntu: sudo apt install selinux-utils
+# for centos: yum install selinux-policy
+if command -v setenforce &> /dev/null
+then
+ setenforce 0
+ getenforce
+fi
+
+echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf
+echo 'net.bridge.bridge-nf-call-arptables = 1' >> /etc/sysctl.conf
+echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
+echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.ip_local_reserved_ports = 30000-32767' >> /etc/sysctl.conf
+echo 'net.core.netdev_max_backlog = 65535' >> /etc/sysctl.conf
+echo 'net.core.rmem_max = 33554432' >> /etc/sysctl.conf
+echo 'net.core.wmem_max = 33554432' >> /etc/sysctl.conf
+echo 'net.core.somaxconn = 32768' >> /etc/sysctl.conf
+echo 'net.ipv4.tcp_max_syn_backlog = 1048576' >> /etc/sysctl.conf
+echo 'net.ipv4.neigh.default.gc_thresh1 = 512' >> /etc/sysctl.conf
+echo 'net.ipv4.neigh.default.gc_thresh2 = 2048' >> /etc/sysctl.conf
+echo 'net.ipv4.neigh.default.gc_thresh3 = 4096' >> /etc/sysctl.conf
+echo 'net.ipv4.tcp_retries2 = 15' >> /etc/sysctl.conf
+echo 'net.ipv4.tcp_max_tw_buckets = 1048576' >> /etc/sysctl.conf
+echo 'net.ipv4.tcp_max_orphans = 65535' >> /etc/sysctl.conf
+echo 'net.ipv4.udp_rmem_min = 131072' >> /etc/sysctl.conf
+echo 'net.ipv4.udp_wmem_min = 131072' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.all.rp_filter = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.default.rp_filter = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.all.arp_accept = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.default.arp_accept = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.all.arp_ignore = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.default.arp_ignore = 1' >> /etc/sysctl.conf
+echo 'vm.max_map_count = 262144' >> /etc/sysctl.conf
+echo 'vm.swappiness = 0' >> /etc/sysctl.conf
+echo 'vm.overcommit_memory = 1' >> /etc/sysctl.conf
+echo 'fs.inotify.max_user_instances = 524288' >> /etc/sysctl.conf
+echo 'fs.inotify.max_user_watches = 10240001' >> /etc/sysctl.conf
+echo 'fs.pipe-max-size = 4194304' >> /etc/sysctl.conf
+echo 'fs.aio-max-nr = 262144' >> /etc/sysctl.conf
+echo 'kernel.pid_max = 65535' >> /etc/sysctl.conf
+echo 'kernel.watchdog_thresh = 5' >> /etc/sysctl.conf
+echo 'kernel.hung_task_timeout_secs = 5' >> /etc/sysctl.conf
+
+#add for ipv6
+echo 'net.ipv6.conf.all.disable_ipv6 = 0' >> /etc/sysctl.conf
+echo 'net.ipv6.conf.default.disable_ipv6 = 0' >> /etc/sysctl.conf
+echo 'net.ipv6.conf.lo.disable_ipv6 = 0' >> /etc/sysctl.conf
+echo 'net.ipv6.conf.all.forwarding=1' >> /etc/sysctl.conf
+
+#See https://help.aliyun.com/document_detail/118806.html#uicontrol-e50-ddj-w0y
+sed -r -i "s@#{0,}?net.ipv4.tcp_tw_recycle ?= ?(0|1|2)@net.ipv4.tcp_tw_recycle = 0@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.tcp_tw_reuse ?= ?(0|1)@net.ipv4.tcp_tw_reuse = 0@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.conf.all.rp_filter ?= ?(0|1|2)@net.ipv4.conf.all.rp_filter = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.conf.default.rp_filter ?= ?(0|1|2)@net.ipv4.conf.default.rp_filter = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.ip_forward ?= ?(0|1)@net.ipv4.ip_forward = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-arptables ?= ?(0|1)@net.bridge.bridge-nf-call-arptables = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-ip6tables ?= ?(0|1)@net.bridge.bridge-nf-call-ip6tables = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-iptables ?= ?(0|1)@net.bridge.bridge-nf-call-iptables = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.ip_local_reserved_ports ?= ?([0-9]{1,}-{0,1},{0,1}){1,}@net.ipv4.ip_local_reserved_ports = 30000-32767@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?vm.max_map_count ?= ?([0-9]{1,})@vm.max_map_count = 262144@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?vm.swappiness ?= ?([0-9]{1,})@vm.swappiness = 0@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?fs.inotify.max_user_instances ?= ?([0-9]{1,})@fs.inotify.max_user_instances = 524288@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?kernel.pid_max ?= ?([0-9]{1,})@kernel.pid_max = 65535@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?vm.overcommit_memory ?= ?(0|1|2)@vm.overcommit_memory = 0@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?fs.inotify.max_user_watches ?= ?([0-9]{1,})@fs.inotify.max_user_watches = 524288@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?fs.pipe-max-size ?= ?([0-9]{1,})@fs.pipe-max-size = 4194304@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.core.netdev_max_backlog ?= ?([0-9]{1,})@net.core.netdev_max_backlog = 65535@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.core.rmem_max ?= ?([0-9]{1,})@net.core.rmem_max = 33554432@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.core.wmem_max ?= ?([0-9]{1,})@net.core.wmem_max = 33554432@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.tcp_max_syn_backlog ?= ?([0-9]{1,})@net.ipv4.tcp_max_syn_backlog = 1048576@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.neigh.default.gc_thresh1 ?= ?([0-9]{1,})@net.ipv4.neigh.default.gc_thresh1 = 512@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.neigh.default.gc_thresh2 ?= ?([0-9]{1,})@net.ipv4.neigh.default.gc_thresh2 = 2048@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.neigh.default.gc_thresh3 ?= ?([0-9]{1,})@net.ipv4.neigh.default.gc_thresh3 = 4096@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.core.somaxconn ?= ?([0-9]{1,})@net.core.somaxconn = 32768@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.conf.eth0.arp_accept ?= ?(0|1)@net.ipv4.conf.eth0.arp_accept = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?fs.aio-max-nr ?= ?([0-9]{1,})@fs.aio-max-nr = 262144@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.tcp_retries2 ?= ?([0-9]{1,})@net.ipv4.tcp_retries2 = 15@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.tcp_max_tw_buckets ?= ?([0-9]{1,})@net.ipv4.tcp_max_tw_buckets = 1048576@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.tcp_max_orphans ?= ?([0-9]{1,})@net.ipv4.tcp_max_orphans = 65535@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.udp_rmem_min ?= ?([0-9]{1,})@net.ipv4.udp_rmem_min = 131072@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.udp_wmem_min ?= ?([0-9]{1,})@net.ipv4.udp_wmem_min = 131072@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.conf.all.arp_ignore ?= ??(0|1|2)@net.ipv4.conf.all.arp_ignore = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.conf.default.arp_ignore ?= ??(0|1|2)@net.ipv4.conf.default.arp_ignore = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?kernel.watchdog_thresh ?= ?([0-9]{1,})@kernel.watchdog_thresh = 5@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?kernel.hung_task_timeout_secs ?= ?([0-9]{1,})@kernel.hung_task_timeout_secs = 5@g" /etc/sysctl.conf
+
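+# drop duplicate lines from sysctl.conf while keeping the first occurrence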
+tmpfile="$$.tmp"
+awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/sysctl.conf
+mv $tmpfile /etc/sysctl.conf
+
+# ulimit
+echo "* soft nofile 1048576" >> /etc/security/limits.conf
+echo "* hard nofile 1048576" >> /etc/security/limits.conf
+echo "* soft nproc 65536" >> /etc/security/limits.conf
+echo "* hard nproc 65536" >> /etc/security/limits.conf
+echo "* soft memlock unlimited" >> /etc/security/limits.conf
+echo "* hard memlock unlimited" >> /etc/security/limits.conf
+
+sed -r -i "s@#{0,}?\* soft nofile ?([0-9]{1,})@\* soft nofile 1048576@g" /etc/security/limits.conf
+sed -r -i "s@#{0,}?\* hard nofile ?([0-9]{1,})@\* hard nofile 1048576@g" /etc/security/limits.conf
+sed -r -i "s@#{0,}?\* soft nproc ?([0-9]{1,})@\* soft nproc 65536@g" /etc/security/limits.conf
+sed -r -i "s@#{0,}?\* hard nproc ?([0-9]{1,})@\* hard nproc 65536@g" /etc/security/limits.conf
+sed -r -i "s@#{0,}?\* soft memlock ?([0-9]{1,}([TGKM]B){0,1}|unlimited)@\* soft memlock unlimited@g" /etc/security/limits.conf
+sed -r -i "s@#{0,}?\* hard memlock ?([0-9]{1,}([TGKM]B){0,1}|unlimited)@\* hard memlock unlimited@g" /etc/security/limits.conf
+
+tmpfile="$$.tmp"
+awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/security/limits.conf
+mv $tmpfile /etc/security/limits.conf
+
+systemctl stop firewalld 1>/dev/null 2>/dev/null
+systemctl disable firewalld 1>/dev/null 2>/dev/null
+systemctl stop ufw 1>/dev/null 2>/dev/null
+systemctl disable ufw 1>/dev/null 2>/dev/null
+
+modinfo br_netfilter > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+ modprobe br_netfilter
+ mkdir -p /etc/modules-load.d
+ echo 'br_netfilter' > /etc/modules-load.d/kubekey-br_netfilter.conf
+fi
+
+modinfo overlay > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+ modprobe overlay
+ echo 'overlay' >> /etc/modules-load.d/kubekey-br_netfilter.conf
+fi
+
+modprobe ip_vs
+modprobe ip_vs_rr
+modprobe ip_vs_wrr
+modprobe ip_vs_sh
+
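+# Persist the IPVS modules so kube-proxy's IPVS mode still works after a reboot.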
+cat > /etc/modules-load.d/kube_proxy-ipvs.conf << EOF
+ip_vs
+ip_vs_rr
+ip_vs_wrr
+ip_vs_sh
+EOF
+
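+# Kernels >= 4.19 merged nf_conntrack_ipv4 into nf_conntrack, so probe the old module first and fall back to the new name.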
+modprobe nf_conntrack_ipv4 1>/dev/null 2>/dev/null
+if [ $? -eq 0 ]; then
+ echo 'nf_conntrack_ipv4' >> /etc/modules-load.d/kube_proxy-ipvs.conf
+else
+ modprobe nf_conntrack
+ echo 'nf_conntrack' >> /etc/modules-load.d/kube_proxy-ipvs.conf
+fi
+sysctl -p
+
+sed -i ':a;$!{N;ba};s@# kubekey hosts BEGIN.*# kubekey hosts END@@' /etc/hosts
+sed -i '/^$/N;/\n$/N;//D' /etc/hosts
+
+cat >>/etc/hosts<<EOF
+# kubekey hosts BEGIN
+# kubekey hosts END
+EOF
+
+echo 3 > /proc/sys/vm/drop_caches
+
+# Make sure the iptables utility doesn't use the nftables backend.
+update-alternatives --set iptables /usr/sbin/iptables-legacy >/dev/null 2>&1 || true
+update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy >/dev/null 2>&1 || true
+update-alternatives --set arptables /usr/sbin/arptables-legacy >/dev/null 2>&1 || true
+update-alternatives --set ebtables /usr/sbin/ebtables-legacy >/dev/null 2>&1 || true
diff --git a/feature/builtin/roles/install/certs/defaults/main.yaml b/feature/builtin/roles/install/certs/defaults/main.yaml
new file mode 100644
index 000000000..5094d1f7b
--- /dev/null
+++ b/feature/builtin/roles/install/certs/defaults/main.yaml
@@ -0,0 +1,14 @@
+renew_certs:
+ enabled: false
+ is_docker: |
+ {{- if .cri.container_manager | eq "docker" }}
+ true
+ {{- else }}
+ false
+ {{- end }}
+ is_kubeadm_alpha: |
+ {{- if .kube_version | semverCompare "<v1.20.0" }}
+ true
+ {{- else }}
+ false
+ {{- end }}
+
+until printf "" 2>>/dev/null >>/dev/tcp/127.0.0.1/6443; do sleep 1; done
+echo "## Expiration after renewal ##"
+${kubeadmCerts} check-expiration
diff --git a/feature/builtin/roles/install/cri/defaults/main.yaml b/feature/builtin/roles/install/cri/defaults/main.yaml
new file mode 100644
index 000000000..c116584eb
--- /dev/null
+++ b/feature/builtin/roles/install/cri/defaults/main.yaml
@@ -0,0 +1,32 @@
+cri:
+ # support: systemd, cgroupfs
+ cgroup_driver: systemd
+ sandbox_image: |
+ {{ .k8s_registry }}/pause:3.5
+ # support: containerd,docker,crio
+ container_manager: docker
+ # the endpoint of containerd
+ cri_socket: |
+ {{- if .cri.container_manager | eq "containerd" }}
+ unix:///var/run/containerd/containerd.sock
+ {{- end }}
+# containerd:
+# data_root: /var/lib/containerd
+ docker:
+ data_root: /var/lib/docker
+ registry:
+ mirrors: ["https://registry-1.docker.io"]
+ insecure_registries: []
+ auths: []
+
+image_registry:
+ # ha_vip: 192.168.122.59
+ auth:
+ registry: |
+ {{- if and .image_registry.ha_vip (ne .image_registry.ha_vip "") }}
+ {{ .image_registry.ha_vip }}
+ {{- else }}
+ {{ index .inventory_hosts (.groups.image_registry | default list | first) "internal_ipv4" }}
+ {{- end }}
+ username: admin
+ password: Harbor12345
diff --git a/feature/builtin/roles/install/cri/files/containerd.service b/feature/builtin/roles/install/cri/files/containerd.service
new file mode 100644
index 000000000..5f67110ab
--- /dev/null
+++ b/feature/builtin/roles/install/cri/files/containerd.service
@@ -0,0 +1,26 @@
+[Unit]
+Description=containerd container runtime
+Documentation=https://containerd.io
+After=network.target local-fs.target
+
+[Service]
+ExecStartPre=-/sbin/modprobe overlay
+ExecStart=/usr/local/bin/containerd
+
+Type=notify
+Delegate=yes
+KillMode=process
+Restart=always
+RestartSec=5
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNPROC=infinity
+LimitCORE=infinity
+LimitNOFILE=1048576
+# Comment out TasksMax if your systemd version does not support it.
+# Only systemd 226 and above support this option.
+TasksMax=infinity
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target
diff --git a/feature/builtin/roles/install/cri/files/cri_docker.service b/feature/builtin/roles/install/cri/files/cri_docker.service
new file mode 100644
index 000000000..8de02ba12
--- /dev/null
+++ b/feature/builtin/roles/install/cri/files/cri_docker.service
@@ -0,0 +1,36 @@
+[Unit]
+Description=CRI Interface for Docker Application Container Engine
+Documentation=https://docs.mirantis.com
+
+[Service]
+Type=notify
+ExecStart=/usr/local/bin/cri-dockerd --pod-infra-container-image {{ .SandBoxImage }}
+ExecReload=/bin/kill -s HUP $MAINPID
+TimeoutSec=0
+RestartSec=2
+Restart=always
+
+# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
+# Both the old, and new location are accepted by systemd 229 and up, so using the old location
+# to make them work for either version of systemd.
+StartLimitBurst=3
+
+# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
+# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
+# this option work for either version of systemd.
+StartLimitInterval=60s
+
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
+LimitNPROC=infinity
+LimitCORE=infinity
+
+# Comment TasksMax if your systemd version does not support it.
+# Only systemd 226 and above support this option.
+TasksMax=infinity
+Delegate=yes
+KillMode=process
+
+[Install]
+WantedBy=multi-user.target
diff --git a/feature/builtin/roles/install/cri/files/docker.service b/feature/builtin/roles/install/cri/files/docker.service
new file mode 100644
index 000000000..929b8a436
--- /dev/null
+++ b/feature/builtin/roles/install/cri/files/docker.service
@@ -0,0 +1,47 @@
+[Unit]
+Description=Docker Application Container Engine
+Documentation=https://docs.docker.com
+# After=network-online.target firewalld.service containerd.service
+# Wants=network-online.target
+# Requires=docker.socket containerd.service
+
+[Service]
+Type=notify
+# the default is not to use systemd for cgroups because the delegate issues still
+# exists and systemd currently does not support the cgroup feature set required
+# for containers run by docker
+ExecStart=/usr/local/bin/dockerd --containerd=/run/containerd/containerd.sock
+ExecReload=/bin/kill -s HUP $MAINPID
+TimeoutSec=0
+RestartSec=2
+Restart=always
+
+# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
+# Both the old, and new location are accepted by systemd 229 and up, so using the old location
+# to make them work for either version of systemd.
+StartLimitBurst=3
+
+# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
+# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
+# this option work for either version of systemd.
+StartLimitInterval=60s
+
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
+LimitNPROC=infinity
+LimitCORE=infinity
+
+# Comment TasksMax if your systemd version does not support it.
+# Only systemd 226 and above support this option.
+TasksMax=infinity
+
+# set delegate yes so that systemd does not reset the cgroups of docker containers
+Delegate=yes
+
+# kill only the docker process, not all processes in the cgroup
+KillMode=process
+OOMScoreAdjust=-500
+
+[Install]
+WantedBy=multi-user.target
diff --git a/feature/builtin/roles/install/cri/tasks/install_containerd.yaml b/feature/builtin/roles/install/cri/tasks/install_containerd.yaml
new file mode 100644
index 000000000..7df7776e6
--- /dev/null
+++ b/feature/builtin/roles/install/cri/tasks/install_containerd.yaml
@@ -0,0 +1,62 @@
+---
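+# Pattern used throughout this role: probe the installed version first, then only sync binaries when the probe errors or reports a different version.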
+- name: Check if runc is installed
+ ignore_errors: true
+ command: runc --version
+ register: runc_install_version
+- name: Sync runc binary to remote
+ when: or (.runc_install_version.stderr | ne "") (.runc_install_version.stdout | contains (printf "runc version %s\n" (.runc_version | default "" | trimPrefix "v" )) | not)
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/runc/{{ .runc_version }}/{{ .binary_type.stdout }}/runc.{{ .binary_type.stdout }}
+ dest: /usr/local/bin/runc
+ mode: 0755
+
+- name: Check if containerd is installed
+ ignore_errors: true
+ command: containerd --version
+ register: containerd_install_version
+- name: Install containerd
+ when: or (.containerd_install_version.stderr | ne "") (.containerd_install_version.stdout | contains (printf " %s " .containerd_version) | not)
+ block:
+ - name: Sync containerd binary to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/containerd/{{ .containerd_version }}/{{ .binary_type.stdout }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type.stdout }}.tar.gz
+ dest: |
+ /tmp/kubekey/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type.stdout }}.tar.gz
+ - name: Unpackage containerd binary
+ command: |
+ tar -xvf /tmp/kubekey/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin/
+ - name: Generate containerd config file
+ template:
+ src: containerd.config
+ dest: /etc/containerd/config.toml
+ - name: Generate containerd Service file
+ copy:
+ src: containerd.service
+ dest: /etc/systemd/system/containerd.service
+ - name: Start containerd
+ command: |
+ systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
+
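+# Note: "len | lt 0" pipes the group size into lt as its final argument, so it reads "0 < len", i.e. the image_registry group is non-empty.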
+- name: Sync image registry tls to remote
+ when: .groups.image_registry | default list | len | lt 0
+ block:
+ - name: Sync image registry ca file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/root.crt
+ dest: |
+ /etc/containerd/certs.d/{{ .image_registry.auth.registry }}/ca.crt
+ - name: Sync image registry cert file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.crt
+ dest: |
+ /etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.crt
+ - name: Sync image registry key file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.key
+ dest: |
+ /etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.key
diff --git a/feature/builtin/roles/install/cri/tasks/install_crictl.yaml b/feature/builtin/roles/install/cri/tasks/install_crictl.yaml
new file mode 100644
index 000000000..ca0244ff9
--- /dev/null
+++ b/feature/builtin/roles/install/cri/tasks/install_crictl.yaml
@@ -0,0 +1,22 @@
+---
+- name: Check if crictl is installed
+ ignore_errors: true
+ command: crictl --version
+ register: crictl_install_version
+
+- name: Install crictl
+ when: or (.crictl_install_version.stderr | ne "") (.crictl_install_version.stdout | ne (printf "crictl version %s" .crictl_version))
+ block:
+ - name: Sync crictl binary to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/crictl/{{ .crictl_version }}/{{ .binary_type.stdout }}/crictl-{{ .crictl_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ dest: |
+ /tmp/kubekey/crictl-{{ .crictl_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ - name: Unpackage crictl binary
+ command: |
+ tar -xvf /tmp/kubekey/crictl-{{ .crictl_version }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin/
+ - name: Generate crictl config file
+ template:
+ src: crictl.config
+ dest: /etc/crictl.yaml
diff --git a/feature/builtin/roles/install/cri/tasks/install_cridockerd.yaml b/feature/builtin/roles/install/cri/tasks/install_cridockerd.yaml
new file mode 100644
index 000000000..d044b7590
--- /dev/null
+++ b/feature/builtin/roles/install/cri/tasks/install_cridockerd.yaml
@@ -0,0 +1,29 @@
+---
+- name: Check if cri-dockerd is installed
+ ignore_errors: true
+ command: cri-dockerd --version
+ register: cridockerd_install_version
+
+- name: Install cri-dockerd
+ when: or (.cridockerd_install_version.stderr | ne "") (.cridockerd_install_version.stdout | hasPrefix (printf "cri-dockerd %s " .cridockerd_version) | not)
+ block:
+ - name: Sync cri-dockerd Binary to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/cri-dockerd/{{ .cridockerd_version }}/{{ .binary_type.stdout }}/cri-dockerd-{{ .cridockerd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ dest: |
+ /tmp/kubekey/cri-dockerd-{{ .cridockerd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ - name: Generate cri-dockerd config file
+ template:
+ src: cri-dockerd.config
+ dest: /etc/cri-dockerd.yaml
+ - name: Unpackage cri-dockerd binary
+ command: |
+ tar -xvf /tmp/kubekey/cri-dockerd-{{ .cridockerd_version }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin/
+ - name: Generate cri-dockerd Service file
+ template:
+ src: cri-dockerd.service
+ dest: /etc/systemd/system/cri-dockerd.service
+ - name: Start cri-dockerd service
+ command: |
+ systemctl daemon-reload && systemctl start cri-dockerd.service && systemctl enable cri-dockerd.service
diff --git a/feature/builtin/roles/install/cri/tasks/install_docker.yaml b/feature/builtin/roles/install/cri/tasks/install_docker.yaml
new file mode 100644
index 000000000..760ef3c67
--- /dev/null
+++ b/feature/builtin/roles/install/cri/tasks/install_docker.yaml
@@ -0,0 +1,56 @@
+---
+- name: Check if docker is installed
+ ignore_errors: true
+ command: docker --version
+ register: docker_install_version
+
+- name: Install docker
+ when: or (.docker_install_version.stderr | ne "") (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not)
+ block:
+ - name: Sync docker binary to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/docker/{{ .docker_version }}/{{ .binary_type.stdout }}/docker-{{ .docker_version }}.tgz
+ dest: |
+ /tmp/kubekey/docker-{{ .docker_version }}.tgz
+ - name: Unpackage docker binary
+ command: |
+ tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ .docker_version }}.tgz --wildcards docker/*
+ - name: Generate docker config file
+ template:
+ src: docker.config
+ dest: /etc/docker/daemon.json
+ - name: Generate docker service file
+ copy:
+ src: docker.service
+ dest: /etc/systemd/system/docker.service
+ - name: Generate containerd service file
+ copy:
+ src: containerd.service
+ dest: /etc/systemd/system/containerd.service
+ - name: Start docker service
+ command: |
+ systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
+ systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service
+
+- name: Sync image registry tls to remote
+ when: .groups.image_registry | default list | len | lt 0
+ block:
+ - name: Sync image registry ca file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/root.crt
+ dest: |
+ /etc/docker/certs.d/{{ .image_registry.auth.registry }}/ca.crt
+ - name: Sync image registry cert file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.crt
+ dest: |
+ /etc/docker/certs.d/{{ .image_registry.auth.registry }}/client.cert
+ - name: Sync image registry key file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.key
+ dest: |
+ /etc/docker/certs.d/{{ .image_registry.auth.registry }}/client.key
diff --git a/feature/builtin/roles/install/cri/tasks/main.yaml b/feature/builtin/roles/install/cri/tasks/main.yaml
new file mode 100644
index 000000000..d3b957cb6
--- /dev/null
+++ b/feature/builtin/roles/install/cri/tasks/main.yaml
@@ -0,0 +1,19 @@
+---
+# install crictl
+- include_tasks: install_crictl.yaml
+
+# install docker
+- include_tasks: install_docker.yaml
+ when: .cri.container_manager | eq "docker"
+
+# install containerd
+- include_tasks: install_containerd.yaml
+ when: .cri.container_manager | eq "containerd"
+
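+# kubelet removed the in-tree dockershim in Kubernetes v1.24, so the docker runtime needs the external cri-dockerd adapter from that version on.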
+# install cridockerd
+- include_tasks: install_cridockerd.yaml
+ when:
+ - .cri.container_manager | eq "docker"
+ - .kube_version | semverCompare ">=v1.24.0"
+
+
diff --git a/feature/builtin/roles/install/cri/templates/containerd.config b/feature/builtin/roles/install/cri/templates/containerd.config
new file mode 100644
index 000000000..5b83af1cf
--- /dev/null
+++ b/feature/builtin/roles/install/cri/templates/containerd.config
@@ -0,0 +1,84 @@
+version = 2
+
+root = "{{ .cri.containerd.data_root | default "/var/lib/containerd" }}"
+state = "/run/containerd"
+
+[grpc]
+ address = "/run/containerd/containerd.sock"
+ uid = 0
+ gid = 0
+ max_recv_message_size = 16777216
+ max_send_message_size = 16777216
+
+[ttrpc]
+ address = ""
+ uid = 0
+ gid = 0
+
+[debug]
+ address = ""
+ uid = 0
+ gid = 0
+ level = ""
+
+[metrics]
+ address = ""
+ grpc_histogram = false
+
+[cgroup]
+ path = ""
+
+[timeouts]
+ "io.containerd.timeout.shim.cleanup" = "5s"
+ "io.containerd.timeout.shim.load" = "5s"
+ "io.containerd.timeout.shim.shutdown" = "3s"
+ "io.containerd.timeout.task.state" = "2s"
+
+[plugins]
+ [plugins."io.containerd.grpc.v1.cri"]
+ sandbox_image = "{{ .cri.sandbox_image }}"
+ [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
+ runtime_type = "io.containerd.runc.v2"
+ [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+ SystemdCgroup = {{ if .cri.cgroup_driver | eq "systemd" }}true{{ else }}false{{ end }}
+ [plugins."io.containerd.grpc.v1.cri".cni]
+ bin_dir = "/opt/cni/bin"
+ conf_dir = "/etc/cni/net.d"
+ max_conf_num = 1
+ conf_template = ""
+ [plugins."io.containerd.grpc.v1.cri".registry]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
+{{- if .cri.registry.mirrors | len | lt 0 }}
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = {{ .cri.registry.mirrors | toJson }}
+{{- end }}
+{{- range .cri.registry.insecure_registries }}
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ . }}"]
+ endpoint = ["http://{{ . }}"]
+{{- end }}
+{{- if or (.cri.registry.auths | len | lt 0) (.groups.image_registry | default list | len | lt 0) }}
+ [plugins."io.containerd.grpc.v1.cri".registry.configs]
+ [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .image_registry.auth.registry }}".auth]
+ username = "{{ .image_registry.auth.username }}"
+ password = "{{ .image_registry.auth.password }}"
+ [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .image_registry.auth.registry }}".tls]
+ ca_file = "/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/ca.crt"
+ cert_file = "/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.crt"
+ key_file = "/etc/containerd/certs.d/{{ image_registry.auth.registry }}/server.key"
+ {{- range .cri.registry.auths }}
+ [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .repo }}".auth]
+ username = "{{ .username }}"
+ password = "{{ .password }}"
+ [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .repo }}".tls]
+ {{- if .ca_file }}
+ ca_file = "{{ .ca_file }}"
+ {{- end }}
+ {{- if .crt_file }}
+ cert_file = "{{ .crt_file }}"
+ {{- end }}
+ {{- if .key_file }}
+ key_file = "{{ .key_file }}"
+ {{- end }}
+ insecure_skip_verify = {{ .skip_ssl | default true }}
+ {{- end }}
+{{- end }}
diff --git a/feature/builtin/roles/install/cri/templates/crictl.config b/feature/builtin/roles/install/cri/templates/crictl.config
new file mode 100644
index 000000000..9a8544e69
--- /dev/null
+++ b/feature/builtin/roles/install/cri/templates/crictl.config
@@ -0,0 +1,5 @@
+runtime-endpoint: {{ .cri.cri_socket }}
+image-endpoint: {{ .cri.cri_socket }}
+timeout: 5
+debug: false
+pull-image-on-create: false
diff --git a/feature/builtin/roles/install/cri/templates/docker.config b/feature/builtin/roles/install/cri/templates/docker.config
new file mode 100644
index 000000000..7eb1cc766
--- /dev/null
+++ b/feature/builtin/roles/install/cri/templates/docker.config
@@ -0,0 +1,19 @@
+{
+ "log-opts": {
+ "max-size": "5m",
+ "max-file":"3"
+ },
+{{- if .cri.docker.data_root }}
+ "data-root": "{{ .cri.docker.data_root }}",
+{{- end }}
+{{- if .cri.registry.mirrors }}
+ "registry-mirrors": {{ .cri.registry.mirrors | toJson }},
+{{- end }}
+ {{- if .cri.registry.insecure_registries }}
+ "insecure-registries": {{ .cri.registry.insecure_registries | toJson }},
+{{- end }}
+ {{- if .cri.docker.bridge_ip }}
+ "bip": "{{ .cri.docker.bridge_ip }}",
+{{- end }}
+ "exec-opts": ["native.cgroupdriver={{ .cri.cgroup_driver }}"]
+}
diff --git a/feature/builtin/roles/install/etcd/defaults/main.yaml b/feature/builtin/roles/install/etcd/defaults/main.yaml
new file mode 100644
index 000000000..6f017bcc1
--- /dev/null
+++ b/feature/builtin/roles/install/etcd/defaults/main.yaml
@@ -0,0 +1,27 @@
+etcd:
+ # endpoints: ["https://172.1.1.1:2379"]
+ # etcd binary
+ state: new
+# env config
+ env:
+ election_timeout: 5000
+ heartbeat_interval: 250
+ compaction_retention: 8
+ snapshot_count: 10000
+ data_dir: /var/lib/etcd
+ token: k8s_etcd
+# metrics: basic
+# quota_backend_bytes: 100
+# max_request_bytes: 100
+# max_snapshots: 100
+# max_wals: 5
+# log_level: info
+# unsupported_arch: arm64
+# backup config
+ backup:
+ backup_dir: /var/lib/etcd-backup
+ keep_backup_number: 5
+ etcd_backup_script: "backup.sh"
+ on_calendar: "*-*-* *:00/30:00"
+ performance: false
+ traffic_priority: false
diff --git a/feature/builtin/roles/install/etcd/files/backup.service b/feature/builtin/roles/install/etcd/files/backup.service
new file mode 100644
index 000000000..a03f4226a
--- /dev/null
+++ b/feature/builtin/roles/install/etcd/files/backup.service
@@ -0,0 +1,5 @@
+[Unit]
+Description=Backup ETCD
+[Service]
+Type=oneshot
+ExecStart=/usr/local/bin/kube-scripts/backup_etcd.sh
diff --git a/feature/builtin/roles/install/etcd/files/etcd.service b/feature/builtin/roles/install/etcd/files/etcd.service
new file mode 100644
index 000000000..178c3e246
--- /dev/null
+++ b/feature/builtin/roles/install/etcd/files/etcd.service
@@ -0,0 +1,18 @@
+[Unit]
+Description=etcd
+After=network.target
+
+[Service]
+User=root
+Type=notify
+Nice=-20
+OOMScoreAdjust=-1000
+EnvironmentFile=/etc/etcd.env
+ExecStart=/usr/local/bin/etcd
+NotifyAccess=all
+RestartSec=10s
+LimitNOFILE=40000
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/feature/builtin/roles/install/etcd/tasks/backup_etcd.yaml b/feature/builtin/roles/install/etcd/tasks/backup_etcd.yaml
new file mode 100644
index 000000000..772071b37
--- /dev/null
+++ b/feature/builtin/roles/install/etcd/tasks/backup_etcd.yaml
@@ -0,0 +1,21 @@
+---
+- name: Sync custom backup etcd script
+ template:
+ src: |
+ {{ .etcd.backup.etcd_backup_script }}
+ dest: /usr/local/bin/kube-scripts/backup_etcd.sh
+ mode: 0755
+
+- name: Generate backup etcd service
+ copy:
+ src: backup.service
+ dest: /etc/systemd/system/backup-etcd.service
+
+- name: Generate backup etcd timer
+ template:
+ src: backup.timer
+ dest: /etc/systemd/system/backup-etcd.timer
+
+- name: Enable etcd timer
+ command: |
+ systemctl daemon-reload && systemctl enable --now backup-etcd.timer
diff --git a/feature/builtin/roles/install/etcd/tasks/install_etcd.yaml b/feature/builtin/roles/install/etcd/tasks/install_etcd.yaml
new file mode 100644
index 000000000..b8e5be242
--- /dev/null
+++ b/feature/builtin/roles/install/etcd/tasks/install_etcd.yaml
@@ -0,0 +1,58 @@
+---
+- name: Sync etcd binary to node
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/etcd/{{ .etcd_version }}/{{ .binary_type.stdout }}/etcd-{{ .etcd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ dest: |
+ /tmp/kubekey/etcd-{{ .etcd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+
+- name: Extract etcd binary
+ command: |
+ tar --strip-components=1 -C /usr/local/bin/ -xvf /tmp/kubekey/etcd-{{ .etcd_version }}-linux-{{ .binary_type.stdout }}.tar.gz \
+ --wildcards etcd-{{ .etcd_version }}-linux-{{ .binary_type.stdout }}/etcd*
+
+- name: Sync ca file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/root.crt
+ dest: /etc/ssl/etcd/ssl/ca.crt
+
+- name: Sync etcd cert file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.crt
+ dest: /etc/ssl/etcd/ssl/server.crt
+
+- name: Sync etcd key file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.key
+ dest: /etc/ssl/etcd/ssl/server.key
+
+- name: Generate etcd env file
+ template:
+ src: etcd.env
+ dest: /etc/etcd.env
+
+- name: Generate etcd systemd service file
+ copy:
+ src: etcd.service
+ dest: /etc/systemd/system/etcd.service
+
+# refer: https://etcd.io/docs/v3.5/tuning/
+- name: Set cpu to performance
+ command: |
+ echo performance | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
+ when: .etcd.performance
+
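+# Prioritize etcd peer (2380) and client (2379) traffic with tc, as recommended by the etcd tuning guide referenced above.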
+- name: Set Traffic Priority
+ command: |
+ tc qdisc add dev eth0 root handle 1: prio bands 3
+ tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip sport 2380 0xffff flowid 1:1
+ tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip dport 2380 0xffff flowid 1:1
+ tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip sport 2379 0xffff flowid 1:1
+ tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip dport 2379 0xffff flowid 1:1
+ when: .etcd.traffic_priority
+
+- name: Start etcd service
+ command: systemctl daemon-reload && systemctl start etcd && systemctl enable etcd
diff --git a/feature/builtin/roles/install/etcd/tasks/main.yaml b/feature/builtin/roles/install/etcd/tasks/main.yaml
new file mode 100644
index 000000000..2c3531d72
--- /dev/null
+++ b/feature/builtin/roles/install/etcd/tasks/main.yaml
@@ -0,0 +1,27 @@
+---
+- name: Check if etcd is installed
+ ignore_errors: true
+ command: etcd --version
+ run_once: true
+ register: etcd_install_version
+
+- name: Install etcd
+ when: |
+ or (.etcd_install_version.stderr | ne "") (.etcd_install_version.stdout | hasPrefix (printf "etcd Version: %s\n" (.etcd_version | default "" | trimPrefix "v")) | not)
+ block:
+ - name: Init etcd
+ block:
+ - name: Add etcd user
+ command: |
+ useradd -M -c 'Etcd user' -s /sbin/nologin -r etcd || :
+ - name: Create etcd directories
+ command: |
+ if [ ! -d "{{ .item }}" ]; then
+ mkdir -p {{ .item }} && chown -R etcd {{ .item }}
+ fi
+ loop:
+ - "/var/lib/etcd"
+
+ - include_tasks: install_etcd.yaml
+
+ - include_tasks: backup_etcd.yaml
diff --git a/feature/builtin/roles/install/etcd/templates/backup.sh b/feature/builtin/roles/install/etcd/templates/backup.sh
new file mode 100644
index 000000000..61e70235a
--- /dev/null
+++ b/feature/builtin/roles/install/etcd/templates/backup.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+ETCDCTL_PATH='/usr/local/bin/etcdctl'
+ENDPOINTS='https://{{ .internal_ipv4 }}:2379'
+ETCD_DATA_DIR="{{ .etcd.env.data_dir }}"
+BACKUP_DIR="{{ .etcd.backup.backup_dir }}/etcd-$(date +%Y-%m-%d-%H-%M-%S)"
+KEEPBACKUPNUMBER='{{ .etcd.backup.keep_backup_number }}'
+((KEEPBACKUPNUMBER++))
+
+ETCDCTL_CERT="/etc/ssl/etcd/ssl/server.crt"
+ETCDCTL_KEY="/etc/ssl/etcd/ssl/server.key"
+ETCDCTL_CA_FILE="/etc/ssl/etcd/ssl/ca.crt"
+
+[ ! -d $BACKUP_DIR ] && mkdir -p $BACKUP_DIR
+
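+# Take a v2 backup of the data dir, then a v3 snapshot; both land under $BACKUP_DIR.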
+export ETCDCTL_API=2;$ETCDCTL_PATH backup --data-dir $ETCD_DATA_DIR --backup-dir $BACKUP_DIR
+
+sleep 3
+
+{
+export ETCDCTL_API=3;$ETCDCTL_PATH --endpoints="$ENDPOINTS" snapshot save $BACKUP_DIR/snapshot.db \
+ --cacert="$ETCDCTL_CA_FILE" \
+ --cert="$ETCDCTL_CERT" \
+ --key="$ETCDCTL_KEY"
+} > /dev/null
+
+sleep 3
+
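+# Keep only the newest backups; the counter was incremented above so the "total" header line of ls -l is not counted.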
+cd $BACKUP_DIR/../ && ls -lt |awk '{if(NR > '$KEEPBACKUPNUMBER'){print "rm -rf "$9}}'|sh
diff --git a/feature/builtin/roles/install/etcd/templates/backup.timer b/feature/builtin/roles/install/etcd/templates/backup.timer
new file mode 100644
index 000000000..4b73c6bc2
--- /dev/null
+++ b/feature/builtin/roles/install/etcd/templates/backup.timer
@@ -0,0 +1,7 @@
+[Unit]
+Description=Timer to backup ETCD
+[Timer]
+OnCalendar={{ .etcd.backup.on_calendar }}
+Unit=backup-etcd.service
+[Install]
+WantedBy=multi-user.target
diff --git a/feature/builtin/roles/install/etcd/templates/etcd.env b/feature/builtin/roles/install/etcd/templates/etcd.env
new file mode 100644
index 000000000..ed85a2097
--- /dev/null
+++ b/feature/builtin/roles/install/etcd/templates/etcd.env
@@ -0,0 +1,57 @@
+ETCD_DATA_DIR={{ .etcd.env.data_dir }}
+ETCD_ADVERTISE_CLIENT_URLS={{ printf "https://%s:2379" .internal_ipv4 }}
+ETCD_INITIAL_ADVERTISE_PEER_URLS={{ printf "https://%s:2380" .internal_ipv4 }}
+ETCD_INITIAL_CLUSTER_STATE={{ .etcd.state }}
+ETCD_LISTEN_CLIENT_URLS={{ printf "https://%s:2379" .internal_ipv4 }},https://127.0.0.1:2379
+ETCD_INITIAL_CLUSTER_TOKEN={{ .etcd.env.token }}
+ETCD_LISTEN_PEER_URLS={{ printf "https://%s:2380" .internal_ipv4 }}
+ETCD_NAME={{ .hostname }}
+ETCD_PROXY=off
+ETCD_ENABLE_V2=true
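+# Build the initial cluster member list (name=https://ip:2380) from every host in the etcd group.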
+{{- $ips := list }}
+{{- range .groups.etcd | default list }}
+ {{- $ips = append $ips (printf "%s=https://%s:2380" (index $.inventory_hosts . "hostname") (index $.inventory_hosts . "internal_ipv4")) }}
+{{- end }}
+ETCD_INITIAL_CLUSTER={{ $ips | join "," }}
+ETCD_ELECTION_TIMEOUT={{ .etcd.env.election_timeout }}
+ETCD_HEARTBEAT_INTERVAL={{ .etcd.env.heartbeat_interval }}
+ETCD_AUTO_COMPACTION_RETENTION={{ .etcd.env.compaction_retention }}
+ETCD_SNAPSHOT_COUNT={{ .etcd.env.snapshot_count }}
+{{- if .etcd.env.metrics }}
+ETCD_METRICS={{ .etcd.env.metrics }}
+{{- end }}
+{{- if .etcd.env.quota_backend_bytes }}
+ETCD_QUOTA_BACKEND_BYTES={{ .etcd.env.quota_backend_bytes }}
+{{- end }}
+{{- if .etcd.env.max_request_bytes }}
+ETCD_MAX_REQUEST_BYTES={{ .etcd.env.max_request_bytes }}
+{{- end }}
+{{- if .etcd.env.max_snapshots }}
+ETCD_MAX_SNAPSHOTS={{ .etcd.env.max_snapshots }}
+{{- end }}
+{{- if .etcd.env.max_wals }}
+ETCD_MAX_WALS={{ .etcd.env.max_wals }}
+{{- end }}
+{{- if .etcd.env.log_level }}
+ETCD_LOG_LEVEL={{ .etcd.env.log_level }}
+{{- end }}
+{{- if .etcd.env.unsupported_arch }}
+ETCD_UNSUPPORTED_ARCH={{ .etcd.env.unsupported_arch }}
+{{- end }}
+
+# TLS settings
+ETCD_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.crt
+ETCD_CERT_FILE=/etc/ssl/etcd/ssl/server.crt
+ETCD_KEY_FILE=/etc/ssl/etcd/ssl/server.key
+ETCD_CLIENT_CERT_AUTH=true
+
+ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.crt
+ETCD_PEER_CERT_FILE=/etc/ssl/etcd/ssl/server.crt
+ETCD_PEER_KEY_FILE=/etc/ssl/etcd/ssl/server.key
+ETCD_PEER_CLIENT_CERT_AUTH=true
+
+# CLI settings
+ETCDCTL_ENDPOINTS=https://127.0.0.1:2379
+ETCDCTL_CACERT=/etc/ssl/etcd/ssl/ca.crt
+ETCDCTL_CERT=/etc/ssl/etcd/ssl/server.crt
+ETCDCTL_KEY=/etc/ssl/etcd/ssl/server.key
diff --git a/feature/builtin/roles/install/image-registry/defaults/main.yaml b/feature/builtin/roles/install/image-registry/defaults/main.yaml
new file mode 100644
index 000000000..1b5c0e82b
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/defaults/main.yaml
@@ -0,0 +1,51 @@
+image_registry:
+ # ha_vip: 192.168.122.59
+ namespace_override: ""
+ auth:
+ registry: |
+ {{- if and .image_registry.ha_vip (ne .image_registry.ha_vip "") }}
+ {{ .image_registry.ha_vip }}
+ {{- else }}
+ {{ index .inventory_hosts (.groups.image_registry | default list | first) "internal_ipv4" }}
+ {{- end }}
+ username: admin
+ password: Harbor12345
+ # registry type. support: harbor, registry
+ type: harbor
+ # Virtual IP address for repository high availability; it must be an available (unused) IP address.
+ registry:
+ version: 2
+ config:
+ storage: nfs
+ nfs_dir: /share/registry
+ storage:
+ filesystem:
+ rootdirectory: /var/lib/registry
+# nfs_mount: /repository/registry # if set, rootdirectory will be mounted from the nfs server at nfs_mount.
+# azure:
+# accountname: accountname
+# accountkey: base64encodedaccountkey
+# container: containername
+# gcs:
+# bucket: bucketname
+# keyfile: /path/to/keyfile
+# credentials:
+# type: service_account
+# project_id: project_id_string
+# private_key_id: private_key_id_string
+# private_key: private_key_string
+# client_email: client@example.com
+# client_id: client_id_string
+# auth_uri: http://example.com/auth_uri
+# token_uri: http://example.com/token_uri
+# auth_provider_x509_cert_url: http://example.com/provider_cert_url
+# client_x509_cert_url: http://example.com/client_cert_url
+# rootdirectory: /gcs/object/name/prefix
+# s3:
+# accesskey: awsaccesskey
+# secretkey: awssecretkey
+# region: us-west-1
+# regionendpoint: http://myobjects.local
+# bucket: bucketname
+# keyid: mykeyid
+# rootdirectory: /s3/object/name/prefix
diff --git a/feature/builtin/roles/install/image-registry/files/containerd.service b/feature/builtin/roles/install/image-registry/files/containerd.service
new file mode 100644
index 000000000..5f67110ab
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/files/containerd.service
@@ -0,0 +1,26 @@
+[Unit]
+Description=containerd container runtime
+Documentation=https://containerd.io
+After=network.target local-fs.target
+
+[Service]
+ExecStartPre=-/sbin/modprobe overlay
+ExecStart=/usr/local/bin/containerd
+
+Type=notify
+Delegate=yes
+KillMode=process
+Restart=always
+RestartSec=5
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNPROC=infinity
+LimitCORE=infinity
+LimitNOFILE=1048576
+# Comment out TasksMax if your systemd version does not support it.
+# Only systemd 226 and above support this option.
+TasksMax=infinity
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target
diff --git a/feature/builtin/roles/install/image-registry/files/docker.service b/feature/builtin/roles/install/image-registry/files/docker.service
new file mode 100644
index 000000000..929b8a436
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/files/docker.service
@@ -0,0 +1,47 @@
+[Unit]
+Description=Docker Application Container Engine
+Documentation=https://docs.docker.com
+# After=network-online.target firewalld.service containerd.service
+# Wants=network-online.target
+# Requires=docker.socket containerd.service
+
+[Service]
+Type=notify
+# the default is not to use systemd for cgroups because the delegate issues still
+# exists and systemd currently does not support the cgroup feature set required
+# for containers run by docker
+ExecStart=/usr/local/bin/dockerd --containerd=/run/containerd/containerd.sock
+ExecReload=/bin/kill -s HUP $MAINPID
+TimeoutSec=0
+RestartSec=2
+Restart=always
+
+# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
+# Both the old, and new location are accepted by systemd 229 and up, so using the old location
+# to make them work for either version of systemd.
+StartLimitBurst=3
+
+# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
+# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
+# this option work for either version of systemd.
+StartLimitInterval=60s
+
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
+LimitNPROC=infinity
+LimitCORE=infinity
+
+# Comment TasksMax if your systemd version does not support it.
+# Only systemd 226 and above support this option.
+TasksMax=infinity
+
+# set delegate yes so that systemd does not reset the cgroups of docker containers
+Delegate=yes
+
+# kill only the docker process, not all processes in the cgroup
+KillMode=process
+OOMScoreAdjust=-500
+
+[Install]
+WantedBy=multi-user.target
diff --git a/feature/builtin/roles/install/image-registry/tasks/install_docker.yaml b/feature/builtin/roles/install/image-registry/tasks/install_docker.yaml
new file mode 100644
index 000000000..0d27a5a8c
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/tasks/install_docker.yaml
@@ -0,0 +1,34 @@
+---
+- name: Check if docker is installed
+ ignore_errors: true
+ command: docker --version
+ register: docker_install_version
+
+- name: Install docker
+ when: or (.docker_install_version.stderr | ne "") (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not)
+ block:
+ - name: Sync docker binary to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/docker/{{ .docker_version }}/{{ .binary_type.stdout }}/docker-{{ .docker_version }}.tgz
+ dest: |
+ /tmp/kubekey/docker-{{ .docker_version }}.tgz
+ - name: Generate docker config file
+ template:
+ src: docker.config
+ dest: /etc/docker/daemon.json
+ - name: Unpackage docker binary
+ command: |
+ tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ .docker_version }}.tgz --wildcards docker/*
+ - name: Generate docker service file
+ copy:
+ src: docker.service
+ dest: /etc/systemd/system/docker.service
+ - name: Generate containerd service file
+ copy:
+ src: containerd.service
+ dest: /etc/systemd/system/containerd.service
+ - name: Start docker service
+ command: |
+ systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
+ systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service
diff --git a/feature/builtin/roles/install/image-registry/tasks/install_docker_compose.yaml b/feature/builtin/roles/install/image-registry/tasks/install_docker_compose.yaml
new file mode 100644
index 000000000..de8bdc8ed
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/tasks/install_docker_compose.yaml
@@ -0,0 +1,13 @@
+---
+- name: Check if docker-compose is installed
+ ignore_errors: true
+ command: docker-compose --version
+ register: dockercompose_install_version
+
+- name: Sync docker-compose to remote
+ when: or (.dockercompose_install_version.stderr | ne "") (.dockercompose_install_version.stdout | ne (printf "Docker Compose version %s" .dockercompose_version))
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/image-registry/docker-compose/{{ .dockercompose_version }}/{{ .binary_type.stdout }}/docker-compose
+ dest: /usr/local/bin/docker-compose
+ mode: 0755
diff --git a/feature/builtin/roles/install/image-registry/tasks/install_harbor.yaml b/feature/builtin/roles/install/image-registry/tasks/install_harbor.yaml
new file mode 100644
index 000000000..5018d4d73
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/tasks/install_harbor.yaml
@@ -0,0 +1,52 @@
+---
+- name: Sync harbor package to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/image-registry/harbor/{{ .harbor_version }}/{{ .binary_type.stdout }}/harbor-offline-installer-{{ .harbor_version }}.tgz
+ dest: |
+ /opt/harbor/{{ .harbor_version }}/harbor-offline-installer-{{ .harbor_version }}.tgz
+
+- name: Untar harbor package
+ command: |
+ cd /opt/harbor/{{ .harbor_version }}/ && tar -zxvf harbor-offline-installer-{{ .harbor_version }}.tgz
+
+- name: Sync image registry cert file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.crt
+ dest: |
+ /opt/harbor/{{ .harbor_version }}/ssl/server.crt
+
+- name: Sync image registry key file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.key
+ dest: |
+ /opt/harbor/{{ .harbor_version }}/ssl/server.key
+
+- name: Generate harbor config
+ template:
+ src: harbor.config
+ dest: |
+ /opt/harbor/{{ .harbor_version }}/harbor/harbor.yml
+
+- name: Generate keepalived docker compose
+ template:
+ src: harbor_keepalived.docker-compose
+ dest: |
+ /opt/harbor/{{ .harbor_version }}/harbor/docker-compose-keepalived.yml
+ when:
+ - and .image_registry.ha_vip (ne .image_registry.ha_vip "")
+ - .harbor_service_status.stderr | ne ""
+
+- name: Install harbor
+ command: |
+ cd /opt/harbor/{{ .harbor_version }}/harbor && /bin/bash install.sh
+
+- name: Register harbor service
+ template:
+ src: harbor.service
+ dest: /etc/systemd/system/harbor.service
+
+- name: Start harbor service
+ command: systemctl daemon-reload && systemctl start harbor.service && systemctl enable harbor.service
diff --git a/feature/builtin/roles/install/image-registry/tasks/install_keepalived.yaml b/feature/builtin/roles/install/image-registry/tasks/install_keepalived.yaml
new file mode 100644
index 000000000..11bf206bb
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/tasks/install_keepalived.yaml
@@ -0,0 +1,23 @@
+---
+- name: Sync keepalived image to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/image-registry/keepalived/{{ .keepalived_version }}/{{ .binary_type.stdout }}/keepalived-{{ .keepalived_version }}-linux-{{ .binary_type.stdout }}.tgz
+ dest: |
+ /opt/keepalived/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-{{ .binary_type.stdout }}.tgz
+
+- name: Load keepalived image
+ command: |
+ docker load -i /opt/keepalived/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-{{ .binary_type.stdout }}.tgz
+
+- name: Sync keepalived config to remote
+ template:
+ src: keepalived.config
+ dest: |
+ /opt/keepalived/{{ .keepalived_version }}/keepalived.conf
+
+- name: Sync healthcheck shell to remote
+ template:
+ src: keepalived.healthcheck
+ dest: |
+ /opt/keepalived/{{ .keepalived_version }}/healthcheck.sh
diff --git a/feature/builtin/roles/install/image-registry/tasks/install_registry.yaml b/feature/builtin/roles/install/image-registry/tasks/install_registry.yaml
new file mode 100644
index 000000000..d6812c82c
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/tasks/install_registry.yaml
@@ -0,0 +1,58 @@
+---
+- name: Sync registry image to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/image-registry/registry/{{ .registry_version }}/{{ .binary_type.stdout }}/registry-{{ .registry_version }}-linux-{{ .binary_type.stdout }}.tgz
+ dest: |
+ /opt/registry/{{ .registry_version }}/registry-{{ .registry_version }}-linux-{{ .binary_type.stdout }}.tgz
+
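+# When nfs_mount is set and an nfs group exists, back the registry rootdirectory with the exported NFS share.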
+- name: Mount NFS dir
+ command: |
+ {{- if .os.release.ID_LIKE | eq "debian" }}
+ apt update && apt install -y nfs-common
+ {{- else if .os.release.ID_LIKE | eq "rhel fedora" }}
+ yum update && yum install -y nfs-utils
+ {{- end }}
+ mount -t nfs {{ index .inventory_hosts (.groups.nfs | default list | first) "internal_ipv4" }}:{{ .image_registry.registry.storage.filesystem.nfs_mount }} {{ .image_registry.registry.storage.filesystem.rootdirectory }}
+ when:
+ - and .image_registry.registry.storage.filesystem.nfs_mount (ne .image_registry.registry.storage.filesystem.nfs_mount "")
+ - .groups.nfs | default list | len | eq 1
+ - .registry_service_status.stderr | ne ""
+
+- name: Load registry image
+ command: |
+ docker load -i /opt/registry/{{ .registry_version }}/registry-{{ .registry_version }}-linux-{{ .binary_type.stdout }}.tgz
+
+- name: Sync image registry cert file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.crt
+ dest: |
+ /opt/registry/{{ .registry_version }}/ssl/server.crt
+
+- name: Sync image registry key file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.key
+ dest: |
+ /opt/registry/{{ .registry_version }}/ssl/server.key
+
+- name: Generate registry docker compose
+ template:
+ src: registry.docker-compose
+ dest: |
+ /opt/registry/{{ .registry_version }}/docker-compose.yml
+
+- name: Generate registry config
+ template:
+ src: registry.config
+ dest: |
+ /opt/registry/{{ .registry_version }}/config.yml
+
+- name: Register registry service
+ copy:
+ src: registry.service
+ dest: /etc/systemd/system/registry.service
+
+- name: Start registry service
+ command: systemctl daemon-reload && systemctl start registry.service && systemctl enable registry.service
diff --git a/feature/builtin/roles/install/image-registry/tasks/load_images.yaml b/feature/builtin/roles/install/image-registry/tasks/load_images.yaml
new file mode 100644
index 000000000..dee13e147
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/tasks/load_images.yaml
@@ -0,0 +1,55 @@
+---
+- name: Sync images to remote
+ tags: ["only_image"]
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/images/
+ dest: /tmp/kubekey/images/
+
+- name: Create harbor project for each image
+ tags: ["only_image"]
+ command: |
+ {{- if .image_registry.namespace_override | eq "" }}
+ for dir in /tmp/kubekey/images/*; do
+ if [ ! -d "$dir" ]; then
+ # only deal with directories
+ continue
+ fi
+
+ project=${dir##*/}
+
+ if [ "$project" == "blobs" ]; then
+ # skip blobs dir
+ continue
+ fi
+
+ # if the project does not exist, create it
+ http_code=$(curl -Iks -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" "https://localhost/api/v2.0/projects?project_name=${project}" | grep HTTP | awk '{print $2}')
+ if [ $http_code == 404 ]; then
+ # create project
+ curl -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" -k -X POST -H "Content-Type: application/json" "https://localhost/api/v2.0/projects" -d "{ \"project_name\": \"${project}\", \"public\": true}"
+ fi
+ done
+ {{- else }}
+ # if the project does not exist, create it
+ http_code=$(curl -Iks -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" 'https://localhost/api/v2.0/projects?project_name={{ .image_registry.namespace_override }}' | grep HTTP | awk '{print $2}')
+ if [ $http_code == 404 ]; then
+ # create project
+ curl -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" -k -X POST -H "Content-Type: application/json" "https://localhost/api/v2.0/projects" -d "{ \"project_name\": \"{{ .image_registry.namespace_override }}\", \"public\": true}"
+ fi
+ {{- end }}
+ when: .image_registry.type | eq "harbor"
+
+- name: Sync images package to harbor
+ tags: ["only_image"]
+ image:
+ push:
+ images_dir: /tmp/kubekey/images/
+ registry: |
+ {{ .image_registry.auth.registry }}
+ namespace_override: |
+ {{ .image_registry.namespace_override }}
+ username: |
+ {{ .image_registry.auth.username }}
+ password: |
+ {{ .image_registry.auth.password }}
diff --git a/feature/builtin/roles/install/image-registry/tasks/main.yaml b/feature/builtin/roles/install/image-registry/tasks/main.yaml
new file mode 100644
index 000000000..310d799a3
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/tasks/main.yaml
@@ -0,0 +1,30 @@
+---
+- include_tasks: install_docker.yaml
+
+- include_tasks: install_docker_compose.yaml
+
+- include_tasks: install_keepalived.yaml
+ when: and .image_registry.ha_vip (ne .image_registry.ha_vip "")
+
+- name: Install harbor
+ when: .image_registry.type | eq "harbor"
+ block:
+ - name: Check if harbor is installed
+ ignore_errors: true
+ command: systemctl status harbor.service
+ register: harbor_service_status
+ - include_tasks: install_harbor.yaml
+ when: .harbor_service_status.stderr | ne ""
+
+- name: Install registry
+ when: .image_registry.type | eq "registry"
+ block:
+ - name: Check if registry is installed
+ ignore_errors: true
+ command: systemctl status registry.service
+ register: registry_service_status
+ - include_tasks: install_registry.yaml
+ when: .registry_service_status.stderr | ne ""
+
+- include_tasks: load_images.yaml
+ tags: ["only_image"]
diff --git a/feature/builtin/roles/install/image-registry/templates/docker.config b/feature/builtin/roles/install/image-registry/templates/docker.config
new file mode 100644
index 000000000..23767c217
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/templates/docker.config
@@ -0,0 +1,19 @@
+{
+ "log-opts": {
+ "max-size": "5m",
+ "max-file":"3"
+ },
+{{- if and .cri.docker.data_root (ne .cri.docker.data_root "") }}
+ "data-root": "{{ .cri.docker.data_root }}",
+{{- end }}
+{{- if .cri.registry.mirrors }}
+ "registry-mirrors": {{ .cri.registry.mirrors | toJson }},
+{{- end }}
+ {{- if .cri.registry.insecure_registries }}
+ "insecure-registries": {{ .cri.registry.insecure_registries | toJson }},
+{{- end }}
+ {{- if and .cri.docker.bridge_ip (ne .cri.docker.bridge_ip "") }}
+ "bip": "{{ .cri.docker.bridge_ip }}",
+{{- end }}
+ "exec-opts": ["native.cgroupdriver={{ .cri.cgroup_driver | default "systemd" }}"]
+}
diff --git a/feature/builtin/roles/install/image-registry/templates/harbor.config b/feature/builtin/roles/install/image-registry/templates/harbor.config
new file mode 100644
index 000000000..7feac11ce
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/templates/harbor.config
@@ -0,0 +1,311 @@
+# Configuration file of Harbor
+
+# The IP address or hostname to access admin UI and registry service.
+# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
+hostname: {{ .internal_ipv4 }}
+
+# http related config
+http:
+ # port for http, default is 80. If https enabled, this port will redirect to https port
+ port: 80
+
+# https related config
+https:
+ # https port for harbor, default is 443
+ port: 443
+ # The path of cert and key files for nginx
+ certificate: /opt/harbor/{{ .harbor_version }}/ssl/server.crt
+ private_key: /opt/harbor/{{ .harbor_version }}/ssl/server.key
+ # enable strong ssl ciphers (default: false)
+ # strong_ssl_ciphers: false
+
+# # Uncomment following will enable tls communication between all harbor components
+# internal_tls:
+# # set enabled to true means internal tls is enabled
+# enabled: true
+# # put your cert and key files on dir
+# dir: /etc/harbor/tls/internal
+
+
+# Uncomment external_url if you want to enable external proxy
+# When it is enabled, the hostname will no longer be used
+# external_url: https://reg.mydomain.com:8433
+
+# The initial password of Harbor admin
+# It only takes effect the first time Harbor is installed
+# Remember to change the admin password from the UI after launching Harbor.
+harbor_admin_password: {{ .image_registry.auth.password }}
+
+# Harbor DB configuration
+database:
+ # The password for the root user of Harbor DB. Change this before any production use.
+ password: root123
+ # The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
+ max_idle_conns: 100
+ # The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
+ # Note: the default number of connections is 1024 for postgres of harbor.
+ max_open_conns: 900
+ # The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
+ # The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+ conn_max_lifetime: 5m
+ # The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
+ # The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+ conn_max_idle_time: 0
+
+# The default data volume
+data_volume: /data
+
+# Harbor Storage settings by default is using /data dir on local filesystem
+# Uncomment storage_service setting If you want to using external storage
+# storage_service:
+# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
+# # of registry's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
+# ca_bundle:
+
+# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
+# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/
+# filesystem:
+# maxthreads: 100
+# # set disable to true when you want to disable registry redirect
+# redirect:
+# disable: false
+
+# Trivy configuration
+#
+# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
+# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
+# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
+# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
+# 12 hours and published as a new release to GitHub.
+trivy:
+ # ignoreUnfixed The flag to display only fixed vulnerabilities
+ ignore_unfixed: false
+ # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
+ #
+ # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
+ # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
+ # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
+ skip_update: false
+ #
+ # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
+ # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
+ skip_java_db_update: false
+ #
+ # The offline_scan option prevents Trivy from sending API requests to identify dependencies.
+ # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
+ # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
+ # exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
+ # It would work if all the dependencies are in local.
+ # This option doesn't affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
+ offline_scan: false
+ #
+ # Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
+ security_check: vuln
+ #
+ # insecure The flag to skip verifying registry certificate
+ insecure: false
+ # github_token The GitHub access token to download Trivy DB
+ #
+ # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
+ # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
+ # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
+ # https://docs.github.com/rest/overview/resources-in-the-rest-api#rate-limiting
+ #
+ # You can create a GitHub token by following the instructions in
+ # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
+ #
+ # github_token: xxx
+
+jobservice:
+ # Maximum number of job workers in job service
+ max_job_workers: 10
+ # The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
+ job_loggers:
+ - STD_OUTPUT
+ - FILE
+ # - DB
+ # The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
+ logger_sweeper_duration: 1 #days
+
+notification:
+ # Maximum retry count for webhook job
+ webhook_job_max_retry: 3
+ # HTTP client timeout for webhook job
+ webhook_job_http_client_timeout: 3 #seconds
+
+# Log configurations
+log:
+ # options are debug, info, warning, error, fatal
+ level: info
+ # configs for logs in local storage
+ local:
+ # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
+ rotate_count: 50
+ # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
+ # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
+ # are all valid.
+ rotate_size: 200M
+ # The directory on your host that store log
+ location: /var/log/harbor
+
+ # Uncomment following lines to enable external syslog endpoint.
+ # external_endpoint:
+ # # protocol used to transmit log to external endpoint, options is tcp or udp
+ # protocol: tcp
+ # # The host of external endpoint
+ # host: localhost
+ # # Port of external endpoint
+ # port: 5140
+
+#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
+_version: 2.10.0
+
+# Uncomment external_database if using external database.
+# external_database:
+# harbor:
+# host: harbor_db_host
+# port: harbor_db_port
+# db_name: harbor_db_name
+# username: harbor_db_username
+# password: harbor_db_password
+# ssl_mode: disable
+# max_idle_conns: 2
+# max_open_conns: 0
+
+# Uncomment redis if need to customize redis db
+# redis:
+# # db_index 0 is for core, it's unchangeable
+# # registry_db_index: 1
+# # jobservice_db_index: 2
+# # trivy_db_index: 5
+# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
+# # harbor_db_index: 6
+# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
+# # cache_db_index: 7
+
+# Uncomment redis if need to customize redis db
+# redis:
+# # db_index 0 is for core, it's unchangeable
+# # registry_db_index: 1
+# # jobservice_db_index: 2
+# # trivy_db_index: 5
+# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
+# # harbor_db_index: 6
+# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
+# # cache_layer_db_index: 7
+
+# Uncomment external_redis if using external Redis server
+# external_redis:
+# # support redis, redis+sentinel
+#   # host for redis: <host_redis>:<port_redis>
+#   # host for redis+sentinel:
+#   #  <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
+# host: redis:6379
+# password:
+# # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH form.
+# # there's a known issue when using external redis username ref:https://github.com/goharbor/harbor/issues/18892
+# # if you care about the image pull/push performance, please refer to this https://github.com/goharbor/harbor/wiki/Harbor-FAQs#external-redis-username-password-usage
+# # username:
+# # sentinel_master_set must be set to support redis+sentinel
+# #sentinel_master_set:
+# # db_index 0 is for core, it's unchangeable
+# registry_db_index: 1
+# jobservice_db_index: 2
+# trivy_db_index: 5
+# idle_timeout_seconds: 30
+# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
+# # harbor_db_index: 6
+# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
+# # cache_layer_db_index: 7
+
+# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
+# uaa:
+# ca_file: /path/to/ca
+
+# Global proxy
+# Config http proxy for components, e.g. http://my.proxy.com:3128
+# Components don't need to connect to each other via the http proxy.
+# Remove a component from the `components` array to disable the proxy
+# for it. If you want to use the proxy for replication, you MUST enable
+# the proxy for core and jobservice, and set `http_proxy` and `https_proxy`.
+# Add domains to the `no_proxy` field when you want to disable the proxy
+# for some special registries.
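+# An illustrative no_proxy value (hosts are placeholders):
+#   no_proxy: 127.0.0.1,localhost,core,registry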
+proxy:
+ http_proxy:
+ https_proxy:
+ no_proxy:
+ components:
+ - core
+ - jobservice
+ - trivy
+
+# metric:
+# enabled: false
+# port: 9090
+# path: /metrics
+
+# Trace related config
+# Only one trace provider (jaeger or otel) can be enabled at the same time,
+# and when using jaeger as the provider, it can only be enabled in either agent mode or collector mode.
+# If using jaeger collector mode, uncomment endpoint, and uncomment username and password if needed.
+# If using jaeger agent mode, uncomment agent_host and agent_port.
+# trace:
+# enabled: true
+#   # set sample_rate to 1 if you want to sample 100% of trace data; set 0.5 to sample 50% of trace data, and so forth
+#   sample_rate: 1
+#   # # namespace used to differentiate different harbor services
+# # namespace:
+# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
+# # attributes:
+# # application: harbor
+# # # jaeger should be 1.26 or newer.
+# # jaeger:
+# # endpoint: http://hostname:14268/api/traces
+# # username:
+# # password:
+# # agent_host: hostname
+# # # export trace data by jaeger.thrift in compact mode
+# # agent_port: 6831
+# # otel:
+# # endpoint: hostname:4318
+# # url_path: /v1/traces
+# # compression: false
+# # insecure: true
+# # # timeout is in seconds
+# # timeout: 10
+
+# Enable purge _upload directories
+upload_purging:
+ enabled: true
+  # remove files in _upload directories that have existed longer than this period of time; the default is one week.
+ age: 168h
+ # the interval of the purge operations
+ interval: 24h
+ dryrun: false
+
+# Cache layer configurations
+# If this feature is enabled, harbor will cache the resources
+# `project/project_metadata/repository/artifact/manifest` in redis,
+# which can especially help to improve the performance of highly concurrent
+# manifest pulling.
+# NOTICE
+# If you are deploying Harbor in HA mode, make sure that all the harbor
+# instances have the same behaviour, all with caching enabled or disabled,
+# otherwise it can lead to potential data inconsistency.
+cache:
+ # not enabled by default
+ enabled: false
+ # keep cache for one day by default
+ expire_hours: 24
+
+# Harbor core configurations
+# Uncomment to enable the following harbor core related configuration items.
+# core:
+#   # The provider for updating project quota (usage). There are 2 options: redis or db.
+#   # By default it is implemented by db, but you can switch the updating to redis,
+#   # which can improve the performance of highly concurrent pushes to the same project
+#   # and reduce database connection spikes and occupancy.
+#   # Redis introduces some delay before the displayed quota usage is updated, so only
+#   # switch the provider to redis if you run into database connection spikes under
+#   # highly concurrent pushes to the same project; there is no improvement for other scenarios.
+#   quota_update_provider: redis # Or db
diff --git a/feature/builtin/roles/install/image-registry/templates/harbor.service b/feature/builtin/roles/install/image-registry/templates/harbor.service
new file mode 100644
index 000000000..9219b1e66
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/templates/harbor.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=harbor
+After=docker.service systemd-networkd.service systemd-resolved.service
+Requires=docker.service
+
+[Service]
+Type=simple
+# systemd does not interpret shell operators such as '&&', so the command runs
+# through a shell; the keepalived compose (if enabled) is brought up detached
+# before the main compose runs in the foreground.
+ExecStart=/bin/sh -c '{{ if and .image_registry.ha_vip (ne .image_registry.ha_vip "") }}/usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ .harbor_version }}/harbor/docker-compose-keepalived.yml up -d && {{ end }}/usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ .harbor_version }}/harbor/docker-compose.yml up'
+ExecStop=/bin/sh -c '/usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ .harbor_version }}/harbor/docker-compose.yml down{{ if and .image_registry.ha_vip (ne .image_registry.ha_vip "") }} && /usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ .harbor_version }}/harbor/docker-compose-keepalived.yml down{{ end }}'
+Restart=on-failure
+[Install]
+WantedBy=multi-user.target
diff --git a/feature/builtin/roles/install/image-registry/templates/harbor_keepalived.docker-compose b/feature/builtin/roles/install/image-registry/templates/harbor_keepalived.docker-compose
new file mode 100644
index 000000000..49e86c23c
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/templates/harbor_keepalived.docker-compose
@@ -0,0 +1,26 @@
+---
+version: '2.3'
+services:
+ keepalived:
+    image: osixia/keepalived:{{ .keepalived_version }}
+ container_name: keepalived
+ restart: always
+ dns_search: .
+ cap_drop:
+ - ALL
+ cap_add:
+ - CHOWN
+ - DAC_OVERRIDE
+ - SETGID
+ - SETUID
+ depends_on:
+ - proxy
+ volumes:
+ - type: bind
+ source: /opt/keeplived/{{ .keepalived_version }}/keepalived.conf
+ target: /container/service/keepalived/assets/keepalived.conf
+ - type: bind
+ source: /opt/keeplived/{{ .keepalived_version }}/healthcheck.sh
+ target: /etc/keepalived/healthcheck.sh
+    networks:
+      - harbor
+networks:
+  harbor:
+    # assumed to be created beforehand (e.g. by the main harbor compose
+    # project); declared external so this file does not try to create it
+    external: true
diff --git a/feature/builtin/roles/install/image-registry/templates/keepalived.config b/feature/builtin/roles/install/image-registry/templates/keepalived.config
new file mode 100644
index 000000000..36c11c2f0
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/templates/keepalived.config
@@ -0,0 +1,31 @@
+vrrp_script healthcheck {
+    script "/etc/keepalived/healthcheck.sh"
+    interval 10
+    fall 2
+    rise 2
+    timeout 5
+    init_fail
+}
+global_defs {
+    script_user root
+    router_id harbor-ha
+    enable_script_security
+    lvs_sync_daemon ens3 VI_1
+}
+vrrp_instance VI_1 {
+    state BACKUP
+    interface ens3
+    virtual_router_id 31
+    priority 50
+    advert_int 1
+    authentication {
+        auth_type PASS
+        auth_pass k8s-test
+    }
+    virtual_ipaddress {
+        {{ .image_registry.ha_vip }}
+    }
+    track_script {
+        healthcheck
+    }
+}
diff --git a/feature/builtin/roles/install/image-registry/templates/keepalived.healthcheck b/feature/builtin/roles/install/image-registry/templates/keepalived.healthcheck
new file mode 100644
index 000000000..c517da8eb
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/templates/keepalived.healthcheck
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+{{- if .image_registry.type | eq "registry" }}
+# registry service
+host=registry
+port=5000
+{{- else }}
+# harbor service
+host=harbor
+port=80
+{{- end }}
+
+# nc expects host and port as separate arguments
+if nc -zv -w 2 "$host" "$port" > /dev/null 2>&1; then
+  exit 0
+else
+  exit 1
+fi
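+
+# keepalived's track_script treats a non-zero exit code as a failed check;
+# after `fall 2` consecutive failures the instance lowers its priority and can
+# release the VIP.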
diff --git a/feature/builtin/roles/install/image-registry/templates/registry.config b/feature/builtin/roles/install/image-registry/templates/registry.config
new file mode 100644
index 000000000..c008f1028
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/templates/registry.config
@@ -0,0 +1,218 @@
+version: 0.1
+log:
+ accesslog:
+ disabled: true
+ level: info
+ formatter: text
+ fields:
+ service: registry
+ environment: staging
+# hooks:
+# - type: mail
+# disabled: true
+# levels:
+# - panic
+# options:
+# smtp:
+# addr: mail.example.com:25
+# username: mailuser
+# password: password
+# insecure: true
+# from: sender@example.com
+# to:
+# - errors@example.com
+storage:
+{{- if and .image_registry.registry.storage.filesystem.rootdirectory (ne .image_registry.registry.storage.filesystem.rootdirectory "") }}
+ filesystem:
+ rootdirectory: {{ .image_registry.registry.storage.filesystem.rootdirectory }}
+ maxthreads: 100
+{{- end }}
+{{- if .image_registry.registry.storage.azure }}
+ azure:
+ accountname: {{ .image_registry.registry.storage.azure.accountname }}
+ accountkey: {{ .image_registry.registry.storage.azure.accountkey }}
+ container: {{ .image_registry.registry.storage.azure.container }}
+{{- end }}
+{{- if .image_registry.registry.storage.gcs }}
+ gcs:
+ bucket: {{ .image_registry.registry.storage.gcs.bucket }}
+ keyfile: {{ .image_registry.registry.storage.gcs.keyfile }}
+ credentials:
+ type: service_account
+ project_id: {{ .image_registry.registry.storage.gcs.credentials.project_id }}
+ private_key_id: {{ .image_registry.registry.storage.gcs.credentials.private_key_id }}
+ private_key: {{ .image_registry.registry.storage.gcs.credentials.private_key }}
+ client_email: {{ .image_registry.registry.storage.gcs.credentials.client_email }}
+ client_id: {{ .image_registry.registry.storage.gcs.credentials.client_id }}
+ auth_uri: {{ .image_registry.registry.storage.gcs.credentials.auth_uri }}
+ token_uri: {{ .image_registry.registry.storage.gcs.credentials.token_uri }}
+ auth_provider_x509_cert_url: {{ .image_registry.registry.storage.gcs.credentials.auth_provider_x509_cert_url }}
+ client_x509_cert_url: {{ .image_registry.registry.storage.gcs.credentials.client_x509_cert_url }}
+ rootdirectory: {{ .image_registry.registry.storage.gcs.rootdirectory }}
+{{- end }}
+{{- if .image_registry.registry.storage.s3 }}
+ s3:
+ accesskey: {{ .image_registry.registry.storage.s3.accesskey }}
+ secretkey: {{ .image_registry.registry.storage.s3.secretkey }}
+ region: {{ .image_registry.registry.storage.s3.region }}
+ regionendpoint: {{ .image_registry.registry.storage.s3.regionendpoint }}
+ forcepathstyle: true
+ accelerate: false
+ bucket: {{ .image_registry.registry.storage.s3.bucket }}
+ encrypt: true
+ keyid: {{ .image_registry.registry.storage.s3.keyid }}
+ secure: true
+ v4auth: true
+ chunksize: 5242880
+ multipartcopychunksize: 33554432
+ multipartcopymaxconcurrency: 100
+ multipartcopythresholdsize: 33554432
+ rootdirectory: {{ .image_registry.registry.storage.s3.rootdirectory }}
+ usedualstack: false
+ loglevel: debug
+{{- end }}
+{{- if not (or .image_registry.registry.storage.filesystem.rootdirectory .image_registry.registry.storage.azure .image_registry.registry.storage.gcs .image_registry.registry.storage.s3) }}
+  # the registry accepts exactly one storage driver; fall back to inmemory only
+  # when no other driver is configured above
+  inmemory: # This driver takes no parameters
+{{- end }}
+  delete:
+    enabled: false
+  redirect:
+    disable: false
+  cache:
+    # redis is commented out below, so use the in-memory blob descriptor cache
+    blobdescriptor: inmemory
+    blobdescriptorsize: 10000
+ maintenance:
+ uploadpurging:
+ enabled: true
+ age: 168h
+ interval: 24h
+ dryrun: false
+ readonly:
+ enabled: false
+#auth:
+# silly:
+# realm: silly-realm
+# service: silly-service
+# token:
+# autoredirect: true
+# realm: token-realm
+# service: token-service
+# issuer: registry-token-issuer
+# rootcertbundle: /root/certs/bundle
+# htpasswd:
+# realm: basic-realm
+# path: /path/to/htpasswd
+#middleware:
+# registry:
+# - name: ARegistryMiddleware
+# options:
+# foo: bar
+# repository:
+# - name: ARepositoryMiddleware
+# options:
+# foo: bar
+# storage:
+# - name: cloudfront
+# options:
+# baseurl: https://my.cloudfronted.domain.com/
+# privatekey: /path/to/pem
+# keypairid: cloudfrontkeypairid
+# duration: 3000s
+# ipfilteredby: awsregion
+#        awsregion: us-east-1, us-east-2
+# updatefrequency: 12h
+# iprangesurl: https://ip-ranges.amazonaws.com/ip-ranges.json
+# - name: redirect
+# options:
+# baseurl: https://example.com/
+http:
+  # listen on all interfaces so the container port mapping (443:5000) works
+  addr: 0.0.0.0:5000
+# prefix: /my/nested/registry/
+# host: https://myregistryaddress.org:5000
+ secret: asecretforlocaldevelopment
+ relativeurls: false
+ draintimeout: 60s
+ tls:
+ certificate: /etc/registry/ssl/server.crt
+ key: /etc/registry/ssl/server.key
+# clientcas:
+# - /path/to/ca.pem
+# - /path/to/another/ca.pem
+# letsencrypt:
+# cachefile: /path/to/cache-file
+# email: emailused@letsencrypt.com
+# hosts: [myregistryaddress.org]
+# directoryurl: https://acme-v02.api.letsencrypt.org/directory
+# debug:
+# addr: localhost:5001
+# prometheus:
+# enabled: true
+# path: /metrics
+ headers:
+ X-Content-Type-Options: [nosniff]
+ http2:
+ disabled: false
+ h2c:
+ enabled: false
+#notifications:
+# events:
+# includereferences: true
+# endpoints:
+# - name: alistener
+# disabled: false
+# url: https://my.listener.com/event
+# headers:
+# timeout: 1s
+# threshold: 10
+# backoff: 1s
+# ignoredmediatypes:
+# - application/octet-stream
+# ignore:
+# mediatypes:
+# - application/octet-stream
+# actions:
+# - pull
+#redis:
+# addr: localhost:6379
+# password: asecret
+# db: 0
+# dialtimeout: 10ms
+# readtimeout: 10ms
+# writetimeout: 10ms
+# pool:
+# maxidle: 16
+# maxactive: 64
+# idletimeout: 300s
+# tls:
+# enabled: false
+health:
+ storagedriver:
+ enabled: true
+ interval: 10s
+ threshold: 3
+# file:
+# - file: /path/to/checked/file
+# interval: 10s
+# http:
+# - uri: http://server.to.check/must/return/200
+# headers:
+# Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==]
+# statuscode: 200
+# timeout: 3s
+# interval: 10s
+# threshold: 3
+# tcp:
+# - addr: redis-server.domain.com:6379
+# timeout: 3s
+# interval: 10s
+#      threshold: 3
+#proxy:
+# remoteurl: https://registry-1.docker.io
+# username: [username]
+# password: [password]
+# ttl: 168h
+#validation:
+# manifests:
+# urls:
+# allow:
+# - ^https?://([^/]+\.)*example\.com/
+# deny:
+# - ^https?://www\.example\.com/
diff --git a/feature/builtin/roles/install/image-registry/templates/registry.docker-compose b/feature/builtin/roles/install/image-registry/templates/registry.docker-compose
new file mode 100644
index 000000000..573603764
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/templates/registry.docker-compose
@@ -0,0 +1,54 @@
+---
+version: '2.3'
+services:
+ registry:
+ image: registry:{{ .registry_version }}
+ container_name: registry
+ restart: always
+ dns_search: .
+ cap_drop:
+ - ALL
+ cap_add:
+ - CHOWN
+ - DAC_OVERRIDE
+ - SETGID
+ - SETUID
+ volumes:
+ - type: bind
+ source: /opt/registry/{{ .registry_version }}/ssl/
+ target: /etc/registry/ssl/
+ - type: bind
+ source: /opt/registry/{{ .registry_version }}/config.yml
+ target: /etc/docker/registry/config.yml
+    ports:
+ - 443:5000
+ networks:
+ - registry
+{{- if and .image_registry.ha_vip (ne .image_registry.ha_vip "") }}
+ keepalived:
+ image: osixia/keepalived:{{ .keepalived_version }}
+ container_name: keepalived
+ restart: always
+ dns_search: .
+ cap_drop:
+ - ALL
+ cap_add:
+ - CHOWN
+ - DAC_OVERRIDE
+ - SETGID
+ - SETUID
+ depends_on:
+ - registry
+ volumes:
+ - type: bind
+ source: /opt/keeplived/{{ .keepalived_version }}/keepalived.conf
+ target: /container/service/keepalived/assets/keepalived.conf
+ - type: bind
+ source: /opt/keeplived/{{ .keepalived_version }}/healthcheck.sh
+ target: /etc/keepalived/healthcheck.sh
+ networks:
+ - registry
+{{- end }}
+networks:
+ registry:
+ external: false
diff --git a/feature/builtin/roles/install/image-registry/templates/registry.service b/feature/builtin/roles/install/image-registry/templates/registry.service
new file mode 100644
index 000000000..f6e7f56cf
--- /dev/null
+++ b/feature/builtin/roles/install/image-registry/templates/registry.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=registry
+After=docker.service systemd-networkd.service systemd-resolved.service
+Requires=docker.service
+
+[Service]
+Type=simple
+ExecStart=/usr/local/bin/docker-compose -p registry -f /opt/registry/{{ .registry_version }}/docker-compose.yml up
+ExecStop=/usr/local/bin/docker-compose -p registry -f /opt/registry/{{ .registry_version }}/docker-compose.yml down
+Restart=on-failure
+[Install]
+WantedBy=multi-user.target
diff --git a/feature/builtin/roles/install/kubernetes/defaults/main.yaml b/feature/builtin/roles/install/kubernetes/defaults/main.yaml
new file mode 100644
index 000000000..4796709f0
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/defaults/main.yaml
@@ -0,0 +1,166 @@
+kubernetes:
+ cluster_name: cluster.local
+ # support: flannel, calico
+ kube_network_plugin: calico
+ # the image repository of kubernetes.
+ image_repository: |
+ {{ .k8s_registry }}
+  # minimal memory size required for each kube_worker node (unit: MB).
+  # each worker node's memory should be greater than or equal to this value.
+  minimal_node_memory_mb: 10
+ # the maximum number of pods that can be run on each node.
+ max_pods: 110
+ audit: false
+ networking:
+ dns_domain: cluster.local
+    # supports two comma-separated values, e.g. value1,value2:
+    # the first value is the ipv4_cidr, the second is the ipv6_cidr.
+ pod_cidr: 10.233.64.0/18
+ service_cidr: 10.233.0.0/18
+ dns_image: |
+ {{ .k8s_registry }}/coredns/coredns:1.8.6
+ dns_cache_image: |
+ {{ .dockerio_registry }}/kubesphere/k8s-dns-node-cache:1.22.20
+ dns_service_ip: |
+ {{ .kubernetes.networking.service_cidr | ipInCIDR 2 }}
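+    # e.g. with service_cidr 10.233.0.0/18 this is expected to render 10.233.0.2
+    # (assumption: ipInCIDR picks the nth address within the given CIDR).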
+ # Specify a stable IP address or DNS name for the control plane.
+# control_plane_endpoint: lb.kubesphere.local
+ apiserver:
+ port: 6443
+ certSANs: []
+ extra_args:
+ bind-address: 0.0.0.0
+ feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
+ controller_manager:
+ # Set the Pod CIDR size of a node.
+ kube_network_node_prefix: 24
+ extra_args:
+ feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
+ scheduler:
+ extra_args:
+ feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
+ kube_proxy:
+ enabled: true
+ # support ipvs and iptables
+ mode: "ipvs"
+ config:
+ iptables:
+ masqueradeAll: false
+ masqueradeBit: 14
+ minSyncPeriod: 0s
+ syncPeriod: 30s
+ kubelet:
+ max_pod: 110
+ pod_pids_limit: 10000
+# feature_gates:
+ container_log_max_size: 5Mi
+ container_log_max_files: 3
+# extra_args:
+ coredns:
+ dns_etc_hosts: []
+ # the config for zones
+ zone_configs:
+    # DNS zones to match; the port defaults to 53. The format is:
+    #   .: match all dns zones.
+    #   example.com: match *.example.com, served on port 53.
+    #   example.com:54: match *.example.com, served on port 54.
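+    # an illustrative (hypothetical) additional entry:
+    #   - zones: ["example.com:54"]
+    #     cache: 30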
+ - zones: [".:53"]
+ additional_configs:
+ - errors
+ - ready
+ - prometheus :9153
+ - loop
+ - reload
+ - loadbalance
+ cache: 30
+ kubernetes:
+ zones:
+ - "{{ .kubernetes.networking.dns_domain }}"
+ # rewrite performs internal message rewriting.
+# rewrite:
+#    # multiple rules may be specified, and an incoming query can match several of them.
+#    # continue: if this rule matches, continue evaluating the remaining rules.
+#    # stop: if this rule matches, stop evaluating the remaining rules.
+# - rule: continue
+# # support: type, name, class, edns0, ttl, cname
+# # type: the type field of the request will be rewritten. FROM/TO must be a DNS record type (A, MX, etc.).
+# # name: the query name in the request is rewritten; by default this is a full match of the name
+# # class: the class of the message will be rewritten.
+# # edns0: an EDNS0 option can be appended to the request as described below in the EDNS0 Options section.
+# # ttl: the TTL value in the response is rewritten.
+# # cname: the CNAME target if the response has a CNAME record
+# field: name
+# # this optional element can be specified for a name or ttl field.
+# # exact: the name must be exactly the same as the value.
+# # prefix: the name must start with the value.
+# # suffix: the name must end with the value.
+# # substring: the name must contain the value.
+# # regex: the name must match the value.
+# type: exact
+# value: "example.com example2.com"
+#      # for the name field, further options are possible to control response rewrites.
+#      # answer auto: the names in the response are rewritten in a best-effort manner.
+#      # answer name FROM TO: the query name in the response is rewritten by matching the FROM regex pattern.
+#      # answer value FROM TO: the names in the response are rewritten by matching the FROM regex pattern.
+# options: ""
+ forward:
+ # the base domain to match for the request to be forwarded.
+ - from: "."
+ # the destination endpoints to forward to. The TO syntax allows you to specify a protocol
+ to: ["/etc/resolv.conf"]
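+        # illustrative alternatives; CoreDNS forward also accepts dns:// and tls:// endpoints:
+        # to: ["tls://9.9.9.9", "dns://10.3.0.10:53"]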
+ # a space-separated list of domains to exclude from forwarding.
+ except: []
+ # use TCP even when the request comes in over UDP.
+ force_tcp: false
+ # try first using UDP even when the request comes in over TCP.
+ # If response is truncated (TC flag set in response) then do another attempt over TCP.
+ prefer_udp: false
+ # the number of subsequent failed health checks that are needed before considering an upstream to be down
+ # If 0, the upstream will never be marked as down (nor health checked).
+# max_fails: 2
+ # expire (cached) connections after this time,
+# expire: 10s
+ # define the TLS properties for TLS connection.
+# tls:
+# # the path to the certificate file.
+# cert_file: ""
+# # the path to the key file.
+# key_file: ""
+# # the path to the CA certificate file.
+# ca_file: ""
+# # allows you to set a server name in the TLS configuration
+# tls_servername: ""
+ # specifies the policy to use for selecting upstream servers. The default is random.
+ # random: a policy that implements random upstream selection.
+ # round_robin: a policy that selects hosts based on round robin ordering.
+ # sequential: a policy that selects hosts based on sequential ordering.
+# policy: "random"
+      # configure the behaviour of health checking of the upstream servers.
+      # format: DURATION [no_rec] [domain FQDN]
+      # DURATION: use a different duration for health checking; the default duration is 0.5s.
+      # no_rec: optional argument that sets the RecursionDesired flag of the dns query used in health checking to false. The flag defaults to true.
+      # domain FQDN: set the domain name used for health checks to FQDN. If not configured, the domain name used for health checks is ".".
+# health_check: ""
+ # limit the number of concurrent queries to MAX.
+ max_concurrent: 1000
+ kube_vip:
+ enabled: false
+ address: |
+ {{ .kubernetes.control_plane_endpoint }}
+    # supported modes: BGP, ARP
+ mode: BGP
+ image: |
+ {{ .dockerio_registry }}/plndr/kube-vip:v0.7.2
+ haproxy:
+ enabled: false
+ health_port: 8081
+ image: |
+ {{ .dockerio_registry }}/library/haproxy:2.9.6-alpine
+ etcd:
+    # etcd can be deployed in one of two ways.
+    # external: use an external etcd cluster.
+    # internal: deploy the etcd cluster as static pods.
+ deployment_type: external
+ image: |
+ {{ .k8s_registry }}/etcd:3.5.0
+ custom_label: {}
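+  # illustrative (hypothetical) example:
+  # custom_label:
+  #   topology.kubernetes.io/zone: zone-a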
diff --git a/feature/builtin/roles/install/kubernetes/files/audit/audit_policy.yaml b/feature/builtin/roles/install/kubernetes/files/audit/audit_policy.yaml
new file mode 100644
index 000000000..1ef9eb677
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/files/audit/audit_policy.yaml
@@ -0,0 +1,123 @@
+apiVersion: audit.k8s.io/v1
+kind: Policy
+rules:
+ # The following requests were manually identified as high-volume and low-risk,
+ # so drop them.
+ - level: None
+ users: ["system:kube-proxy"]
+ verbs: ["watch"]
+ resources:
+ - group: "" # core
+ resources: ["endpoints", "services", "services/status"]
+ - level: None
+ users: ["system:unsecured"]
+ namespaces: ["kube-system"]
+ verbs: ["get"]
+ resources:
+ - group: "" # core
+ resources: ["configmaps"]
+ - level: None
+ users: ["kubelet"] # legacy kubelet identity
+ verbs: ["get"]
+ resources:
+ - group: "" # core
+ resources: ["nodes", "nodes/status"]
+ - level: None
+ userGroups: ["system:nodes"]
+ verbs: ["get"]
+ resources:
+ - group: "" # core
+ resources: ["nodes", "nodes/status"]
+ - level: None
+ users:
+ - system:kube-controller-manager
+ - system:kube-scheduler
+ - system:serviceaccount:kube-system:endpoint-controller
+ verbs: ["get", "update"]
+ namespaces: ["kube-system"]
+ resources:
+ - group: "" # core
+ resources: ["endpoints"]
+ - level: None
+ users: ["system:apiserver"]
+ verbs: ["get"]
+ resources:
+ - group: "" # core
+ resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
+ # Don't log HPA fetching metrics.
+ - level: None
+ users:
+ - system:kube-controller-manager
+ verbs: ["get", "list"]
+ resources:
+ - group: "metrics.k8s.io"
+ # Don't log these read-only URLs.
+ - level: None
+ nonResourceURLs:
+ - /healthz*
+ - /version
+ - /swagger*
+ # Don't log events requests.
+ - level: None
+ resources:
+ - group: "" # core
+ resources: ["events"]
+ # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
+ # so only log at the Metadata level.
+ - level: Metadata
+ resources:
+ - group: "" # core
+ resources: ["secrets", "configmaps", "serviceaccounts/token"]
+ - group: authentication.k8s.io
+ resources: ["tokenreviews"]
+ omitStages:
+ - "RequestReceived"
+ # Get responses can be large; skip them.
+ - level: Request
+ verbs: ["get", "list", "watch"]
+ resources:
+ - group: "" # core
+ - group: "admissionregistration.k8s.io"
+ - group: "apiextensions.k8s.io"
+ - group: "apiregistration.k8s.io"
+ - group: "apps"
+ - group: "authentication.k8s.io"
+ - group: "authorization.k8s.io"
+ - group: "autoscaling"
+ - group: "batch"
+ - group: "certificates.k8s.io"
+ - group: "extensions"
+ - group: "metrics.k8s.io"
+ - group: "networking.k8s.io"
+ - group: "policy"
+ - group: "rbac.authorization.k8s.io"
+ - group: "settings.k8s.io"
+ - group: "storage.k8s.io"
+ omitStages:
+ - "RequestReceived"
+ # Default level for known APIs
+ - level: RequestResponse
+ resources:
+ - group: "" # core
+ - group: "admissionregistration.k8s.io"
+ - group: "apiextensions.k8s.io"
+ - group: "apiregistration.k8s.io"
+ - group: "apps"
+ - group: "authentication.k8s.io"
+ - group: "authorization.k8s.io"
+ - group: "autoscaling"
+ - group: "batch"
+ - group: "certificates.k8s.io"
+ - group: "extensions"
+ - group: "metrics.k8s.io"
+ - group: "networking.k8s.io"
+ - group: "policy"
+ - group: "rbac.authorization.k8s.io"
+ - group: "settings.k8s.io"
+ - group: "storage.k8s.io"
+ omitStages:
+ - "RequestReceived"
+ # Default level for all other requests.
+ - level: Metadata
+ omitStages:
+ - "RequestReceived"
diff --git a/feature/builtin/roles/install/kubernetes/files/audit/audit_webhook.yaml b/feature/builtin/roles/install/kubernetes/files/audit/audit_webhook.yaml
new file mode 100644
index 000000000..9ea361472
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/files/audit/audit_webhook.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Config
+clusters:
+ - name: kube-auditing
+ cluster:
+ server: https://SHOULD_BE_REPLACED:6443/audit/webhook/event
+ insecure-skip-tls-verify: true
+contexts:
+ - context:
+ cluster: kube-auditing
+ user: ""
+ name: default-context
+current-context: default-context
+preferences: {}
+users: []
diff --git a/feature/builtin/roles/install/kubernetes/files/kubelet.service b/feature/builtin/roles/install/kubernetes/files/kubelet.service
new file mode 100644
index 000000000..ca57d677e
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/files/kubelet.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=kubelet: The Kubernetes Node Agent
+Documentation=http://kubernetes.io/docs/
+
+[Service]
+CPUAccounting=true
+MemoryAccounting=true
+ExecStart=/usr/local/bin/kubelet
+Restart=always
+StartLimitInterval=0
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/feature/builtin/roles/install/kubernetes/tasks/deploy_cluster_dns.yaml b/feature/builtin/roles/install/kubernetes/tasks/deploy_cluster_dns.yaml
new file mode 100644
index 000000000..8ed126327
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/tasks/deploy_cluster_dns.yaml
@@ -0,0 +1,19 @@
+---
+- name: Generate coredns config
+ template:
+ src: dns/coredns.deployment
+ dest: /etc/kubernetes/coredns.yaml
+
+- name: Apply coredns config
+ command: |
+ kubectl delete svc kube-dns -n kube-system
+ kubectl apply -f /etc/kubernetes/coredns.yaml
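+# kube-dns (created by kubeadm) is removed first because the coredns manifest
+# recreates the cluster DNS service with the configured dns_service_ip.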
+
+- name: Generate nodelocaldns deployment
+ template:
+ src: dns/nodelocaldns.daemonset
+ dest: /etc/kubernetes/nodelocaldns.yaml
+
+- name: Apply nodelocaldns config
+ command: |
+ kubectl apply -f /etc/kubernetes/nodelocaldns.yaml
diff --git a/feature/builtin/roles/install/kubernetes/tasks/deploy_haproxy.yaml b/feature/builtin/roles/install/kubernetes/tasks/deploy_haproxy.yaml
new file mode 100644
index 000000000..e1eb37c00
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/tasks/deploy_haproxy.yaml
@@ -0,0 +1,15 @@
+---
+- name: Generate haproxy config
+ template:
+ src: haproxy/haproxy.cfg
+ dest: /etc/kubekey/haproxy/haproxy.cfg
+
+- name: Get md5 for haproxy config
+ command: |
+    md5sum /etc/kubekey/haproxy/haproxy.cfg | cut -d' ' -f1
+ register: cfg_md5
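+# the registered md5 is rendered into the haproxy static pod's cfg-checksum
+# annotation (see haproxy.yaml), so the kubelet recreates the pod whenever the
+# config changes.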
+
+- name: Generate haproxy manifest
+ template:
+ src: haproxy/haproxy.yaml
+ dest: /etc/kubernetes/manifests/haproxy.yaml
diff --git a/feature/builtin/roles/install/kubernetes/tasks/deploy_kube_vip.yaml b/feature/builtin/roles/install/kubernetes/tasks/deploy_kube_vip.yaml
new file mode 100644
index 000000000..8f33ec433
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/tasks/deploy_kube_vip.yaml
@@ -0,0 +1,28 @@
+---
+# install with static pod: https://kube-vip.io/docs/installation/static/
+- name: Get interface for ipv4
+ command: |
+    ip route | grep ' {{ .internal_ipv4 }} ' | grep 'proto kernel scope link src' | sed -e 's/^.*dev.//' -e 's/.proto.*//' | uniq
+ register: interface
+
+- name: Generate kubevip manifest
+ template:
+ src: |
+ kubevip/kubevip.{{ .kubernetes.kube_vip.mode }}
+ dest: /etc/kubernetes/manifests/kubevip.yaml
+
+- name: Update kubelet config
+ command: |
+ sed -i 's#server:.*#server: https://127.0.0.1:{{ .kubernetes.apiserver.port }}#g' /etc/kubernetes/kubelet.conf
+ systemctl restart kubelet
+
+- name: Update kube-proxy config
+ command: |
+ set -o pipefail && /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf get configmap kube-proxy -n kube-system -o yaml \
+ | sed 's#server:.*#server: https://127.0.0.1:{{ .kubernetes.apiserver.port }}#g' \
+ | /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf replace -f -
+ /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0
+
+- name: Update hosts file
+ command: |
+    sed -i 's#.* {{ .kubernetes.control_plane_endpoint }}#127.0.0.1 {{ .kubernetes.control_plane_endpoint }}#g' /etc/hosts
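+# pinning the endpoint to 127.0.0.1 in /etc/hosts routes this node's
+# control-plane traffic through the local kube-vip listener instead of a
+# remote address.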
diff --git a/feature/builtin/roles/install/kubernetes/tasks/init_kubernetes.yaml b/feature/builtin/roles/install/kubernetes/tasks/init_kubernetes.yaml
new file mode 100644
index 000000000..aa09aab11
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/tasks/init_kubernetes.yaml
@@ -0,0 +1,85 @@
+---
+- name: Add kube user
+ command: |
+ useradd -M -c 'Kubernetes user' -s /sbin/nologin -r kube || :
+
+- name: Create kube directories
+ command: |
+ if [ ! -d "{{ .item.path }}" ]; then
+ mkdir -p {{ .item.path }} && chown kube -R {{ .item.chown }}
+ fi
+ loop:
+ - {path: "/usr/local/bin", chown: "/usr/local/bin"}
+ - {path: "/etc/kubernetes", chown: "/etc/kubernetes"}
+ - {path: "/etc/kubernetes/pki", chown: "/etc/kubernetes/pki"}
+ - {path: "/etc/kubernetes/manifests", chown: "/etc/kubernetes/manifests"}
+ - {path: "/usr/local/bin/kube-scripts", chown: "/usr/local/bin/kube-scripts"}
+ - {path: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec", chown: "/usr/libexec/kubernetes"}
+ - {path: "/etc/cni/net.d", chown: "/etc/cni"}
+ - {path: "/opt/cni/bin", chown: "/opt/cni"}
+ - {path: "/var/lib/calico", chown: "/var/lib/calico"}
+
+- name: Sync external etcd config
+ when: and (.kubernetes.etcd.deployment_type | eq "external") (.groups.etcd | default list | len | lt 0)
+ block:
+ - name: Sync etcd ca file to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/root.crt
+ dest: /etc/kubernetes/pki/etcd/ca.crt
+ - name: Sync etcd cert files to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.crt
+ dest: /etc/kubernetes/pki/etcd/client.crt
+ - name: Sync etcd key files to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.key
+ dest: /etc/kubernetes/pki/etcd/client.key
+
+- name: Sync audit policy file to remote
+ copy:
+ src: audit
+ dest: /etc/kubernetes/audit/
+ when: .kubernetes.audit
+
+- name: Generate kubeadm init config
+ template:
+ src: |
+ {{- if .kube_version | semverCompare ">=v1.24.0" }}
+ kubeadm/kubeadm-init.v1beta3
+ {{- else }}
+ kubeadm/kubeadm-init.v1beta2
+ {{- end }}
+ dest: /etc/kubernetes/kubeadm-config.yaml
+
+- name: Init kubernetes cluster
+ block:
+ - name: Init kubernetes by kubeadm
+ command: |
+ /usr/local/bin/kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull {{ if not .kubernetes.kube_proxy.enabled }}--skip-phases=addon/kube-proxy{{ end }}
+ rescue:
+ - name: Reset kubeadm if init failed
+ command: |
+ kubeadm reset -f {{ if and .cri.cri_socket (ne .cri.cri_socket "") }}--cri-socket {{ .cri.cri_socket }}{{ end }}
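+      # a failed init can leave partial state on the node; resetting returns it
+      # to a clean state so the play can be retried.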
+
+- name: Copy kubeconfig to default dir
+ command: |
+ if [ ! -d /root/.kube ]; then
+ mkdir -p /root/.kube
+ fi
+ cp -f /etc/kubernetes/admin.conf /root/.kube/config
+ when: .kube_node_info_important.stderr | ne ""
+
+- name: Set to worker node
+ when: .groups.kube_worker | default list | has .inventory_name
+ block:
+    - name: Remove master taint
+ ignore_errors: true
+ command: |
+ /usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/master=:NoSchedule-
+ /usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/control-plane=:NoSchedule-
+    - name: Add worker label
+ command: |
+ /usr/local/bin/kubectl label --overwrite node {{ .hostname }} node-role.kubernetes.io/worker=
diff --git a/feature/builtin/roles/install/kubernetes/tasks/install_binaries.yaml b/feature/builtin/roles/install/kubernetes/tasks/install_binaries.yaml
new file mode 100644
index 000000000..be14fff84
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/tasks/install_binaries.yaml
@@ -0,0 +1,92 @@
+---
+- name: Check if helm is installed
+ ignore_errors: true
+ command: helm version
+ register: helm_install_version
+- name: Install helm
+ when: or (.helm_install_version.stderr | ne "") (.helm_install_version.stdout | contains (printf "Version:\"%s\"" .helm_version) | not)
+ block:
+ - name: Sync helm to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/helm/{{ .helm_version }}/{{ .binary_type.stdout }}/helm-{{ .helm_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ dest: |
+ /tmp/kubekey/helm-{{ .helm_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ - name: Install helm
+ command: |
+ tar --strip-components=1 -zxvf /tmp/kubekey/helm-{{ .helm_version }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin linux-{{ .binary_type.stdout }}/helm
+
+- name: Check if kubeadm is installed
+ ignore_errors: true
+ command: kubeadm version -o short
+ register: kubeadm_install_version
+- name: Install kubeadm
+ when: or (.kubeadm_install_version.stderr | ne "") (.kubeadm_install_version.stdout | ne .kube_version)
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/kube/{{ .kube_version }}/{{ .binary_type.stdout }}/kubeadm
+ dest: /usr/local/bin/kubeadm
+ mode: 0755
+
+- name: Check if kubectl is installed
+ ignore_errors: true
+ command: kubectl version
+ register: kubectl_install_version
+- name: Sync kubectl to remote
+ when: or (.kubectl_install_version.stderr | ne "") (.kubectl_install_version.stdout | contains (printf "GitVersion:\"%s\"" .kube_version) | not)
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/kube/{{ .kube_version }}/{{ .binary_type.stdout }}/kubectl
+ dest: /usr/local/bin/kubectl
+ mode: 0755
+
+- name: Check if kubelet is installed
+ ignore_errors: true
+ command: kubelet --version
+ register: kubelet_install_version
+- name: Install kubelet
+ when: or (.kubelet_install_version.stderr | ne "") (.kubelet_install_version.stdout | ne (printf "Kubernetes %s" .kube_version))
+ block:
+ - name: Sync kubelet to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/kube/{{ .kube_version }}/{{ .binary_type.stdout }}/kubelet
+ dest: /usr/local/bin/kubelet
+ mode: 0755
+ - name: Sync kubelet env to remote
+ template:
+ src: kubeadm/kubelet.env
+ dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+ - name: Sync kubelet service to remote
+ copy:
+ src: kubelet.service
+ dest: /etc/systemd/system/kubelet.service
+ - name: Register kubelet service
+ command: systemctl daemon-reload && systemctl enable kubelet.service
+
+- name: Check if calicoctl is installed
+ ignore_errors: true
+ command: calicoctl --version
+ register: calicoctl_install_version
+- name: Install calicoctl
+ when:
+ - and .calico_version (ne .calico_version "")
+ - |
+ or (.calicoctl_install_version.stderr | ne "") (.calicoctl_install_version.stdout | contains (printf "Client Version: %s" .calico_version) | not)
+ block:
+ - name: Sync calicoctl to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/cni/{{ .calico_version }}/{{ .binary_type.stdout }}/calicoctl
+ dest: /usr/local/bin/calicoctl
+ mode: 0755
diff --git a/feature/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml b/feature/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml
new file mode 100644
index 000000000..20b38f005
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml
@@ -0,0 +1,43 @@
+---
+- name: Generate kubeadm join config
+ template:
+ src: |
+ {{- if .kube_version | semverCompare ">=v1.24.0" }}
+ kubeadm/kubeadm-join.v1beta3
+ {{- else }}
+ kubeadm/kubeadm-join.v1beta2
+ {{- end }}
+ dest: /etc/kubernetes/kubeadm-config.yaml
+
+- name: Sync audit policy file to remote
+ copy:
+ src: audit
+ dest: /etc/kubernetes/audit/
+ when: .kubernetes.audit
+
+- name: Join kubernetes cluster
+ block:
+ - name: Join kubernetes by kubeadm
+ command: |
+ /usr/local/bin/kubeadm join --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull
+ rescue:
+ - name: Reset kubeadm if join failed
+ command: kubeadm reset -f {{ if and .cri.cri_socket (ne .cri.cri_socket "") }}--cri-socket {{ .cri.cri_socket }}{{ end }}
+
+- name: Sync kubeconfig to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/kubeconfig
+ dest: /root/.kube/config
+
+- name: Set to worker node
+ when: .groups.kube_worker | default list | has .inventory_name
+ block:
+    - name: Remove master taint
+ ignore_errors: true
+ command: |
+ /usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/master=:NoSchedule-
+ /usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/control-plane=:NoSchedule-
+    - name: Add worker label
+ command: |
+ /usr/local/bin/kubectl label --overwrite node {{ .hostname }} node-role.kubernetes.io/worker=
diff --git a/feature/builtin/roles/install/kubernetes/tasks/main.yaml b/feature/builtin/roles/install/kubernetes/tasks/main.yaml
new file mode 100644
index 000000000..13ebfdf1c
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/tasks/main.yaml
@@ -0,0 +1,69 @@
+---
+- name: Check kubernetes if installed
+ ignore_errors: true
+ command: kubectl get node --field-selector metadata.name={{ .hostname }}
+ register: kube_node_info_important
+
+- include_tasks: install_binaries.yaml
+
+- include_tasks: deploy_kube_vip.yaml
+ when:
+ - .kubernetes.kube_vip.enabled
+ - .groups.kube_control_plane | default list | has .inventory_name
+
+- name: Select init kubernetes node
+ run_once: true
+ set_fact:
+ init_kubernetes_node: |
+ {{ index .inventory_hosts (.groups.kube_control_plane | default list | first) "hostname" }}
+
+- name: Init kubernetes
+ when: eq .hostname .init_kubernetes_node
+ block:
+ - include_tasks: init_kubernetes.yaml
+ when: .kube_node_info_important.stderr | ne ""
+ - include_tasks: deploy_cluster_dns.yaml
+ - name: Fetch kubeconfig to local
+ fetch:
+ src: /etc/kubernetes/admin.conf
+ dest: |
+ {{ .work_dir }}/kubekey/kubeconfig
+ - name: Generate certificate key by kubeadm
+ command: |
+ /usr/local/bin/kubeadm init phase upload-certs --upload-certs --config /etc/kubernetes/kubeadm-config.yaml 2>&1 \
+ | awk '/Using certificate key:/{getline; print}'
+ register: kubeadm_cert_result
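+      # kubeadm prints "Using certificate key:" with the key itself on the next
+      # line; awk's getline captures that following line.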
+ - name: Set_Fact certificate key to all hosts
+ set_fact:
+ kubeadm_cert: |
+ {{ .kubeadm_cert_result.stdout }}
+ - name: Generate kubeadm token
+ block:
+ - name: Generate token by kubeadm
+ command: /usr/local/bin/kubeadm token create
+ register: kubeadm_token_result
+ - name: Set_Fact token to all hosts
+ set_fact:
+ kubeadm_token: |
+ {{ .kubeadm_token_result.stdout }}
+ - name: Set_Fact init endpoint
+ set_fact:
+ init_kubernetes_endpoint: |
+ {{ .inventory_name }}
+
+- include_tasks: join_kubernetes.yaml
+ when:
+ - .kube_node_info_important.stderr | ne ""
+ - ne .hostname .init_kubernetes_node
+
+- include_tasks: deploy_haproxy.yaml
+ when:
+ - .kubernetes.haproxy.enabled
+ - .groups.kube_worker | default list | has .inventory_name
+
+- name: Add custom label to cluster
+ command: |
+ {{- range $k, $v := .kubernetes.custom_label }}
+ /usr/local/bin/kubectl label --overwrite node {{ $.hostname }} {{ $k }}={{ $v }}
+ {{- end }}
+ when: .kubernetes.custom_label | len | lt 0
diff --git a/feature/builtin/roles/install/kubernetes/templates/dns/coredns.deployment b/feature/builtin/roles/install/kubernetes/templates/dns/coredns.deployment
new file mode 100644
index 000000000..13dca952f
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/templates/dns/coredns.deployment
@@ -0,0 +1,262 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ kubernetes.io/bootstrapping: rbac-defaults
+ addonmanager.kubernetes.io/mode: Reconcile
+ name: system:coredns
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - endpoints
+ - services
+ - pods
+ - namespaces
+ verbs:
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+- apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - list
+ - watch
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: coredns
+ namespace: kube-system
+ labels:
+ k8s-app: kube-dns
+ kubernetes.io/cluster-service: "true"
+ kubernetes.io/name: "CoreDNS"
+ addonmanager.kubernetes.io/mode: Reconcile
+ annotations:
+ prometheus.io/port: "9153"
+ prometheus.io/scrape: "true"
+ createdby: 'kubekey'
+spec:
+ clusterIP: {{ .kubernetes.networking.dns_service_ip }}
+ selector:
+ k8s-app: kube-dns
+ ports:
+ - name: dns
+ port: 53
+ protocol: UDP
+ - name: dns-tcp
+ port: 53
+ protocol: TCP
+ - name: metrics
+ port: 9153
+ protocol: TCP
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: "coredns"
+ namespace: kube-system
+ labels:
+ k8s-app: "kube-dns"
+ addonmanager.kubernetes.io/mode: Reconcile
+ kubernetes.io/name: "coredns"
+spec:
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 0
+ maxSurge: 10%
+ selector:
+ matchLabels:
+ k8s-app: kube-dns
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-dns
+ annotations:
+ createdby: 'kubekey'
+ spec:
+ securityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ priorityClassName: system-cluster-critical
+ serviceAccountName: coredns
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchLabels:
+ k8s-app: kube-dns
+ topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: In
+ values:
+ - ""
+ containers:
+ - name: coredns
+ image: "{{ .kubernetes.networking.dns_image }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ # TODO: Set memory limits when we've profiled the container for large
+ # clusters, then set request = limit to keep this container in
+ # guaranteed class. Currently, this container falls into the
+ # "burstable" category so the kubelet doesn't backoff from restarting it.
+ limits:
+ memory: 300Mi
+ requests:
+ cpu: 100m
+ memory: 70Mi
+ args: [ "-conf", "/etc/coredns/Corefile" ]
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/coredns
+ ports:
+ - containerPort: 53
+ name: dns
+ protocol: UDP
+ - containerPort: 53
+ name: dns-tcp
+ protocol: TCP
+ - containerPort: 9153
+ name: metrics
+ protocol: TCP
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ add:
+ - NET_BIND_SERVICE
+ drop:
+ - all
+ readOnlyRootFilesystem: true
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: 8080
+ scheme: HTTP
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 10
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8181
+ scheme: HTTP
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 10
+ dnsPolicy: Default
+ volumes:
+ - name: config-volume
+ configMap:
+ name: coredns
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: coredns
+ namespace: kube-system
+ labels:
+ addonmanager.kubernetes.io/mode: EnsureExists
+data:
+ Corefile: |
+ {{- range .kubernetes.coredns.zone_configs }}
+ {{ .zones | join " " }} {
+ cache {{ .cache }}
+ {{- range .additional_configs }}
+ {{ . }}
+ {{- end }}
+
+ {{- range .rewrite }}
+ rewrite {{ .rule }} {
+ {{ .field }} {{ .type }} {{ .value }}
+ {{ .options }}
+ }
+ {{- end }}
+
+ health {
+ lameduck 5s
+ }
+
+ {{- if .kubernetes.zones | len | lt 0 }}
+ kubernetes {{ .kubernetes.zones | join " " }} in-addr.arpa ip6.arpa {
+ pods insecure
+ fallthrough in-addr.arpa ip6.arpa
+ ttl 30
+ }
+ {{- end }}
+
+ {{- range .forward }}
+ forward {{ .from }} {{ .to | join " " }} {
+ {{- if .except | len | lt 0 }}
+ except {{ .except | join " " }}
+ {{- end }}
+ {{- if .force_tcp }}
+ force_tcp
+ {{- end }}
+ {{- if .prefer_udp }}
+ prefer_udp
+ {{- end }}
+ {{- if .max_fails }}
+ max_fails {{ .max_fails }}
+ {{- end }}
+ {{- if .expire }}
+ expire {{ .expire }}
+ {{- end }}
+ {{- if .tls }}
+ tls {{ .tls.cert_file }} {{ .tls.key_file }} {{ .tls.ca_file }}
+ {{- end }}
+ {{- if .tls_servername }}
+ tls_servername {{ .tls_servername }}
+ {{- end }}
+ {{- if .policy }}
+ policy {{ .policy }}
+ {{- end }}
+ {{- if .health_check }}
+ health_check {{ .health_check }}
+ {{- end }}
+ {{- if .max_concurrent }}
+ max_concurrent {{ .max_concurrent }}
+ {{- end }}
+ }
+ {{- end }}
+
+ {{- if $.kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
+ hosts /etc/coredns/hosts {
+ fallthrough
+ }
+ {{- end }}
+ }
+ {{- end }}
+
+{{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
+ hosts: |
+ {{- range .kubernetes.coredns.dns_etc_hosts }}
+ {{ . }}
+ {{- end }}
+{{- end }}
diff --git a/feature/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset b/feature/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset
new file mode 100644
index 000000000..c205438a7
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset
@@ -0,0 +1,229 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: nodelocaldns
+ namespace: kube-system
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: nodelocaldns
+ namespace: kube-system
+ labels:
+ k8s-app: kube-dns
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ selector:
+ matchLabels:
+ k8s-app: nodelocaldns
+ template:
+ metadata:
+ labels:
+ k8s-app: nodelocaldns
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/port: '9253'
+ spec:
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ serviceAccountName: nodelocaldns
+ hostNetwork: true
+ dnsPolicy: Default # Don't use cluster DNS.
+ tolerations:
+ - effect: NoSchedule
+ operator: "Exists"
+ - effect: NoExecute
+ operator: "Exists"
+ - key: "CriticalAddonsOnly"
+ operator: "Exists"
+ containers:
+ - name: node-cache
+ image: {{ .kubernetes.networking.dns_cache_image }}
+ resources:
+ limits:
+ memory: 200Mi
+ requests:
+ cpu: 100m
+ memory: 70Mi
+ args: [ "-localip", "169.254.25.10", "-conf", "/etc/coredns/Corefile", "-upstreamsvc", "coredns" ]
+ securityContext:
+ privileged: true
+ ports:
+ - containerPort: 53
+ name: dns
+ protocol: UDP
+ - containerPort: 53
+ name: dns-tcp
+ protocol: TCP
+ - containerPort: 9253
+ name: metrics
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ host: 169.254.25.10
+ path: /health
+ port: 9254
+ scheme: HTTP
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 10
+ readinessProbe:
+ httpGet:
+ host: 169.254.25.10
+ path: /health
+ port: 9254
+ scheme: HTTP
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 10
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/coredns
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
+ volumes:
+ - name: config-volume
+ configMap:
+ name: nodelocaldns
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+ # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 20%
+ type: RollingUpdate
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: nodelocaldns
+ namespace: kube-system
+ labels:
+ addonmanager.kubernetes.io/mode: EnsureExists
+data:
+ Corefile: |
+ {{- range .kubernetes.coredns.external_zones }}
+    {{ .zones | join " " }} {
+ log
+ errors
+ loadbalance
+ cache {{ .cache }}
+ reload
+ loop
+ bind 169.254.25.10
+ prometheus :9253
+
+ {{- range .rewrite }}
+ rewrite {{ .rule }} {
+ {{ .field }} {{ .type }} {{ .value }}
+ {{ .options }}
+ }
+ {{- end }}
+
+ {{- range .forward }}
+ forward {{ .from }} {{ .to | join " " }} {
+ {{- if .except | len | lt 0 }}
+ except {{ .except | join " " }}
+ {{- end }}
+ {{- if .force_tcp }}
+ force_tcp
+ {{- end }}
+      {{- if .prefer_udp }}
+ prefer_udp
+ {{- end }}
+ max_fails {{ .max_fails | default 2 }}
+ expire {{ .expire | default "10s" }}
+ {{- if .tls }}
+ tls {{ .tls.cert_file }} {{ .tls.key_file }} {{ .tls.ca_file }}
+ {{- end }}
+ {{- if .tls_servername }}
+ tls_servername {{ .tls_servername }}
+ {{- end }}
+ {{- if .policy }}
+ policy {{ .policy }}
+ {{- end }}
+ {{- if .health_check }}
+ health_check {{ .health_check }}
+ {{- end }}
+ {{- if .max_concurrent }}
+ max_concurrent {{ .max_concurrent }}
+ {{- end }}
+ }
+ {{- end }}
+
+ {{- if $.kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
+ hosts /etc/coredns/hosts {
+ fallthrough
+ }
+ {{- end }}
+ }
+ {{- end }}
+
+ {{ .kubernetes.networking.dns_domain }}:53 {
+ errors
+ cache {
+ success 9984 30
+ denial 9984 5
+ }
+ reload
+ loop
+ bind 169.254.25.10
+ forward . {{ .kubernetes.networking.dns_service_ip }} {
+ force_tcp
+ }
+ prometheus :9253
+ health 169.254.25.10:9254
+ }
+ in-addr.arpa:53 {
+ errors
+ cache 30
+ reload
+ loop
+ bind 169.254.25.10
+ forward . {{ .kubernetes.networking.dns_service_ip }} {
+ force_tcp
+ }
+ prometheus :9253
+ }
+ ip6.arpa:53 {
+ errors
+ cache 30
+ reload
+ loop
+ bind 169.254.25.10
+ forward . {{ .kubernetes.networking.dns_service_ip }} {
+ force_tcp
+ }
+ prometheus :9253
+ }
+ .:53 {
+ errors
+ cache 30
+ reload
+ loop
+ bind 169.254.25.10
+ forward . /etc/resolv.conf
+ prometheus :9253
+ {{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
+ hosts /etc/coredns/hosts {
+ fallthrough
+ }
+ {{- end }}
+ }
+
+{{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
+ hosts: |
+ {{- range .kubernetes.coredns.dns_etc_hosts }}
+ {{ . }}
+ {{- end }}
+{{- end }}
diff --git a/feature/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg b/feature/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg
new file mode 100644
index 000000000..8f095ffe8
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg
@@ -0,0 +1,41 @@
+global
+ maxconn 4000
+ log 127.0.0.1 local0
+
+defaults
+ mode http
+ log global
+ option httplog
+ option dontlognull
+ option http-server-close
+ option redispatch
+ retries 5
+ timeout http-request 5m
+ timeout queue 5m
+    timeout connect 30s
+ timeout client 30s
+ timeout server 15m
+ timeout http-keep-alive 30s
+ timeout check 30s
+ maxconn 4000
+
+frontend healthz
+ bind *:{{ .kubernetes.haproxy.health_port }}
+ mode http
+ monitor-uri /healthz
+
+frontend kube_api_frontend
+ bind 127.0.0.1:{{ .kubernetes.apiserver.port }}
+ mode tcp
+ option tcplog
+ default_backend kube_api_backend
+
+backend kube_api_backend
+ mode tcp
+ balance leastconn
+ default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100
+ option httpchk GET /healthz
+ http-check expect status 200
+{{- range .groups.kube_control_plane | default list }}
+ server {{ index $.inventory_hosts . "hostname" }} {{ index $.inventory_hosts . "internal_ipv4" }}:{{ $.kubernetes.apiserver.port }} check check-ssl verify none
+{{- end }}
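+# check-ssl is required because the kube-apiserver serves HTTPS; `verify none`
+# skips CA verification for the health probe only.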
diff --git a/feature/builtin/roles/install/kubernetes/templates/haproxy/haproxy.yaml b/feature/builtin/roles/install/kubernetes/templates/haproxy/haproxy.yaml
new file mode 100644
index 000000000..d7b173fa9
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/templates/haproxy/haproxy.yaml
@@ -0,0 +1,41 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: haproxy
+ namespace: kube-system
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+ k8s-app: kube-haproxy
+ annotations:
+ cfg-checksum: "{{ .cfg_md5.stdout }}"
+spec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ containers:
+ - name: haproxy
+ image: {{ .kubernetes.haproxy.image }}
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+ cpu: 25m
+ memory: 32M
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: {{ .kubernetes.haproxy.health_port }}
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ port: {{ .kubernetes.haproxy.health_port }}
+ volumeMounts:
+ - mountPath: /usr/local/etc/haproxy/
+ name: etc-haproxy
+ readOnly: true
+ volumes:
+ - name: etc-haproxy
+ hostPath:
+ path: /etc/kubekey/haproxy
diff --git a/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta2 b/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta2
new file mode 100644
index 000000000..a3986464f
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta2
@@ -0,0 +1,200 @@
+---
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: ClusterConfiguration
+etcd:
+{{- if .kubernetes.etcd.deployment_type | eq "internal" }}
+ local:
+    imageRepository: {{ slice (.kubernetes.etcd.image | splitList ":" | first | splitList "/") 0 (sub (.kubernetes.etcd.image | splitList ":" | first | splitList "/" | len) 1) | join "/" }}
+ imageTag: {{ .kubernetes.etcd.image | splitList ":" | last }}
+ serverCertSANs:
+ {{- range .groups.etcd | default list }}
+      - {{ index $.inventory_hosts . "internal_ipv4" }}
+ {{- end }}
+{{- else }}
+ external:
+ endpoints:
+ {{- range .groups.etcd | default list }}
+ - https://{{ index $.inventory_hosts . "internal_ipv4" }}:2379
+ {{- end }}
+ caFile: /etc/kubernetes/pki/etcd/ca.crt
+ certFile: /etc/kubernetes/pki/etcd/client.crt
+ keyFile: /etc/kubernetes/pki/etcd/client.key
+{{- end }}
+dns:
+ type: CoreDNS
+  imageRepository: {{ slice (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/") 0 (sub (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/" | len) 1) | join "/" }}
+ imageTag: {{ .kubernetes.networking.dns_image | splitList ":" | last }}
+imageRepository: {{ .kubernetes.image_repository }}
+kubernetesVersion: {{ .kube_version }}
+certificatesDir: /etc/kubernetes/pki
+clusterName: {{ .kubernetes.cluster_name }}
+controlPlaneEndpoint: {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
+networking:
+ dnsDomain: {{ .kubernetes.networking.dns_domain }}
+ podSubnet: {{ .kubernetes.networking.pod_cidr }}
+ serviceSubnet: {{ .kubernetes.networking.service_cidr }}
+apiServer:
+ extraArgs:
+{{- if .security_enhancement }}
+ authorization-mode: Node,RBAC
+ enable-admission-plugins: AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity
+ profiling: false
+ request-timeout: 120s
+ service-account-lookup: true
+ tls-min-version: VersionTLS12
+ tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
+{{- end }}
+{{- if .kubernetes.audit }}
+ audit-log-format: json
+ audit-log-maxbackup: 2
+ audit-log-maxsize: 200
+ audit-policy-file: /etc/kubernetes/audit/policy.yaml
+ audit-webhook-config-file: /etc/kubernetes/audit/webhook.yaml
+{{- end }}
+{{ .kubernetes.apiserver.extra_args | toYaml | indent 4 }}
+ certSANs:
+ - kubernetes
+ - kubernetes.default
+ - kubernetes.default.svc
+ - localhost
+ - 127.0.0.1
+ - {{ .kubernetes.networking.service_cidr | ipInCIDR 0 }}
+ - {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
+ - kubernetes.default.svc.{{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
+ - kubernetes.default.svc.{{ .kubernetes.networking.dns_domain }}
+ {{- range .groups.k8s_cluster | default list }}
+ - {{ index $.inventory_hosts . "hostname" }}.{{ $.kubernetes.networking.dns_domain }}
+ - {{ index $.inventory_hosts . "internal_ipv4" }}
+ {{- if index $.inventory_hosts . "internal_ipv6" }}
+ - {{ index $.inventory_hosts . "internal_ipv6" }}
+ {{- end }}
+ {{- end }}
+ {{- range .kubernetes.apiserver.certSANs }}
+ - {{ . }}
+ {{- end }}
+{{- if .kubernetes.audit }}
+ extraVolumes:
+ - name: k8s-audit
+ hostPath: /etc/kubernetes/audit
+ mountPath: /etc/kubernetes/audit
+ pathType: DirectoryOrCreate
+{{- end }}
+controllerManager:
+ extraArgs:
+{{- if gt ( .kubernetes.networking.pod_cidr | splitList "," | len) 1 }}
+ node-cidr-mask-size-ipv4: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}"
+ node-cidr-mask-size-ipv6: "64"
+{{- else }}
+ node-cidr-mask-size: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}"
+{{- end }}
+{{- if .kube_version | semverCompare ">=v1.19.0" }}
+ cluster-signing-duration: 87600h
+{{- else }}
+ experimental-cluster-signing-duration: 87600h
+{{- end }}
+{{- if .security_enhancement }}
+ bind-address: 127.0.0.1
+ profiling: false
+ terminated-pod-gc-threshold: 50
+ use-service-account-credentials: true
+{{- else }}
+ bind-address: 0.0.0.0
+{{- end }}
+{{ .kubernetes.controller_manager.extra_args | toYaml | indent 4 }}
+ extraVolumes:
+ - name: host-time
+ hostPath: /etc/localtime
+ mountPath: /etc/localtime
+ readOnly: true
+scheduler:
+ extraArgs:
+{{- if .security_enhancement }}
+ bind-address: 127.0.0.1
+ profiling: false
+{{- else }}
+ bind-address: 0.0.0.0
+{{- end }}
+{{ .kubernetes.scheduler.extra_args | toYaml | indent 4 }}
+
+---
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: InitConfiguration
+localAPIEndpoint:
+ advertiseAddress: {{ .internal_ipv4 }}
+ bindPort: {{ .kubernetes.apiserver.port }}
+nodeRegistration:
+ criSocket: {{ .cri.cri_socket }}
+ kubeletExtraArgs:
+ cgroup-driver: {{ .cri.cgroup_driver }}
+
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+clusterCIDR: {{ .kubernetes.networking.pod_cidr }}
+mode: {{ .kubernetes.kube_proxy.mode }}
+{{ .kubernetes.kube_proxy.config | toYaml }}
+
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+clusterDomain: {{ .kubernetes.networking.dns_domain }}
+clusterDNS:
+ - {{ .kubernetes.networking.dns_service_ip }}
+maxPods: {{ .kubernetes.max_pods }}
+podPidsLimit: {{ .kubernetes.kubelet.pod_pids_limit }}
+rotateCertificates: true
+kubeReserved:
+ cpu: 200m
+ memory: 250Mi
+systemReserved:
+ cpu: 200m
+ memory: 250Mi
+evictionHard:
+ memory.available: 5%
+ pid.available: 10%
+evictionSoft:
+ memory.available: 10%
+evictionSoftGracePeriod:
+ memory.available: 2m
+evictionMaxPodGracePeriod: 120
+evictionPressureTransitionPeriod: 30s
+{{- if .security_enhancement }}
+readOnlyPort: 0
+protectKernelDefaults: true
+eventRecordQPS: 1
+streamingConnectionIdleTimeout: 5m
+makeIPTablesUtilChains: true
+tlsCipherSuites:
+ - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
+ - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
+featureGates:
+ RotateKubeletServerCertificate: true
+ SeccompDefault: true
+ {{- if .kube_version | semverCompare ">=v1.24.0" }}
+ TTLAfterFinished: true
+ {{- end }}
+  {{- if .kube_version | semverCompare ">=v1.21.0" }}
+ CSIStorageCapacity: true
+ {{- end }}
+{{ .kubernetes.kubelet.feature_gates | toYaml | indent 2 }}
+{{- else }}
+featureGates:
+ RotateKubeletServerCertificate: true
+ {{- if .kube_version | semverCompare ">=v1.24.0" }}
+ TTLAfterFinished: true
+ {{- end }}
+ {{- if .kube_version | semverCompare ">=v1.21.0" }}
+ CSIStorageCapacity: true
+ ExpandCSIVolumes: true
+ {{- end }}
+ {{- if .kubernetes.kubelet.feature_gates }}
+{{ .kubernetes.kubelet.feature_gates | toYaml | indent 2 }}
+ {{- end }}
+{{- end }}
+cgroupDriver: {{ .cri.cgroup_driver }}
+containerLogMaxSize: {{ .kubernetes.kubelet.container_log_max_size }}
+containerLogMaxFiles: {{ .kubernetes.kubelet.container_log_max_files }}
+{{- if .kubernetes.kubelet.extra_args }}
+{{ .kubernetes.kubelet.extra_args | toYaml }}
+{{- end }}
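Note: the `imageRepository`/`imageTag` pipelines above split an image reference on `:` and `/`: the tag is the segment after the last `:`, and the repository is the name with its first path segment (the registry host) dropped, since kubeadm prepends its own registry. A hedged Go sketch of the same split (the function name is ours); as in the template itself, a registry host carrying a port would confuse the colon split:

```go
package main

import (
	"fmt"
	"strings"
)

// splitImage mirrors the template pipeline: the name is everything
// before the first ":", the tag everything after the last ":"; the
// name is split on "/" and its first segment (the registry host) is
// dropped before rejoining.
func splitImage(ref string) (repo, tag string) {
	segs := strings.Split(ref, ":")
	name := segs[0]
	tag = segs[len(segs)-1]
	parts := strings.Split(name, "/")
	repo = strings.Join(parts[1:], "/")
	return repo, tag
}

func main() {
	repo, tag := splitImage("registry.k8s.io/coredns/coredns:v1.8.6")
	fmt.Println(repo, tag) // coredns/coredns v1.8.6
}
```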
diff --git a/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3 b/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3
new file mode 100644
index 000000000..ec02d2ff9
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3
@@ -0,0 +1,199 @@
+---
+apiVersion: kubeadm.k8s.io/v1beta3
+kind: ClusterConfiguration
+etcd:
+{{- if .kubernetes.etcd.deployment_type | eq "internal" }}
+ local:
+ imageRepository: {{ slice (.kubernetes.etcd.image | splitList ":" | first | splitList "/") 1 (.kubernetes.etcd.image | splitList ":" | first | splitList "/" | len) | join "/" }}
+ imageTag: {{ .kubernetes.etcd.image | splitList ":" | last }}
+ serverCertSANs:
+ {{- range .groups.etcd | default list }}
+    - {{ index $.inventory_hosts . "internal_ipv4" }}
+ {{- end }}
+{{- else }}
+ external:
+ endpoints:
+ {{- range .groups.etcd | default list }}
+ - https://{{ index $.inventory_hosts . "internal_ipv4" }}:2379
+ {{- end }}
+ caFile: /etc/kubernetes/pki/etcd/ca.crt
+ certFile: /etc/kubernetes/pki/etcd/client.crt
+ keyFile: /etc/kubernetes/pki/etcd/client.key
+{{- end }}
+dns:
+ imageRepository: {{ slice (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/") 1 (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/" | len) | join "/" }}
+ imageTag: {{ .kubernetes.networking.dns_image | splitList ":" | last }}
+imageRepository: {{ .kubernetes.image_repository }}
+kubernetesVersion: {{ .kube_version }}
+certificatesDir: /etc/kubernetes/pki
+clusterName: {{ .kubernetes.cluster_name }}
+controlPlaneEndpoint: {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
+networking:
+ dnsDomain: {{ .kubernetes.networking.dns_domain }}
+ podSubnet: {{ .kubernetes.networking.pod_cidr }}
+ serviceSubnet: {{ .kubernetes.networking.service_cidr }}
+apiServer:
+ extraArgs:
+{{- if .security_enhancement }}
+ authorization-mode: Node,RBAC
+ enable-admission-plugins: AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity
+ profiling: false
+ request-timeout: 120s
+ service-account-lookup: true
+ tls-min-version: VersionTLS12
+ tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
+{{- end }}
+{{- if .kubernetes.audit }}
+ audit-log-format: json
+ audit-log-maxbackup: 2
+ audit-log-maxsize: 200
+ audit-policy-file: /etc/kubernetes/audit/policy.yaml
+ audit-webhook-config-file: /etc/kubernetes/audit/webhook.yaml
+{{- end }}
+{{ .kubernetes.apiserver.extra_args | toYaml | indent 4 }}
+ certSANs:
+ - kubernetes
+ - kubernetes.default
+ - kubernetes.default.svc
+ - localhost
+ - 127.0.0.1
+ - {{ .kubernetes.networking.service_cidr | ipInCIDR 0 }}
+ - {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
+ - kubernetes.default.svc.{{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
+ - kubernetes.default.svc.{{ .kubernetes.networking.dns_domain }}
+ {{- range .groups.k8s_cluster | default list }}
+  - {{ index $.inventory_hosts . "hostname" }}.{{ $.kubernetes.networking.dns_domain }}
+ - {{ index $.inventory_hosts . "internal_ipv4" }}
+ {{- if index $.inventory_hosts . "internal_ipv6" }}
+ - {{ index $.inventory_hosts . "internal_ipv6" }}
+ {{- end }}
+ {{- end }}
+ {{- range .kubernetes.apiserver.certSANs }}
+ - {{ . }}
+ {{- end }}
+{{- if .kubernetes.audit }}
+ extraVolumes:
+ - name: k8s-audit
+ hostPath: /etc/kubernetes/audit
+ mountPath: /etc/kubernetes/audit
+ pathType: DirectoryOrCreate
+{{- end }}
+controllerManager:
+ extraArgs:
+{{- if gt ( .kubernetes.networking.pod_cidr | splitList "," | len) 1 }}
+ node-cidr-mask-size-ipv4: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}"
+ node-cidr-mask-size-ipv6: "64"
+{{- else }}
+ node-cidr-mask-size: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}"
+{{- end }}
+{{- if .kube_version | semverCompare ">=v1.19.0" }}
+ cluster-signing-duration: 87600h
+{{- else }}
+ experimental-cluster-signing-duration: 87600h
+{{- end }}
+{{- if .security_enhancement }}
+ bind-address: 127.0.0.1
+ profiling: false
+ terminated-pod-gc-threshold: 50
+ use-service-account-credentials: true
+{{- else }}
+ bind-address: 0.0.0.0
+{{- end }}
+{{ .kubernetes.controller_manager.extra_args | toYaml | indent 4 }}
+ extraVolumes:
+ - name: host-time
+ hostPath: /etc/localtime
+ mountPath: /etc/localtime
+ readOnly: true
+scheduler:
+ extraArgs:
+{{- if .security_enhancement }}
+ bind-address: 127.0.0.1
+ profiling: false
+{{- else }}
+ bind-address: 0.0.0.0
+{{- end }}
+{{ .kubernetes.scheduler.extra_args | toYaml | indent 4 }}
+
+---
+apiVersion: kubeadm.k8s.io/v1beta3
+kind: InitConfiguration
+localAPIEndpoint:
+ advertiseAddress: {{ .internal_ipv4 }}
+ bindPort: {{ .kubernetes.apiserver.port }}
+nodeRegistration:
+ criSocket: {{ .cri.cri_socket }}
+ kubeletExtraArgs:
+ cgroup-driver: {{ .cri.cgroup_driver }}
+
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+clusterCIDR: {{ .kubernetes.networking.pod_cidr }}
+mode: {{ .kubernetes.kube_proxy.mode }}
+{{ .kubernetes.kube_proxy.config | toYaml }}
+
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+clusterDomain: {{ .kubernetes.networking.dns_domain }}
+clusterDNS:
+ - {{ .kubernetes.networking.dns_service_ip }}
+maxPods: {{ .kubernetes.max_pods }}
+podPidsLimit: {{ .kubernetes.kubelet.pod_pids_limit }}
+rotateCertificates: true
+kubeReserved:
+ cpu: 200m
+ memory: 250Mi
+systemReserved:
+ cpu: 200m
+ memory: 250Mi
+evictionHard:
+ memory.available: 5%
+ pid.available: 10%
+evictionSoft:
+ memory.available: 10%
+evictionSoftGracePeriod:
+ memory.available: 2m
+evictionMaxPodGracePeriod: 120
+evictionPressureTransitionPeriod: 30s
+{{- if .security_enhancement }}
+readOnlyPort: 0
+protectKernelDefaults: true
+eventRecordQPS: 1
+streamingConnectionIdleTimeout: 5m
+makeIPTablesUtilChains: true
+tlsCipherSuites:
+ - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
+ - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
+featureGates:
+ RotateKubeletServerCertificate: true
+ SeccompDefault: true
+ {{- if .kube_version | semverCompare ">=v1.24.0" }}
+ TTLAfterFinished: true
+ {{- end }}
+  {{- if .kube_version | semverCompare ">=v1.21.0" }}
+ CSIStorageCapacity: true
+ {{- end }}
+{{ .kubernetes.kubelet.feature_gates | toYaml | indent 2 }}
+{{- else }}
+featureGates:
+ RotateKubeletServerCertificate: true
+ {{- if .kube_version | semverCompare ">=v1.24.0" }}
+ TTLAfterFinished: true
+ {{- end }}
+ {{- if .kube_version | semverCompare ">=v1.21.0" }}
+ CSIStorageCapacity: true
+ ExpandCSIVolumes: true
+ {{- end }}
+  {{- if .kubernetes.kubelet.feature_gates }}
+{{ .kubernetes.kubelet.feature_gates | toYaml | indent 2 }}
+  {{- end }}
+{{- end }}
+cgroupDriver: {{ .cri.cgroup_driver }}
+containerLogMaxSize: {{ .kubernetes.kubelet.container_log_max_size }}
+containerLogMaxFiles: {{ .kubernetes.kubelet.container_log_max_files }}
+{{- if .kubernetes.kubelet.extra_args }}
+{{ .kubernetes.kubelet.extra_args | toYaml }}
+{{- end }}
diff --git a/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta2 b/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta2
new file mode 100644
index 000000000..2ffa06fd2
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta2
@@ -0,0 +1,19 @@
+---
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: JoinConfiguration
+discovery:
+ bootstrapToken:
+ apiServerEndpoint: {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}:{{ .kubernetes.apiserver.port }}
+ token: "{{ .kubeadm_token }}"
+ unsafeSkipCAVerification: true
+{{- if .groups.kube_control_plane | default list | has .inventory_name }}
+controlPlane:
+ localAPIEndpoint:
+ advertiseAddress: {{ .internal_ipv4 }}
+ bindPort: {{ .kubernetes.apiserver.port }}
+ certificateKey: {{ .kubeadm_cert }}
+{{- end }}
+nodeRegistration:
+ criSocket: {{ .cri.cri_socket }}
+ kubeletExtraArgs:
+ cgroup-driver: {{ .cri.cgroup_driver }}
diff --git a/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta3 b/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta3
new file mode 100644
index 000000000..20d84f875
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta3
@@ -0,0 +1,19 @@
+---
+apiVersion: kubeadm.k8s.io/v1beta3
+kind: JoinConfiguration
+discovery:
+ bootstrapToken:
+ apiServerEndpoint: {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}:{{ .kubernetes.apiserver.port }}
+ token: "{{ .kubeadm_token }}"
+ unsafeSkipCAVerification: true
+{{- if .groups.kube_control_plane | default list | has .inventory_name }}
+controlPlane:
+ localAPIEndpoint:
+ advertiseAddress: {{ .internal_ipv4 }}
+ bindPort: {{ .kubernetes.apiserver.port }}
+ certificateKey: {{ .kubeadm_cert }}
+{{- end }}
+nodeRegistration:
+ criSocket: {{ .cri.cri_socket }}
+ kubeletExtraArgs:
+ cgroup-driver: {{ .cri.cgroup_driver }}
diff --git a/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env b/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env
new file mode 100644
index 000000000..01ff57a56
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env
@@ -0,0 +1,12 @@
+# Note: This dropin only works with kubeadm and kubelet v1.11+
+[Service]
+Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
+Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
+# This is a file that "kubeadm init" and "kubeadm join" generate at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
+EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
+# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
+# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
+EnvironmentFile=-/etc/default/kubelet
+Environment="KUBELET_EXTRA_ARGS=--node-ip={{ .internal_ipv4 }} --hostname-override={{ .hostname }} {{ range $k,$v := .kubernetes.kubelet.extra_args }}--{{ $k }} {{ $v }} {{ end }}"
+ExecStart=
+ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
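Note: KUBELET_EXTRA_ARGS is built by ranging over the `kubernetes.kubelet.extra_args` map and emitting one `--key value` pair per entry (Go templates iterate map keys in sorted order, so the generated drop-in is deterministic). A plain-Go sketch of the same rendering, sorting keys explicitly; names and values here are illustrative:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// renderExtraArgs rebuilds the KUBELET_EXTRA_ARGS value: the fixed
// --node-ip/--hostname-override flags followed by one "--key value"
// pair per extra arg. Sorting the keys keeps the output stable across
// regenerations, matching text/template's sorted map iteration.
func renderExtraArgs(nodeIP, hostname string, extra map[string]string) string {
	keys := make([]string, 0, len(extra))
	for k := range extra {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	var b strings.Builder
	fmt.Fprintf(&b, "--node-ip=%s --hostname-override=%s", nodeIP, hostname)
	for _, k := range keys {
		fmt.Fprintf(&b, " --%s %s", k, extra[k])
	}
	return b.String()
}

func main() {
	fmt.Println(renderExtraArgs("10.0.0.10", "master-0", map[string]string{"v": "2"}))
	// --node-ip=10.0.0.10 --hostname-override=master-0 --v 2
}
```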
diff --git a/feature/builtin/roles/install/kubernetes/templates/kubevip/kubevip.ARP b/feature/builtin/roles/install/kubernetes/templates/kubevip/kubevip.ARP
new file mode 100644
index 000000000..f0909ea8c
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/templates/kubevip/kubevip.ARP
@@ -0,0 +1,65 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ creationTimestamp: null
+ name: kube-vip
+ namespace: kube-system
+spec:
+ containers:
+ - args:
+ - manager
+ env:
+ - name: vip_arp
+ value: "true"
+ - name: port
+ value: "6443"
+ - name: vip_interface
+ value: {{ .interface.stdout }}
+ - name: vip_cidr
+ value: "32"
+ - name: cp_enable
+ value: "true"
+ - name: cp_namespace
+ value: kube-system
+ - name: vip_ddns
+ value: "false"
+ - name: svc_enable
+ value: "true"
+ - name: vip_leaderelection
+ value: "true"
+ - name: vip_leaseduration
+ value: "5"
+ - name: vip_renewdeadline
+ value: "3"
+ - name: vip_retryperiod
+ value: "1"
+ - name: lb_enable
+ value: "true"
+ - name: lb_port
+ value: "6443"
+ - name: address
+ value: {{ .kubernetes.kube_vip.address }}
+    image: {{ .kubernetes.kube_vip.image }}
+ imagePullPolicy: IfNotPresent
+ name: kube-vip
+ resources: {}
+ securityContext:
+ capabilities:
+ add:
+ - NET_ADMIN
+ - NET_RAW
+ - SYS_TIME
+ volumeMounts:
+ - mountPath: /etc/kubernetes/admin.conf
+ name: kubeconfig
+ hostAliases:
+ - hostnames:
+ - kubernetes
+ ip: 127.0.0.1
+ hostNetwork: true
+ volumes:
+ - hostPath:
+ path: /etc/kubernetes/admin.conf
+ name: kubeconfig
+status: {}
diff --git a/feature/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP b/feature/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP
new file mode 100644
index 000000000..1578f35c2
--- /dev/null
+++ b/feature/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP
@@ -0,0 +1,82 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ creationTimestamp: null
+ name: kube-vip
+ namespace: kube-system
+spec:
+ containers:
+ - args:
+ - manager
+ env:
+ - name: vip_arp
+ value: "false"
+ - name: port
+ value: "6443"
+ - name: vip_interface
+ value: {{ .interface.stdout }}
+ - name: vip_cidr
+ value: "32"
+ - name: cp_enable
+ value: "true"
+ - name: cp_namespace
+ value: kube-system
+ - name: vip_ddns
+ value: "false"
+ - name: svc_enable
+ value: "true"
+ - name: bgp_enable
+ value: "true"
+ - name: bgp_routerid
+ value: |
+ {{- $ips := list }}
+ {{- range .groups.kube_control_plane | default list }}
+ {{- $ips = append $ips (index $.inventory_hosts . "internal_ipv4") }}
+ {{- end }}
+ {{ $ips | join "," }}
+ - name: bgp_as
+ value: "65000"
+ - name: bgp_peeraddress
+ - name: bgp_peerpass
+ - name: bgp_peeras
+ value: "65000"
+ - name: bgp_peers
+ value: |
+ {{- $ips := list }}
+ {{- range .groups.kube_control_plane | default list }}
+ {{- $ips = append $ips (printf "%s:65000::false" (index $.inventory_hosts . "internal_ipv4")) }}
+ {{- end }}
+ {{ $ips | join "," }}
+ - name: lb_enable
+ value: "true"
+ - name: lb_port
+ value: "6443"
+ - name: lb_fwdmethod
+ value: local
+ - name: address
+ value: {{ .kubernetes.kube_vip.address }}
+ - name: prometheus_server
+ value: :2112
+    image: {{ .kubernetes.kube_vip.image }}
+ imagePullPolicy: IfNotPresent
+ name: kube-vip
+ resources: {}
+ securityContext:
+ capabilities:
+ add:
+ - NET_ADMIN
+ - NET_RAW
+ volumeMounts:
+ - mountPath: /etc/kubernetes/admin.conf
+ name: kubeconfig
+ hostAliases:
+ - hostnames:
+ - kubernetes
+ ip: 127.0.0.1
+ hostNetwork: true
+ volumes:
+ - hostPath:
+ path: /etc/kubernetes/admin.conf
+ name: kubeconfig
+status: {}
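Note: the `bgp_peers` block above computes one `<ip>:<as>:<password>:<multihop>` entry per control-plane node and joins them with commas, which appears to be the peer format kube-vip expects (empty password, multihop off). A small Go sketch of that computation under those assumptions:

```go
package main

import (
	"fmt"
	"strings"
)

// bgpPeers joins one "<ip>:<as>::false" entry per control-plane IP,
// mirroring what the template block computes.
func bgpPeers(controlPlaneIPs []string, as uint32) string {
	peers := make([]string, 0, len(controlPlaneIPs))
	for _, ip := range controlPlaneIPs {
		peers = append(peers, fmt.Sprintf("%s:%d::false", ip, as))
	}
	return strings.Join(peers, ",")
}

func main() {
	fmt.Println(bgpPeers([]string{"10.0.0.10", "10.0.0.11"}, 65000))
	// 10.0.0.10:65000::false,10.0.0.11:65000::false
}
```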
diff --git a/feature/builtin/roles/install/nfs/defaults/main.yaml b/feature/builtin/roles/install/nfs/defaults/main.yaml
new file mode 100644
index 000000000..c02abb0c4
--- /dev/null
+++ b/feature/builtin/roles/install/nfs/defaults/main.yaml
@@ -0,0 +1,3 @@
+nfs:
+ share_dir:
+ - /share/
diff --git a/feature/builtin/roles/install/nfs/tasks/debian.yaml b/feature/builtin/roles/install/nfs/tasks/debian.yaml
new file mode 100644
index 000000000..5ed766aaa
--- /dev/null
+++ b/feature/builtin/roles/install/nfs/tasks/debian.yaml
@@ -0,0 +1,28 @@
+---
+- name: Check if nfs is installed
+ ignore_errors: true
+ command: systemctl status nfs-kernel-server
+ register: nfs_server_install
+
+- name: Install nfs
+ command: apt update && apt install -y nfs-kernel-server
+ when: .nfs_server_install.stderr | ne ""
+
+- name: Create nfs share directory
+ command: |
+ if [ ! -d {{ .item }} ]; then
+ mkdir -p {{ .item }}
+ chmod -R 0755 {{ .item }}
+ chown nobody:nogroup {{ .item }}
+ fi
+ loop: "{{ .nfs.share_dir | toJson }}"
+
+- name: Generate nfs config
+ template:
+ src: exports
+ dest: /etc/exports
+
+- name: Export share directory and start nfs server
+ command: |
+ exportfs -a
+ systemctl enable nfs-kernel-server && systemctl restart nfs-kernel-server
diff --git a/feature/builtin/roles/install/nfs/tasks/main.yaml b/feature/builtin/roles/install/nfs/tasks/main.yaml
new file mode 100644
index 000000000..74cc6865f
--- /dev/null
+++ b/feature/builtin/roles/install/nfs/tasks/main.yaml
@@ -0,0 +1,6 @@
+---
+- include_tasks: debian.yaml
+ when: .os.release.ID_LIKE | eq "debian"
+
+- include_tasks: rhel.yaml
+ when: .os.release.ID_LIKE | eq "rhel fedora"
diff --git a/feature/builtin/roles/install/nfs/tasks/rhel.yaml b/feature/builtin/roles/install/nfs/tasks/rhel.yaml
new file mode 100644
index 000000000..3912da456
--- /dev/null
+++ b/feature/builtin/roles/install/nfs/tasks/rhel.yaml
@@ -0,0 +1,28 @@
+---
+- name: Check if nfs is installed
+ ignore_errors: true
+ command: systemctl status nfs-server
+ register: nfs_server_install
+
+- name: Install nfs
+  command: yum update -y && yum install -y nfs-utils
+ when: .nfs_server_install.stderr | ne ""
+
+- name: Create nfs share directory
+ command: |
+ if [ ! -d {{ .item }} ]; then
+ mkdir -p {{ .item }}
+ chmod -R 0755 {{ .item }}
+ chown nobody:nobody {{ .item }}
+ fi
+  loop: "{{ .nfs.share_dir | toJson }}"
+
+- name: Generate nfs config
+ template:
+ src: exports
+ dest: /etc/exports
+
+- name: Export share directory and start nfs server
+ command: |
+ exportfs -a
+    systemctl enable nfs-server.service && systemctl restart nfs-server.service
diff --git a/feature/builtin/roles/install/nfs/templates/exports b/feature/builtin/roles/install/nfs/templates/exports
new file mode 100644
index 000000000..01ae4cea1
--- /dev/null
+++ b/feature/builtin/roles/install/nfs/templates/exports
@@ -0,0 +1,3 @@
+{{- range .nfs.share_dir }}
+{{ . }} *(rw,sync,no_subtree_check)
+{{- end }}
diff --git a/feature/builtin/roles/install/security/tasks/main.yaml b/feature/builtin/roles/install/security/tasks/main.yaml
new file mode 100644
index 000000000..dfd993df5
--- /dev/null
+++ b/feature/builtin/roles/install/security/tasks/main.yaml
@@ -0,0 +1,39 @@
+---
+- name: security enhancement for etcd
+ command: |
+ chmod 700 /etc/ssl/etcd/ssl && chown root:root /etc/ssl/etcd/ssl
+ chmod 600 /etc/ssl/etcd/ssl/* && chown root:root /etc/ssl/etcd/ssl/*
+ chmod 700 /var/lib/etcd && chown etcd:etcd /var/lib/etcd
+ chmod 550 /usr/local/bin/etcd* && chown root:root /usr/local/bin/etcd*
+ when: .groups.etcd | default list | has .inventory_name
+
+- name: security enhancement for control plane
+ command: |
+ chmod 644 /etc/kubernetes && chown root:root /etc/kubernetes
+ chmod 600 -R /etc/kubernetes && chown root:root -R /etc/kubernetes/*
+ chmod 644 /etc/kubernetes/manifests && chown root:root /etc/kubernetes/manifests
+ chmod 644 /etc/kubernetes/pki && chown root:root /etc/kubernetes/pki
+ chmod 600 -R /etc/cni/net.d && chown root:root -R /etc/cni/net.d
+ chmod 550 /usr/local/bin/ && chown root:root /usr/local/bin/
+ chmod 550 -R /usr/local/bin/kube* && chown root:root -R /usr/local/bin/kube*
+ chmod 550 /usr/local/bin/helm && chown root:root /usr/local/bin/helm
+ chmod 550 -R /opt/cni/bin && chown root:root -R /opt/cni/bin
+ chmod 640 /var/lib/kubelet/config.yaml && chown root:root /var/lib/kubelet/config.yaml
+ chmod 640 -R /etc/systemd/system/kubelet.service* && chown root:root -R /etc/systemd/system/kubelet.service*
+ chmod 640 /etc/systemd/system/k8s-certs-renew* && chown root:root /etc/systemd/system/k8s-certs-renew*
+ when: .groups.kube_control_plane | default list | has .inventory_name
+
+- name: security enhancement for worker
+ command: |
+ chmod 644 /etc/kubernetes && chown root:root /etc/kubernetes
+ chmod 600 -R /etc/kubernetes && chown root:root -R /etc/kubernetes/*
+ chmod 644 /etc/kubernetes/manifests && chown root:root /etc/kubernetes/manifests
+ chmod 644 /etc/kubernetes/pki && chown root:root /etc/kubernetes/pki
+ chmod 600 -R /etc/cni/net.d && chown root:root -R /etc/cni/net.d
+ chmod 550 /usr/local/bin/ && chown root:root /usr/local/bin/
+ chmod 550 -R /usr/local/bin/kube* && chown root:root -R /usr/local/bin/kube*
+ chmod 550 /usr/local/bin/helm && chown root:root /usr/local/bin/helm
+ chmod 550 -R /opt/cni/bin && chown root:root -R /opt/cni/bin
+ chmod 640 /var/lib/kubelet/config.yaml && chown root:root /var/lib/kubelet/config.yaml
+ chmod 640 -R /etc/systemd/system/kubelet.service* && chown root:root -R /etc/systemd/system/kubelet.service*
+ when: .groups.kube_worker | default list | has .inventory_name
diff --git a/feature/builtin/roles/precheck/artifact_check/tasks/main.yaml b/feature/builtin/roles/precheck/artifact_check/tasks/main.yaml
new file mode 100644
index 000000000..7101b5308
--- /dev/null
+++ b/feature/builtin/roles/precheck/artifact_check/tasks/main.yaml
@@ -0,0 +1,20 @@
+---
+- name: Check artifact exists
+  command: |
+    if [ ! -f "{{ .artifact.artifact_file }}" ]; then
+      exit 1
+    fi
+
+- name: Check artifact file type
+  command: |
+    if [[ "{{ .artifact.artifact_file }}" != *.tgz && "{{ .artifact.artifact_file }}" != *.tar.gz ]]; then
+      exit 1
+    fi
+
+- name: Check md5 of artifact
+  command: |
+    if [[ "$(md5sum {{ .artifact.artifact_file }} | cut -d' ' -f1)" != "{{ .artifact.artifact_md5 }}" ]]; then
+      exit 1
+    fi
+ when:
+ - and .artifact.artifact_md5 (ne .artifact.artifact_md5 "")
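Note: for reference, a hedged Go equivalent of the md5 gate above: stream the artifact through an MD5 hash and compare hex digests. Since `md5sum` prints `<digest>  <filename>`, the shell task compares only the first field:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// verifyMD5 streams the file through an MD5 hash and compares the hex
// digest with the expected value, the same check the shell task runs.
func verifyMD5(path, expected string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()

	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		return false, err
	}
	return hex.EncodeToString(h.Sum(nil)) == expected, nil
}

func main() {
	ok, err := verifyMD5("artifact.tgz", "d41d8cd98f00b204e9800998ecf8427e")
	fmt.Println(ok, err)
}
```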
diff --git a/feature/builtin/roles/precheck/env_check/defaults/main.yaml b/feature/builtin/roles/precheck/env_check/defaults/main.yaml
new file mode 100644
index 000000000..50438854e
--- /dev/null
+++ b/feature/builtin/roles/precheck/env_check/defaults/main.yaml
@@ -0,0 +1,31 @@
+cluster_require:
+  # the maximum allowed 90th-percentile etcd WAL fsync duration (unit: ns).
+  etcd_disk_wal_fsync_duration_seconds: 10000000
+ allow_unsupported_distribution_setup: false
+  # supported distributions; the quoted variants match /etc/os-release IDs that keep their surrounding quotes.
+ supported_os_distributions:
+ - ubuntu
+ - '"ubuntu"'
+ - centos
+ - '"centos"'
+ require_network_plugin: ['calico', 'flannel', 'cilium', 'hybridnet', 'kube-ovn']
+ # the minimal version of kubernetes to be installed.
+ kube_version_min_required: v1.19.10
+  # minimal memory size for each kube_control_plane node (unit: kB).
+  # each control plane node's MemTotal must be at least this value.
+  minimal_master_memory_mb: 10
+  # minimal memory size for each kube_worker node (unit: kB).
+  # each worker node's MemTotal must be at least this value.
+  minimal_node_memory_mb: 10
+ require_etcd_deployment_type: ['internal','external']
+ require_container_manager: ['docker', 'containerd']
+ # the minimal required version of containerd to be installed.
+ containerd_min_version_required: v1.6.0
+ supported_architectures:
+ amd64:
+ - amd64
+ - x86_64
+ arm64:
+ - arm64
+ - aarch64
+ min_kernel_version: 4.9.17
diff --git a/feature/builtin/roles/precheck/env_check/tasks/cri.yaml b/feature/builtin/roles/precheck/env_check/tasks/cri.yaml
new file mode 100644
index 000000000..de961da7d
--- /dev/null
+++ b/feature/builtin/roles/precheck/env_check/tasks/cri.yaml
@@ -0,0 +1,18 @@
+---
+- name: Stop if container manager is not docker or containerd
+ assert:
+ that: .cluster_require.require_container_manager | has .cri.container_manager
+ fail_msg: |
+    the container manager "{{ .cri.container_manager }}" must be "docker" or "containerd"
+ run_once: true
+ when: and .cri.container_manager (ne .cri.container_manager "")
+
+- name: Ensure minimum containerd version
+ assert:
+ that: .containerd_version | semverCompare (printf ">=%s" .cluster_require.containerd_min_version_required)
+ fail_msg: |
+ containerd_version is too low. Minimum version {{ .cluster_require.containerd_min_version_required }}
+ run_once: true
+ when:
+ - and .containerd_version (ne .containerd_version "")
+ - .cri.container_manager | eq "containerd"
diff --git a/feature/builtin/roles/precheck/env_check/tasks/etcd.yaml b/feature/builtin/roles/precheck/env_check/tasks/etcd.yaml
new file mode 100644
index 000000000..b4e4bee59
--- /dev/null
+++ b/feature/builtin/roles/precheck/env_check/tasks/etcd.yaml
@@ -0,0 +1,48 @@
+---
+- name: Stop if etcd deployment type is not internal or external
+ assert:
+ that: .cluster_require.require_etcd_deployment_type | has .kubernetes.etcd.deployment_type
+ fail_msg: |
+    the etcd deployment type should be "internal" or "external", but got "{{ .kubernetes.etcd.deployment_type }}"
+ run_once: true
+ when: and .kubernetes.etcd.deployment_type (ne .kubernetes.etcd.deployment_type "")
+
+- name: Stop if etcd group is empty in external etcd mode
+ assert:
+ that: .groups.etcd
+ fail_msg: "group \"etcd\" cannot be empty in external etcd mode"
+ run_once: true
+ when: .kubernetes.etcd.deployment_type | eq "external"
+
+- name: Stop if even number of etcd hosts
+ assert:
+ that: (mod (.groups.etcd | len) 2) | eq 1
+  fail_msg: "the number of etcd hosts should be odd"
+ when: .groups.etcd
+
+## https://cwiki.yunify.com/pages/viewpage.action?pageId=145920824
+- name: Check dev io for etcd
+ when:
+ - .groups.etcd | default list | has .inventory_name
+ block:
+ - name: Check fio is exist
+ ignore_errors: true
+ command: fio --version
+ register: fio_install_version
+ - name: Test dev io by fio
+ when: .fio_install_version.stderr | eq ""
+ block:
+ - name: Get fio result
+ command: |
+ mkdir -p /tmp/kubekey/etcd/test-data
+ fio --rw=write --ioengine=sync --fdatasync=1 --directory=/tmp/kubekey/etcd/test-data --size=22m --bs=2300 --name=mytest --output-format=json
+ register: fio_result
+ - name: Check fio result
+ assert:
+        that: le (index (.fio_result.stdout.jobs | first) "sync" "lat_ns" "percentile" "90.000000") .cluster_require.etcd_disk_wal_fsync_duration_seconds
+        fail_msg: |
+          etcd_disk_wal_fsync_duration_seconds: {{ index (.fio_result.stdout.jobs | first) "sync" "lat_ns" "percentile" "90.000000" }}ns is more than {{ .cluster_require.etcd_disk_wal_fsync_duration_seconds }}ns
+ always:
+ - name: Clean test data dir
+ command: rm -rf /tmp/kubekey/etcd/test-data
+
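Note: the fio gate above reads the 90th-percentile fdatasync latency from fio's JSON output (the task assumes the runner parses `stdout` as JSON). A minimal Go sketch of the same lookup and threshold check, with structs trimmed to just the fields the task indexes:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// fioResult keeps only the fields the precheck indexes:
// jobs[0].sync.lat_ns.percentile["90.000000"].
type fioResult struct {
	Jobs []struct {
		Sync struct {
			LatNS struct {
				Percentile map[string]float64 `json:"percentile"`
			} `json:"lat_ns"`
		} `json:"sync"`
	} `json:"jobs"`
}

// fsyncOK reports whether the 90th-percentile fdatasync latency (ns)
// stays within the configured threshold.
func fsyncOK(raw []byte, maxNS float64) (bool, error) {
	var r fioResult
	if err := json.Unmarshal(raw, &r); err != nil {
		return false, err
	}
	if len(r.Jobs) == 0 {
		return false, fmt.Errorf("no jobs in fio output")
	}
	return r.Jobs[0].Sync.LatNS.Percentile["90.000000"] <= maxNS, nil
}

func main() {
	raw := []byte(`{"jobs":[{"sync":{"lat_ns":{"percentile":{"90.000000":5000000}}}}]}`)
	ok, _ := fsyncOK(raw, 10000000) // 10ms threshold from the defaults
	fmt.Println(ok)                 // true
}
```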
diff --git a/feature/builtin/roles/precheck/env_check/tasks/main.yaml b/feature/builtin/roles/precheck/env_check/tasks/main.yaml
new file mode 100644
index 000000000..ebc767f81
--- /dev/null
+++ b/feature/builtin/roles/precheck/env_check/tasks/main.yaml
@@ -0,0 +1,45 @@
+---
+- name: Stop if internal_ipv4 is not defined
+ assert:
+ that: and .internal_ipv4 (ne .internal_ipv4 "")
+ fail_msg: |
+ "internal_ipv4" should not be empty
+
+- name: Check kubevip
+ assert:
+ that:
+ - and .kubernetes.kube_vip.address (ne .kubernetes.kube_vip.address "")
+ - .kubernetes.kube_vip.address | regexMatch "^((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])|(([0-9a-fA-F]{1,4}:){7}([0-9a-fA-F]{1,4}|:)|(([0-9a-fA-F]{1,4}:){1,6}|:):([0-9a-fA-F]{1,4}|:){1,6}([0-9a-fA-F]{1,4}|:)))$"
+ - |
+ {{- $existIP := false }}
+ {{- range .groups.all | default list }}
+ {{- if eq $.kubernetes.kube_vip.address (index $.inventory_hosts . "internal_ipv4") }}
+ {{- $existIP = true }}
+ {{- end }}
+ {{- end }}
+ {{ not $existIP }}
+ fail_msg: |
+    "kubernetes.kube_vip.address" should be an unused IP address when "kubernetes.kube_vip.enabled" is true
+ when: .kubernetes.kube_vip.enabled
+
+- name: Stop if unsupported version of Kubernetes
+ assert:
+ that: .kube_version | semverCompare (printf ">=%s" .cluster_require.kube_version_min_required)
+ fail_msg: |
+    the current release of KubeKey only supports Kubernetes versions newer than {{ .cluster_require.kube_version_min_required }} - you are trying to apply {{ .kube_version }}
+ when: and .kube_version (ne .kube_version "")
+
+- include_tasks: etcd.yaml
+ tags: ["etcd"]
+
+- include_tasks: os.yaml
+ tags: ["os"]
+
+- include_tasks: network.yaml
+ tags: ["network"]
+
+- include_tasks: cri.yaml
+ tags: ["cri"]
+
+- include_tasks: nfs.yaml
+ tags: ["nfs"]
diff --git a/feature/builtin/roles/precheck/env_check/tasks/network.yaml b/feature/builtin/roles/precheck/env_check/tasks/network.yaml
new file mode 100644
index 000000000..892f71da2
--- /dev/null
+++ b/feature/builtin/roles/precheck/env_check/tasks/network.yaml
@@ -0,0 +1,22 @@
+---
+- name: Stop if unknown network plugin
+ assert:
+ that: .cluster_require.require_network_plugin | has .kubernetes.kube_network_plugin
+ fail_msg: |
+ kube_network_plugin:"{{ .kubernetes.kube_network_plugin }}" is not supported
+ when: and .kubernetes.kube_network_plugin (ne .kubernetes.kube_network_plugin "")
+
+# This assertion will fail on the safe side: One can indeed schedule more pods
+# on a node than the CIDR-range has space for when additional pods use the host
+# network namespace. It is impossible to ascertain the number of such pods at
+# provisioning time, so to establish a guarantee, we factor these out.
+# NOTICE: the check blatantly ignores the inet6-case
+- name: Guarantee that enough network address space is available for all pods
+ assert:
+ that: le .kubernetes.kubelet.max_pods (sub (pow 2 (sub 32 .kubernetes.controller_manager.kube_network_node_prefix)) 2)
+  fail_msg: max_pods must not exceed the usable inet addresses in the per-node pod CIDR.
+ when:
+ - .groups.k8s_cluster | default list | has .inventory_name
+ - .kubernetes.controller_manager.kube_network_node_prefix
+ - .kubernetes.kube_network_plugin | ne "calico"
+
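Note: the bound being asserted is max_pods <= 2^(32 - kube_network_node_prefix) - 2: a per-node CIDR of that prefix yields 2^(32-prefix) addresses, and the check reserves two of them. A worked Go version of the arithmetic:

```go
package main

import "fmt"

// podCapacity returns the number of usable pod addresses in a per-node
// CIDR of the given prefix length: 2^(32-prefix) minus the two
// addresses the check reserves.
func podCapacity(prefix uint) int {
	return 1<<(32-prefix) - 2
}

func main() {
	prefix := uint(24) // kube_network_node_prefix
	maxPods := 110
	fmt.Println(podCapacity(prefix))            // 254
	fmt.Println(maxPods <= podCapacity(prefix)) // true: 110 pods fit in a /24
}
```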
diff --git a/feature/builtin/roles/precheck/env_check/tasks/nfs.yaml b/feature/builtin/roles/precheck/env_check/tasks/nfs.yaml
new file mode 100644
index 000000000..8271da20a
--- /dev/null
+++ b/feature/builtin/roles/precheck/env_check/tasks/nfs.yaml
@@ -0,0 +1,6 @@
+---
+- name: Stop if there is more than one nfs server
+ assert:
+ that: .groups.nfs | default list | len | eq 1
+ fail_msg: "only one nfs server is supported"
+ when: .groups.nfs
diff --git a/feature/builtin/roles/precheck/env_check/tasks/os.yaml b/feature/builtin/roles/precheck/env_check/tasks/os.yaml
new file mode 100644
index 000000000..bfe134edc
--- /dev/null
+++ b/feature/builtin/roles/precheck/env_check/tasks/os.yaml
@@ -0,0 +1,38 @@
+---
+- name: Stop if bad hostname
+ assert:
+ that: .hostname | regexMatch "^[a-z0-9]([a-z0-9-]*[a-z0-9])?(\\.[a-z0-9]([a-z0-9-]*[a-z0-9])?)*$"
+ fail_msg: "Hostname must consist of lower case alphanumeric characters, '.' or '-', and must start and end with an alphanumeric character"
+
+- name: Stop if the OS is not supported
+ assert:
+ that: or (.cluster_require.allow_unsupported_distribution_setup) (.cluster_require.supported_os_distributions | has .os.release.ID)
+ fail_msg: "{{ .os.release.ID }} is not a known OS"
+
+- name: Stop if arch is not supported
+ assert:
+ that: or (.cluster_require.supported_architectures.amd64 | has .os.architecture) (.cluster_require.supported_architectures.arm64 | has .os.architecture)
+ success_msg: |
+ {{- if .cluster_require.supported_architectures.amd64 | has .os.architecture }}
+ amd64
+ {{- else }}
+ arm64
+ {{- end }}
+ fail_msg: "{{ .os.architecture }} is not a known arch"
+ register: binary_type
+
+- name: Stop if memory is too small for masters
+ assert:
+ that: .process.memInfo.MemTotal | trimSuffix " kB" | atoi | le .cluster_require.minimal_master_memory_mb
+ when: .groups.kube_control_plane | default list | has .inventory_name
+
+- name: Stop if memory is too small for nodes
+ assert:
+ that: .process.memInfo.MemTotal | trimSuffix " kB" | atoi | le .cluster_require.minimal_node_memory_mb
+ when:
+ - .groups.kube_worker | default list | has .inventory_name
+
+- name: Stop if kernel version is too low
+ assert:
+ that: .os.kernel_version | splitList "-" | first | semverCompare (printf ">=%s" .cluster_require.min_kernel_version)
+ fail_msg: "kernel version: {{ .os.kernel_version }} is too low, required at least: {{ .cluster_require.min_kernel_version }} "
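Note: the kernel gate strips everything after the first `-` (e.g. `5.15.0-105-generic` becomes `5.15.0`) before the semver comparison. A hedged sketch using Masterminds/semver, the library behind sprig's `semverCompare`; the helper name is ours:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/Masterminds/semver/v3"
)

// kernelAtLeast drops the distro suffix after the first "-" and then
// checks the remaining version against a ">=min" constraint.
func kernelAtLeast(kernelVersion, min string) (bool, error) {
	base := strings.SplitN(kernelVersion, "-", 2)[0] // "5.15.0-105-generic" -> "5.15.0"
	v, err := semver.NewVersion(base)
	if err != nil {
		return false, err
	}
	c, err := semver.NewConstraint(">=" + min)
	if err != nil {
		return false, err
	}
	return c.Check(v), nil
}

func main() {
	ok, _ := kernelAtLeast("5.15.0-105-generic", "4.9.17")
	fmt.Println(ok) // true
}
```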
diff --git a/feature/cmd/controller-manager/app/options/common.go b/feature/cmd/controller-manager/app/options/common.go
new file mode 100644
index 000000000..875c20097
--- /dev/null
+++ b/feature/cmd/controller-manager/app/options/common.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "strings"
+
+ "github.com/google/gops/agent"
+ "github.com/spf13/pflag"
+ "k8s.io/klog/v2"
+)
+
+// ======================================================================================
+// PROFILING
+// ======================================================================================
+
+var (
+ profileName string
+ profileOutput string
+)
+
+// AddProfilingFlags to NewControllerManagerCommand
+func AddProfilingFlags(flags *pflag.FlagSet) {
+ flags.StringVar(&profileName, "profile", "none", "Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex)")
+ flags.StringVar(&profileOutput, "profile-output", "profile.pprof", "Name of the file to write the profile to")
+}
+
+// InitProfiling for profileName
+func InitProfiling(ctx context.Context) error {
+ var (
+ f *os.File
+ err error
+ )
+
+ switch profileName {
+ case "none":
+ return nil
+ case "cpu":
+ f, err = os.Create(profileOutput)
+ if err != nil {
+ return err
+ }
+
+ err = pprof.StartCPUProfile(f)
+ if err != nil {
+ return err
+ }
+ // Block and mutex profiles need a call to Set{Block,Mutex}ProfileRate to
+ // output anything. We choose to sample all events.
+ case "block":
+ runtime.SetBlockProfileRate(1)
+ case "mutex":
+ runtime.SetMutexProfileFraction(1)
+ default:
+ // Check the profile name is valid.
+ if profile := pprof.Lookup(profileName); profile == nil {
+ return fmt.Errorf("unknown profile '%s'", profileName)
+ }
+ }
+
+ // If the command is interrupted before the end (ctrl-c), flush the
+ // profiling files
+ go func() {
+ <-ctx.Done()
+		// f is only set for the cpu profile; skip the close for the others.
+		if f != nil {
+			if err := f.Close(); err != nil {
+				fmt.Printf("failed to close file. file: %v. error: %v \n", profileOutput, err)
+			}
+		}
+ if err := FlushProfiling(); err != nil {
+ fmt.Printf("failed to FlushProfiling. file: %v. error: %v \n", profileOutput, err)
+ }
+ }()
+
+ return nil
+}
+
+// FlushProfiling to local file
+func FlushProfiling() error {
+ switch profileName {
+ case "none":
+ return nil
+ case "cpu":
+ pprof.StopCPUProfile()
+ case "heap":
+ runtime.GC()
+
+ fallthrough
+ default:
+ profile := pprof.Lookup(profileName)
+ if profile == nil {
+ return nil
+ }
+
+ f, err := os.Create(profileOutput)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ if err := profile.WriteTo(f, 0); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ======================================================================================
+// GOPS
+// ======================================================================================
+
+var gops bool
+
+// AddGOPSFlags to NewControllerManagerCommand
+func AddGOPSFlags(flags *pflag.FlagSet) {
+	flags.BoolVar(&gops, "gops", false, "Whether to enable gops. When enabled, controller-manager listens on a random port on 127.0.0.1, and the gops tool can then be used to list and diagnose the running controller-manager.")
+}
+
+// InitGOPS if gops is true
+func InitGOPS() error {
+ if gops {
+ // Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
+ // Bind to a random port on address 127.0.0.1
+ if err := agent.Listen(agent.Options{}); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ======================================================================================
+// KLOG
+// ======================================================================================
+
+// AddKlogFlags to NewControllerManagerCommand
+func AddKlogFlags(fs *pflag.FlagSet) {
+ local := flag.NewFlagSet("klog", flag.ExitOnError)
+ klog.InitFlags(local)
+ local.VisitAll(func(fl *flag.Flag) {
+ fl.Name = strings.Replace(fl.Name, "_", "-", -1)
+ fs.AddGoFlag(fl)
+ })
+}
diff --git a/feature/cmd/controller-manager/app/options/controller_manager.go b/feature/cmd/controller-manager/app/options/controller_manager.go
new file mode 100644
index 000000000..bcd680d82
--- /dev/null
+++ b/feature/cmd/controller-manager/app/options/controller_manager.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ cliflag "k8s.io/component-base/cli/flag"
+)
+
+// ControllerManagerServerOptions for NewControllerManagerServerOptions
+type ControllerManagerServerOptions struct {
+	// WorkDir is the base directory in which the command looks for resources (projects etc.)
+ WorkDir string
+	// Debug mode retains runtime data after a successful Pipeline execution, including task execution status and parameters.
+ Debug bool
+
+ MaxConcurrentReconciles int
+ LeaderElection bool
+}
+
+// NewControllerManagerServerOptions for NewControllerManagerCommand
+func NewControllerManagerServerOptions() *ControllerManagerServerOptions {
+ return &ControllerManagerServerOptions{
+ WorkDir: "/kubekey",
+ MaxConcurrentReconciles: 1,
+ }
+}
+
+// Flags add to NewControllerManagerCommand
+func (o *ControllerManagerServerOptions) Flags() cliflag.NamedFlagSets {
+ fss := cliflag.NamedFlagSets{}
+ gfs := fss.FlagSet("generic")
+	gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base directory for kubekey resources.")
+ gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, "+"will retain runtime data, which includes task execution status and parameters.")
+ cfs := fss.FlagSet("controller-manager")
+ cfs.IntVar(&o.MaxConcurrentReconciles, "max-concurrent-reconciles", o.MaxConcurrentReconciles, "The number of maximum concurrent reconciles for controller.")
+ cfs.BoolVar(&o.LeaderElection, "leader-election", o.LeaderElection, "Whether to enable leader election for controller-manager.")
+
+ return fss
+}
+
+// Complete for ControllerManagerServerOptions
+func (o *ControllerManagerServerOptions) Complete() {
+	// ensure at least one concurrent reconcile
+ if o.MaxConcurrentReconciles == 0 {
+ o.MaxConcurrentReconciles = 1
+ }
+}
diff --git a/feature/cmd/controller-manager/app/server.go b/feature/cmd/controller-manager/app/server.go
new file mode 100644
index 000000000..614439c0f
--- /dev/null
+++ b/feature/cmd/controller-manager/app/server.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "context"
+ "os"
+
+ "github.com/spf13/cobra"
+ "sigs.k8s.io/controller-runtime/pkg/manager/signals"
+
+ "github.com/kubesphere/kubekey/v4/cmd/controller-manager/app/options"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/manager"
+)
+
+// NewControllerManagerCommand operator command.
+func NewControllerManagerCommand() *cobra.Command {
+ o := options.NewControllerManagerServerOptions()
+ ctx := signals.SetupSignalHandler()
+
+ cmd := &cobra.Command{
+ Use: "controller-manager",
+ Short: "kubekey controller manager",
+ PersistentPreRunE: func(*cobra.Command, []string) error {
+ if err := options.InitGOPS(); err != nil {
+ return err
+ }
+
+ return options.InitProfiling(ctx)
+ },
+ PersistentPostRunE: func(*cobra.Command, []string) error {
+ return options.FlushProfiling()
+ },
+ RunE: func(*cobra.Command, []string) error {
+ o.Complete()
+			// create the workdir if it does not exist
+ _const.SetWorkDir(o.WorkDir)
+ if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
+ if err := os.MkdirAll(o.WorkDir, os.ModePerm); err != nil {
+ return err
+ }
+ }
+
+ return run(ctx, o)
+ },
+ }
+
+ // add common flag
+ flags := cmd.PersistentFlags()
+ options.AddProfilingFlags(flags)
+ options.AddKlogFlags(flags)
+ options.AddGOPSFlags(flags)
+
+ fs := cmd.Flags()
+ for _, f := range o.Flags().FlagSets {
+ fs.AddFlagSet(f)
+ }
+
+ cmd.AddCommand(newVersionCommand())
+
+ return cmd
+}
+
+func run(ctx context.Context, o *options.ControllerManagerServerOptions) error {
+ return manager.NewControllerManager(manager.ControllerManagerOptions{
+ MaxConcurrentReconciles: o.MaxConcurrentReconciles,
+ LeaderElection: o.LeaderElection,
+ }).Run(ctx)
+}
diff --git a/feature/cmd/controller-manager/app/version.go b/feature/cmd/controller-manager/app/version.go
new file mode 100644
index 000000000..84a80c450
--- /dev/null
+++ b/feature/cmd/controller-manager/app/version.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/kubesphere/kubekey/v4/version"
+)
+
+func newVersionCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "version",
+ Short: "Print the version of KubeSphere controller-manager",
+ Run: func(cmd *cobra.Command, _ []string) {
+ cmd.Println(version.Get())
+ },
+ }
+}
diff --git a/feature/cmd/controller-manager/controller_manager.go b/feature/cmd/controller-manager/controller_manager.go
new file mode 100644
index 000000000..b5692ef76
--- /dev/null
+++ b/feature/cmd/controller-manager/controller_manager.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "os"
+
+ "k8s.io/component-base/cli"
+
+ "github.com/kubesphere/kubekey/v4/cmd/controller-manager/app"
+)
+
+func main() {
+ command := app.NewControllerManagerCommand()
+ code := cli.Run(command)
+ os.Exit(code)
+}
diff --git a/feature/cmd/kk/app/artifact.go b/feature/cmd/kk/app/artifact.go
new file mode 100644
index 000000000..40a7ba912
--- /dev/null
+++ b/feature/cmd/kk/app/artifact.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+func newArtifactCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "artifact",
+ Short: "Manage a KubeKey offline installation package",
+ }
+
+ cmd.AddCommand(newArtifactExportCommand())
+ cmd.AddCommand(newArtifactImagesCommand())
+
+ return cmd
+}
+
+func newArtifactExportCommand() *cobra.Command {
+ o := options.NewArtifactExportOptions()
+
+ cmd := &cobra.Command{
+ Use: "export",
+ Short: "Export a KubeKey offline installation package",
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/artifact_export.yaml"})
+ if err != nil {
+ return err
+ }
+ // set workdir
+ _const.SetWorkDir(o.WorkDir)
+			// create the workdir if it does not exist
+ if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
+ if err := os.MkdirAll(o.WorkDir, os.ModePerm); err != nil {
+ return err
+ }
+ }
+			// when packaging an artifact, it should not contain certs
+ pipeline.Spec.SkipTags = []string{"certs"}
+
+ return run(ctx, pipeline, config, inventory)
+ },
+ }
+
+ flags := cmd.Flags()
+ for _, f := range o.Flags().FlagSets {
+ flags.AddFlagSet(f)
+ }
+
+ return cmd
+}
+
+func newArtifactImagesCommand() *cobra.Command {
+ o := options.NewArtifactImagesOptions()
+
+ cmd := &cobra.Command{
+ Use: "images",
+		Short: "Push images to a registry from an artifact",
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/artifact_images.yaml"})
+ if err != nil {
+ return err
+ }
+ // set workdir
+ _const.SetWorkDir(o.WorkDir)
+			// create the workdir if it does not exist
+ if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
+ if err := os.MkdirAll(o.WorkDir, os.ModePerm); err != nil {
+ return err
+ }
+ }
+
+ return run(ctx, pipeline, config, inventory)
+ },
+ }
+
+ flags := cmd.Flags()
+ for _, f := range o.Flags().FlagSets {
+ flags.AddFlagSet(f)
+ }
+
+ return cmd
+}
+
+func init() {
+ registerInternalCommand(newArtifactCommand())
+}
diff --git a/feature/cmd/kk/app/certs.go b/feature/cmd/kk/app/certs.go
new file mode 100644
index 000000000..fff01d075
--- /dev/null
+++ b/feature/cmd/kk/app/certs.go
@@ -0,0 +1,76 @@
+//go:build builtin
+// +build builtin
+
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+func newCertsCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "certs",
+		Short: "Manage cluster certs",
+ }
+
+ cmd.AddCommand(newCertsRenewCommand())
+
+ return cmd
+}
+
+func newCertsRenewCommand() *cobra.Command {
+ o := options.NewCertsRenewOptions()
+
+ cmd := &cobra.Command{
+ Use: "renew",
+		Short: "Renew cluster certificates",
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/certs_renew.yaml"})
+ if err != nil {
+ return err
+ }
+ // set workdir
+ _const.SetWorkDir(o.WorkDir)
+			// create the workdir if it does not exist
+ if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
+ if err := os.MkdirAll(o.WorkDir, os.ModePerm); err != nil {
+ return err
+ }
+ }
+
+ return run(ctx, pipeline, config, inventory)
+ },
+ }
+
+ flags := cmd.Flags()
+ for _, f := range o.Flags().FlagSets {
+ flags.AddFlagSet(f)
+ }
+
+ return cmd
+}
+
+func init() {
+ registerInternalCommand(newCertsCommand())
+}
diff --git a/feature/cmd/kk/app/create.go b/feature/cmd/kk/app/create.go
new file mode 100644
index 000000000..0a17466c3
--- /dev/null
+++ b/feature/cmd/kk/app/create.go
@@ -0,0 +1,76 @@
+//go:build builtin
+// +build builtin
+
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+func newCreateCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "create",
+ Short: "Create a cluster or a cluster configuration file",
+ }
+
+ cmd.AddCommand(newCreateClusterCommand())
+
+ return cmd
+}
+
+func newCreateClusterCommand() *cobra.Command {
+ o := options.NewCreateClusterOptions()
+
+ cmd := &cobra.Command{
+ Use: "cluster",
+ Short: "Create a Kubernetes or KubeSphere cluster",
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/create_cluster.yaml"})
+ if err != nil {
+ return err
+ }
+ // set workdir
+ _const.SetWorkDir(o.WorkDir)
+			// create the workdir if it does not exist
+ if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
+ if err := os.MkdirAll(o.WorkDir, os.ModePerm); err != nil {
+ return err
+ }
+ }
+
+ return run(ctx, pipeline, config, inventory)
+ },
+ }
+
+ flags := cmd.Flags()
+ for _, f := range o.Flags().FlagSets {
+ flags.AddFlagSet(f)
+ }
+
+ return cmd
+}
+
+func init() {
+ registerInternalCommand(newCreateCommand())
+}
diff --git a/feature/cmd/kk/app/init.go b/feature/cmd/kk/app/init.go
new file mode 100644
index 000000000..b37714c89
--- /dev/null
+++ b/feature/cmd/kk/app/init.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+func newInitCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "init",
+ Short: "Initializes the installation environment",
+ }
+
+ cmd.AddCommand(newInitOSCommand())
+ cmd.AddCommand(newInitRegistryCommand())
+
+ return cmd
+}
+
+func newInitOSCommand() *cobra.Command {
+ o := options.NewInitOSOptions()
+
+ cmd := &cobra.Command{
+ Use: "os",
+ Short: "Init operating system",
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/init_os.yaml"})
+ if err != nil {
+ return err
+ }
+ // set workdir
+ _const.SetWorkDir(o.WorkDir)
+			// create the workdir directory if it does not exist
+ if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
+ if err := os.MkdirAll(o.WorkDir, os.ModePerm); err != nil {
+ return err
+ }
+ }
+
+ return run(ctx, pipeline, config, inventory)
+ },
+ }
+
+ flags := cmd.Flags()
+ for _, f := range o.Flags().FlagSets {
+ flags.AddFlagSet(f)
+ }
+
+ return cmd
+}
+
+func newInitRegistryCommand() *cobra.Command {
+ o := options.NewInitRegistryOptions()
+
+ cmd := &cobra.Command{
+ Use: "registry",
+ Short: "Init a local image registry",
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/init_registry.yaml"})
+ if err != nil {
+ return err
+ }
+ // set workdir
+ _const.SetWorkDir(o.WorkDir)
+			// create the workdir directory if it does not exist
+ if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
+ if err := os.MkdirAll(o.WorkDir, os.ModePerm); err != nil {
+ return err
+ }
+ }
+
+ return run(ctx, pipeline, config, inventory)
+ },
+ }
+
+ flags := cmd.Flags()
+ for _, f := range o.Flags().FlagSets {
+ flags.AddFlagSet(f)
+ }
+
+ return cmd
+}
+
+func init() {
+ registerInternalCommand(newInitCommand())
+}
diff --git a/feature/cmd/kk/app/options/artifact.go b/feature/cmd/kk/app/options/artifact.go
new file mode 100644
index 000000000..0bc74c8a4
--- /dev/null
+++ b/feature/cmd/kk/app/options/artifact.go
@@ -0,0 +1,134 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ cliflag "k8s.io/component-base/cli/flag"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+// ======================================================================================
+// artifact export
+// ======================================================================================
+
+// ArtifactExportOptions for NewArtifactExportOptions
+type ArtifactExportOptions struct {
+ commonOptions
+}
+
+// Flags returns the flag sets for newArtifactExportCommand
+func (o *ArtifactExportOptions) Flags() cliflag.NamedFlagSets {
+ fss := o.commonOptions.flags()
+
+ return fss
+}
+
+// Complete completes the options and creates the Pipeline, Config and Inventory
+func (o *ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
+ pipeline := &kkcorev1.Pipeline{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "artifact-export-",
+ Namespace: o.Namespace,
+ Annotations: map[string]string{
+ kkcorev1.BuiltinsProjectAnnotation: "",
+ },
+ },
+ }
+
+	// complete the playbook; currently only one playbook is supported
+ if len(args) != 1 {
+ return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
+ }
+ o.Playbook = args[0]
+
+ pipeline.Spec = kkcorev1.PipelineSpec{
+ Playbook: o.Playbook,
+ Debug: o.Debug,
+ }
+
+ config, inventory, err := o.completeRef(pipeline)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ return pipeline, config, inventory, nil
+}
+
+// NewArtifactExportOptions for newArtifactExportCommand
+func NewArtifactExportOptions() *ArtifactExportOptions {
+ // set default value
+ return &ArtifactExportOptions{commonOptions: newCommonOptions()}
+}
+
+// ======================================================================================
+// artifact image
+// ======================================================================================
+
+// ArtifactImagesOptions for NewArtifactImagesOptions
+type ArtifactImagesOptions struct {
+ commonOptions
+}
+
+// Flags returns the flag sets for newArtifactImagesCommand
+func (o *ArtifactImagesOptions) Flags() cliflag.NamedFlagSets {
+ fss := o.commonOptions.flags()
+
+ return fss
+}
+
+// Complete completes the options and creates the Pipeline, Config and Inventory
+func (o *ArtifactImagesOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
+ pipeline := &kkcorev1.Pipeline{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "artifact-images-",
+ Namespace: o.Namespace,
+ Annotations: map[string]string{
+ kkcorev1.BuiltinsProjectAnnotation: "",
+ },
+ },
+ }
+
+	// complete the playbook; currently only one playbook is supported
+ if len(args) != 1 {
+ return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
+ }
+ o.Playbook = args[0]
+
+ pipeline.Spec = kkcorev1.PipelineSpec{
+ Playbook: o.Playbook,
+ Debug: o.Debug,
+ Tags: []string{"only_image"},
+ }
+
+ config, inventory, err := o.completeRef(pipeline)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ return pipeline, config, inventory, nil
+}
+
+// NewArtifactImagesOptions for newArtifactImagesCommand
+func NewArtifactImagesOptions() *ArtifactImagesOptions {
+ // set default value
+ return &ArtifactImagesOptions{commonOptions: newCommonOptions()}
+}
diff --git a/feature/cmd/kk/app/options/builtin.go b/feature/cmd/kk/app/options/builtin.go
new file mode 100644
index 000000000..c29062bad
--- /dev/null
+++ b/feature/cmd/kk/app/options/builtin.go
@@ -0,0 +1,36 @@
+//go:build builtin
+// +build builtin
+
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "k8s.io/apimachinery/pkg/util/yaml"
+
+ "github.com/kubesphere/kubekey/v4/builtin"
+)
+
+func init() {
+ if err := yaml.Unmarshal(builtin.DefaultConfig, defaultConfig); err != nil {
+ panic(err)
+ }
+
+ if err := yaml.Unmarshal(builtin.DefaultInventory, defaultInventory); err != nil {
+ panic(err)
+ }
+}
diff --git a/feature/cmd/kk/app/options/certs.go b/feature/cmd/kk/app/options/certs.go
new file mode 100644
index 000000000..da9e1294c
--- /dev/null
+++ b/feature/cmd/kk/app/options/certs.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ cliflag "k8s.io/component-base/cli/flag"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+// NewCertsRenewOptions for newCertsRenewCommand
+func NewCertsRenewOptions() *CertsRenewOptions {
+ // set default value
+ return &CertsRenewOptions{commonOptions: newCommonOptions()}
+}
+
+// CertsRenewOptions for NewCertsRenewOptions
+type CertsRenewOptions struct {
+ commonOptions
+}
+
+// Flags returns the flag sets for newCertsRenewCommand
+func (o *CertsRenewOptions) Flags() cliflag.NamedFlagSets {
+ fss := o.commonOptions.flags()
+
+ return fss
+}
+
+// Complete completes the options and creates the Pipeline, Config and Inventory
+func (o *CertsRenewOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
+ pipeline := &kkcorev1.Pipeline{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "certs-renew-",
+ Namespace: o.Namespace,
+ Annotations: map[string]string{
+ kkcorev1.BuiltinsProjectAnnotation: "",
+ },
+ },
+ }
+
+	// complete the playbook; currently only one playbook is supported
+ if len(args) != 1 {
+ return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
+ }
+ o.Playbook = args[0]
+
+ pipeline.Spec = kkcorev1.PipelineSpec{
+ Playbook: o.Playbook,
+ Debug: o.Debug,
+ Tags: []string{"certs"},
+ }
+
+ config, inventory, err := o.completeRef(pipeline)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ return pipeline, config, inventory, nil
+}
diff --git a/feature/cmd/kk/app/options/common.go b/feature/cmd/kk/app/options/common.go
new file mode 100644
index 000000000..3c5571ca2
--- /dev/null
+++ b/feature/cmd/kk/app/options/common.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "strings"
+
+ "github.com/google/gops/agent"
+ "github.com/spf13/pflag"
+ "k8s.io/klog/v2"
+)
+
+// ======================================================================================
+// PROFILING
+// ======================================================================================
+
+var (
+ profileName string
+ profileOutput string
+)
+
+// AddProfilingFlags to NewRootCommand
+func AddProfilingFlags(flags *pflag.FlagSet) {
+ flags.StringVar(&profileName, "profile", "none", "Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex)")
+ flags.StringVar(&profileOutput, "profile-output", "profile.pprof", "Name of the file to write the profile to")
+}
+
+// InitProfiling for profileName
+func InitProfiling(ctx context.Context) error {
+ var (
+ f *os.File
+ err error
+ )
+
+ switch profileName {
+ case "none":
+ return nil
+ case "cpu":
+ f, err = os.Create(profileOutput)
+ if err != nil {
+ return err
+ }
+
+ err = pprof.StartCPUProfile(f)
+ if err != nil {
+ return err
+ }
+ // Block and mutex profiles need a call to Set{Block,Mutex}ProfileRate to
+ // output anything. We choose to sample all events.
+ case "block":
+ runtime.SetBlockProfileRate(1)
+ case "mutex":
+ runtime.SetMutexProfileFraction(1)
+ default:
+ // Check the profile name is valid.
+ if profile := pprof.Lookup(profileName); profile == nil {
+ return fmt.Errorf("unknown profile '%s'", profileName)
+ }
+ }
+
+ // If the command is interrupted before the end (ctrl-c), flush the
+ // profiling files
+
+	go func() {
+		<-ctx.Done()
+		// f is only non-nil for the cpu profile; guard before closing it.
+		if f != nil {
+			if err := f.Close(); err != nil {
+				fmt.Printf("failed to close file. file: %v. error: %v \n", profileOutput, err)
+			}
+		}
+
+		if err := FlushProfiling(); err != nil {
+			fmt.Printf("failed to FlushProfiling. file: %v. error: %v \n", profileOutput, err)
+		}
+	}()
+
+ return nil
+}
+
+// FlushProfiling to local file
+func FlushProfiling() error {
+ switch profileName {
+ case "none":
+ return nil
+ case "cpu":
+ pprof.StopCPUProfile()
+ case "heap":
+ runtime.GC()
+
+ fallthrough
+ default:
+ profile := pprof.Lookup(profileName)
+ if profile == nil {
+ return nil
+ }
+
+ f, err := os.Create(profileOutput)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ if err := profile.WriteTo(f, 0); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
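+
+// Illustrative usage (a sketch; the playbook path is a placeholder): capture a
+// CPU profile for one invocation and inspect it afterwards with the standard
+// pprof tooling.
+//
+//	kk run --profile cpu --profile-output cpu.pprof playbooks/demo.yaml
+//	go tool pprof cpu.pprof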
+
+// ======================================================================================
+// GOPS
+// ======================================================================================
+
+var gops bool
+
+// AddGOPSFlags to NewRootCommand
+func AddGOPSFlags(flags *pflag.FlagSet) {
+	flags.BoolVar(&gops, "gops", false, "Whether to enable gops or not. When this option is enabled, "+
+		"the process will listen on a random port on 127.0.0.1, and the gops tool can then be used to list and diagnose it.")
+}
+
+// InitGOPS if gops is true
+func InitGOPS() error {
+ if gops {
+ // Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
+ // Bind to a random port on address 127.0.0.1
+ if err := agent.Listen(agent.Options{}); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
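+
+// Illustrative usage (a sketch; assumes the gops CLI is installed and the
+// playbook path is a placeholder): with --gops enabled the agent listens on a
+// random 127.0.0.1 port, so the process can be discovered and inspected:
+//
+//	kk run --gops playbooks/demo.yaml &
+//	gops             # lists running Go processes, including kk
+//	gops stack <pid> # dumps the stack trace of the selected process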
+
+// ======================================================================================
+// KLOG
+// ======================================================================================
+
+// AddKlogFlags to NewRootCommand
+func AddKlogFlags(fs *pflag.FlagSet) {
+ local := flag.NewFlagSet("klog", flag.ExitOnError)
+ klog.InitFlags(local)
+ local.VisitAll(func(fl *flag.Flag) {
+		fl.Name = strings.ReplaceAll(fl.Name, "_", "-")
+ fs.AddGoFlag(fl)
+ })
+}
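+
+// Illustrative note (a sketch; the playbook path is a placeholder): klog
+// registers flags such as -v and -log_file, and the underscore-to-dash rewrite
+// above exposes them in the usual CLI form:
+//
+//	kk run -v=4 --log-file=/tmp/kk.log playbooks/demo.yaml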
diff --git a/feature/cmd/kk/app/options/create.go b/feature/cmd/kk/app/options/create.go
new file mode 100644
index 000000000..a9c8f810a
--- /dev/null
+++ b/feature/cmd/kk/app/options/create.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ cliflag "k8s.io/component-base/cli/flag"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+// NewCreateClusterOptions for newCreateClusterCommand
+func NewCreateClusterOptions() *CreateClusterOptions {
+ // set default value
+ return &CreateClusterOptions{commonOptions: newCommonOptions()}
+}
+
+// CreateClusterOptions for NewCreateClusterOptions
+type CreateClusterOptions struct {
+ commonOptions
+	// Kubernetes is the kubernetes version which the cluster will install.
+	Kubernetes string
+	// ContainerManager is the container runtime for kubernetes, such as docker, containerd, etc.
+	ContainerManager string
+}
+
+// Flags returns the flag sets for newCreateClusterCommand
+func (o *CreateClusterOptions) Flags() cliflag.NamedFlagSets {
+ fss := o.commonOptions.flags()
+ kfs := fss.FlagSet("config")
+ kfs.StringVar(&o.Kubernetes, "with-kubernetes", "", "Specify a supported version of kubernetes")
+ kfs.StringVar(&o.ContainerManager, "container-manager", "", "Container runtime: docker, crio, containerd and isula.")
+
+ return fss
+}
+
+// Complete completes the options and creates the Pipeline, Config and Inventory
+func (o *CreateClusterOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
+ pipeline := &kkcorev1.Pipeline{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "create-cluster-",
+ Namespace: o.Namespace,
+ Annotations: map[string]string{
+ kkcorev1.BuiltinsProjectAnnotation: "",
+ },
+ },
+ }
+
+	// complete the playbook; currently only one playbook is supported
+ if len(args) != 1 {
+ return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
+ }
+ o.Playbook = args[0]
+
+ pipeline.Spec = kkcorev1.PipelineSpec{
+ Playbook: o.Playbook,
+ Debug: o.Debug,
+ }
+
+ config, inventory, err := o.completeRef(pipeline)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ if o.Kubernetes != "" {
+ // override kube_version in config
+ if err := config.SetValue("kube_version", o.Kubernetes); err != nil {
+ return nil, nil, nil, err
+ }
+ }
+ if o.ContainerManager != "" {
+ // override container_manager in config
+ if err := config.SetValue("cri.container_manager", o.ContainerManager); err != nil {
+ return nil, nil, nil, err
+ }
+ }
+
+ return pipeline, config, inventory, nil
+}
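+
+// Illustrative usage (a sketch; version values are placeholders): both flags
+// above are plain config overrides, so
+//
+//	kk create cluster --with-kubernetes v1.29.0 --container-manager containerd
+//
+// is equivalent to --set kube_version=v1.29.0 --set cri.container_manager=containerd.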
diff --git a/feature/cmd/kk/app/options/init.go b/feature/cmd/kk/app/options/init.go
new file mode 100644
index 000000000..1183aeea8
--- /dev/null
+++ b/feature/cmd/kk/app/options/init.go
@@ -0,0 +1,132 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ cliflag "k8s.io/component-base/cli/flag"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+// ======================================================================================
+// init os
+// ======================================================================================
+
+// InitOSOptions for NewInitOSOptions
+type InitOSOptions struct {
+ commonOptions
+}
+
+// Flags returns the flag sets for newInitOSCommand
+func (o *InitOSOptions) Flags() cliflag.NamedFlagSets {
+ fss := o.commonOptions.flags()
+
+ return fss
+}
+
+// Complete completes the options and creates the Pipeline, Config and Inventory
+func (o *InitOSOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
+ pipeline := &kkcorev1.Pipeline{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "init-os-",
+ Namespace: o.Namespace,
+ Annotations: map[string]string{
+ kkcorev1.BuiltinsProjectAnnotation: "",
+ },
+ },
+ }
+
+	// complete the playbook; currently only one playbook is supported
+ if len(args) != 1 {
+ return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
+ }
+ o.Playbook = args[0]
+
+ pipeline.Spec = kkcorev1.PipelineSpec{
+ Playbook: o.Playbook,
+ Debug: o.Debug,
+ }
+
+ config, inventory, err := o.completeRef(pipeline)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ return pipeline, config, inventory, nil
+}
+
+// NewInitOSOptions for newInitOSCommand
+func NewInitOSOptions() *InitOSOptions {
+ // set default value
+ return &InitOSOptions{commonOptions: newCommonOptions()}
+}
+
+// ======================================================================================
+// init registry
+// ======================================================================================
+
+// InitRegistryOptions for NewInitRegistryOptions
+type InitRegistryOptions struct {
+ commonOptions
+}
+
+// Flags returns the flag sets for newInitRegistryCommand
+func (o *InitRegistryOptions) Flags() cliflag.NamedFlagSets {
+ fss := o.commonOptions.flags()
+
+ return fss
+}
+
+// Complete completes the options and creates the Pipeline, Config and Inventory
+func (o *InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
+ pipeline := &kkcorev1.Pipeline{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "init-registry-",
+ Namespace: o.Namespace,
+ Annotations: map[string]string{
+ kkcorev1.BuiltinsProjectAnnotation: "",
+ },
+ },
+ }
+
+	// complete the playbook; currently only one playbook is supported
+ if len(args) != 1 {
+ return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
+ }
+ o.Playbook = args[0]
+
+ pipeline.Spec = kkcorev1.PipelineSpec{
+ Playbook: o.Playbook,
+ Debug: o.Debug,
+ }
+ config, inventory, err := o.completeRef(pipeline)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ return pipeline, config, inventory, nil
+}
+
+// NewInitRegistryOptions for newInitRegistryCommand
+func NewInitRegistryOptions() *InitRegistryOptions {
+ // set default value
+ return &InitRegistryOptions{commonOptions: newCommonOptions()}
+}
diff --git a/feature/cmd/kk/app/options/option.go b/feature/cmd/kk/app/options/option.go
new file mode 100644
index 000000000..4cae24f5a
--- /dev/null
+++ b/feature/cmd/kk/app/options/option.go
@@ -0,0 +1,257 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ cliflag "k8s.io/component-base/cli/flag"
+ "k8s.io/klog/v2"
+ "sigs.k8s.io/yaml"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+var defaultConfig = &kkcorev1.Config{
+	TypeMeta: metav1.TypeMeta{
+		APIVersion: kkcorev1.SchemeGroupVersion.String(),
+		Kind:       "Config",
+	},
+	ObjectMeta: metav1.ObjectMeta{Name: "default"},
+}
+
+var defaultInventory = &kkcorev1.Inventory{
+	TypeMeta: metav1.TypeMeta{
+		APIVersion: kkcorev1.SchemeGroupVersion.String(),
+		Kind:       "Inventory",
+	},
+	ObjectMeta: metav1.ObjectMeta{Name: "default"},
+}
+
+type commonOptions struct {
+	// Playbook is the playbook to execute.
+	Playbook string
+	// InventoryFile is the path of the inventory file.
+	InventoryFile string
+	// ConfigFile is the path of the config file.
+	ConfigFile string
+	// Set contains values to set in the config.
+	Set []string
+	// WorkDir is the base directory in which the command looks up any resource (project etc.).
+	WorkDir string
+	// Artifact is the path of the offline package for kubekey.
+	Artifact string
+	// Debug mode retains runtime data after a successful Pipeline execution, including task execution status and parameters.
+ Debug bool
+ // Namespace for all resources.
+ Namespace string
+}
+
+func newCommonOptions() commonOptions {
+ o := commonOptions{
+ Namespace: metav1.NamespaceDefault,
+ }
+
+ wd, err := os.Getwd()
+ if err != nil {
+ klog.ErrorS(err, "get current dir error")
+ o.WorkDir = "/tmp/kubekey"
+ } else {
+ o.WorkDir = filepath.Join(wd, "kubekey")
+ }
+
+ return o
+}
+
+func (o *commonOptions) flags() cliflag.NamedFlagSets {
+ fss := cliflag.NamedFlagSets{}
+ gfs := fss.FlagSet("generic")
+	gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base directory for kubekey; defaults to 'kubekey' under the current directory")
+	gfs.StringVarP(&o.Artifact, "artifact", "a", "", "Path to a KubeKey artifact")
+	gfs.StringVarP(&o.ConfigFile, "config", "c", o.ConfigFile, "the config file path; supports *.yaml")
+	gfs.StringArrayVar(&o.Set, "set", o.Set, "set value in config. format: --set key=val or --set k1=v1,k2=v2")
+	gfs.StringVarP(&o.InventoryFile, "inventory", "i", o.InventoryFile, "the inventory (host list) file path; supports *.yaml")
+	gfs.BoolVarP(&o.Debug, "debug", "d", o.Debug, "debug mode; after a successful Pipeline execution, retain runtime data, including task execution status and parameters")
+	gfs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "the namespace in which the pipeline will be executed; all referenced resources (pipeline, config, inventory, task) should be in the same namespace")
+
+ return fss
+}
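+
+// Illustrative usage (a sketch; file names are placeholders): the "generic"
+// flag set above is shared by every subcommand, e.g.:
+//
+//	kk create cluster -i inventory.yaml -c config.yaml --set kube_version=v1.29.0 -d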
+
+func (o *commonOptions) completeRef(pipeline *kkcorev1.Pipeline) (*kkcorev1.Config, *kkcorev1.Inventory, error) {
+ if !filepath.IsAbs(o.WorkDir) {
+ wd, err := os.Getwd()
+ if err != nil {
+ return nil, nil, fmt.Errorf("get current dir error: %w", err)
+ }
+ o.WorkDir = filepath.Join(wd, o.WorkDir)
+ }
+ // complete config
+ config, err := o.genConfig()
+ if err != nil {
+ return nil, nil, fmt.Errorf("generate config error: %w", err)
+ }
+ pipeline.Spec.ConfigRef = &corev1.ObjectReference{
+ Kind: config.Kind,
+ Namespace: config.Namespace,
+ Name: config.Name,
+ UID: config.UID,
+ APIVersion: config.APIVersion,
+ ResourceVersion: config.ResourceVersion,
+ }
+ // complete inventory
+ inventory, err := o.genInventory()
+ if err != nil {
+ return nil, nil, fmt.Errorf("generate inventory error: %w", err)
+ }
+ pipeline.Spec.InventoryRef = &corev1.ObjectReference{
+ Kind: inventory.Kind,
+ Namespace: inventory.Namespace,
+ Name: inventory.Name,
+ UID: inventory.UID,
+ APIVersion: inventory.APIVersion,
+ ResourceVersion: inventory.ResourceVersion,
+ }
+
+ return config, inventory, nil
+}
+
+// genConfig generates the config from ConfigFile and sets values from command args.
+func (o *commonOptions) genConfig() (*kkcorev1.Config, error) {
+ config := defaultConfig.DeepCopy()
+ if o.ConfigFile != "" {
+ cdata, err := os.ReadFile(o.ConfigFile)
+ if err != nil {
+ return nil, fmt.Errorf("read config file error: %w", err)
+ }
+ config = &kkcorev1.Config{}
+ if err := yaml.Unmarshal(cdata, config); err != nil {
+ return nil, fmt.Errorf("unmarshal config file error: %w", err)
+ }
+ }
+ // set value by command args
+ if o.Namespace != "" {
+ config.Namespace = o.Namespace
+ }
+ if wd, err := config.GetValue("work_dir"); err == nil && wd != nil {
+ // if work_dir is defined in config, use it. otherwise use current dir.
+ if workDir, ok := wd.(string); ok {
+ o.WorkDir = workDir
+ }
+ } else if err := config.SetValue("work_dir", o.WorkDir); err != nil {
+ return nil, fmt.Errorf("work_dir to config error: %w", err)
+ }
+ if o.Artifact != "" {
+ // override artifact_file in config
+ if err := config.SetValue("artifact_file", o.Artifact); err != nil {
+ return nil, fmt.Errorf("artifact file to config error: %w", err)
+ }
+ }
+ for _, s := range o.Set {
+ for _, setVal := range strings.Split(unescapeString(s), ",") {
+ i := strings.Index(setVal, "=")
+ if i == 0 || i == -1 {
+ return nil, errors.New("--set value should be k=v")
+ }
+ if err := setValue(config, setVal[:i], setVal[i+1:]); err != nil {
+ return nil, fmt.Errorf("--set value to config error: %w", err)
+ }
+ }
+ }
+
+ return config, nil
+}
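+
+// Illustrative config file (a sketch of what genConfig accepts; the spec keys
+// are free-form since the Config CRD preserves unknown fields, and the version
+// value is a placeholder):
+//
+//	apiVersion: kubekey.kubesphere.io/v1
+//	kind: Config
+//	metadata:
+//	  name: default
+//	spec:
+//	  kube_version: v1.29.0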
+
+// genInventory generates the inventory from InventoryFile and sets values from command args.
+func (o *commonOptions) genInventory() (*kkcorev1.Inventory, error) {
+ inventory := defaultInventory.DeepCopy()
+ if o.InventoryFile != "" {
+ cdata, err := os.ReadFile(o.InventoryFile)
+ if err != nil {
+ klog.V(4).ErrorS(err, "read config file error")
+
+ return nil, err
+ }
+ inventory = &kkcorev1.Inventory{}
+ if err := yaml.Unmarshal(cdata, inventory); err != nil {
+ klog.V(4).ErrorS(err, "unmarshal config file error")
+
+ return nil, err
+ }
+ }
+ // set value by command args
+ if o.Namespace != "" {
+ inventory.Namespace = o.Namespace
+ }
+
+ return inventory, nil
+}
+
+// setValue sets key: val in config.
+// If val is a JSON string, it is converted to a map or slice.
+// If val is TRUE, YES or Y, it is converted to the bool value true.
+// If val is FALSE, NO or N, it is converted to the bool value false.
+func setValue(config *kkcorev1.Config, key, val string) error {
+ switch {
+ case strings.HasPrefix(val, "{") && strings.HasSuffix(val, "{"):
+ var value map[string]any
+ err := json.Unmarshal([]byte(val), &value)
+ if err != nil {
+ return err
+ }
+
+ return config.SetValue(key, value)
+ case strings.HasPrefix(val, "[") && strings.HasSuffix(val, "]"):
+ var value []any
+ err := json.Unmarshal([]byte(val), &value)
+ if err != nil {
+ return err
+ }
+
+ return config.SetValue(key, value)
+ case strings.EqualFold(val, "TRUE") || strings.EqualFold(val, "YES") || strings.EqualFold(val, "Y"):
+ return config.SetValue(key, true)
+ case strings.EqualFold(val, "FALSE") || strings.EqualFold(val, "NO") || strings.EqualFold(val, "N"):
+ return config.SetValue(key, false)
+ default:
+ return config.SetValue(key, val)
+ }
+}
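+
+// Illustrative conversions performed by setValue (a sketch; note that values
+// arriving through --set are first split on commas by genConfig):
+//
+//	setValue(config, "cri.container_manager", "docker") // stored as string "docker"
+//	setValue(config, "debug", "yes")                     // stored as bool true
+//	setValue(config, "masters", `["n1","n2"]`)           // stored as []any{"n1", "n2"}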
+
+// unescapeString handles common escape sequences
+func unescapeString(s string) string {
+ replacements := map[string]string{
+ `\\`: `\`,
+ `\"`: `"`,
+ `\'`: `'`,
+ `\n`: "\n",
+ `\r`: "\r",
+ `\t`: "\t",
+ `\b`: "\b",
+ `\f`: "\f",
+ }
+
+ // Iterate over the replacements map and replace escape sequences in the string
+ for o, n := range replacements {
+ s = strings.ReplaceAll(s, o, n)
+ }
+
+ return s
+}
diff --git a/feature/cmd/kk/app/options/pipeline.go b/feature/cmd/kk/app/options/pipeline.go
new file mode 100644
index 000000000..0624fb8aa
--- /dev/null
+++ b/feature/cmd/kk/app/options/pipeline.go
@@ -0,0 +1,32 @@
+package options
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ cliflag "k8s.io/component-base/cli/flag"
+)
+
+// PipelineOptions for NewPipelineOptions
+type PipelineOptions struct {
+ Name string
+ Namespace string
+ WorkDir string
+}
+
+// NewPipelineOptions for newPipelineCommand
+func NewPipelineOptions() *PipelineOptions {
+ return &PipelineOptions{
+ Namespace: metav1.NamespaceDefault,
+ WorkDir: "/kubekey",
+ }
+}
+
+// Flags returns the flag sets for newPipelineCommand
+func (o *PipelineOptions) Flags() cliflag.NamedFlagSets {
+ fss := cliflag.NamedFlagSets{}
+ pfs := fss.FlagSet("pipeline flags")
+ pfs.StringVar(&o.Name, "name", o.Name, "name of pipeline")
+ pfs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "namespace of pipeline")
+	pfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base directory for kubekey")
+
+ return fss
+}
diff --git a/feature/cmd/kk/app/options/precheck.go b/feature/cmd/kk/app/options/precheck.go
new file mode 100644
index 000000000..49dacc86a
--- /dev/null
+++ b/feature/cmd/kk/app/options/precheck.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ cliflag "k8s.io/component-base/cli/flag"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+// NewPreCheckOptions for newPreCheckCommand
+func NewPreCheckOptions() *PreCheckOptions {
+ // set default value
+ return &PreCheckOptions{commonOptions: newCommonOptions()}
+}
+
+// PreCheckOptions for NewPreCheckOptions
+type PreCheckOptions struct {
+ commonOptions
+}
+
+// Flags returns the flag sets for newPreCheckCommand
+func (o *PreCheckOptions) Flags() cliflag.NamedFlagSets {
+ return o.commonOptions.flags()
+}
+
+// Complete completes the options and creates the Pipeline, Config and Inventory
+func (o *PreCheckOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
+ pipeline := &kkcorev1.Pipeline{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "precheck-",
+ Namespace: o.Namespace,
+ Annotations: map[string]string{
+ kkcorev1.BuiltinsProjectAnnotation: "",
+ },
+ },
+ }
+
+	// complete the playbook and optional tags; the last argument is the playbook
+ var tags []string
+ if len(args) < 1 {
+ return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
+ } else if len(args) == 1 {
+ o.Playbook = args[0]
+ } else {
+ tags = args[:len(args)-1]
+ o.Playbook = args[len(args)-1]
+ }
+
+ pipeline.Spec = kkcorev1.PipelineSpec{
+ Playbook: o.Playbook,
+ Debug: o.Debug,
+ Tags: tags,
+ }
+ config, inventory, err := o.completeRef(pipeline)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ return pipeline, config, inventory, nil
+}
diff --git a/feature/cmd/kk/app/options/run.go b/feature/cmd/kk/app/options/run.go
new file mode 100644
index 000000000..814d8b738
--- /dev/null
+++ b/feature/cmd/kk/app/options/run.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ cliflag "k8s.io/component-base/cli/flag"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+// KubeKeyRunOptions for NewKubeKeyRunOptions
+type KubeKeyRunOptions struct {
+ commonOptions
+ // ProjectAddr is the storage for executable packages (in Ansible format).
+ // When starting with http or https, it will be obtained from a Git repository.
+ // When starting with file path, it will be obtained from the local path.
+ ProjectAddr string
+	// ProjectName is the name of the project; the project is stored in the project dir under this name.
+	// If empty, it is generated from ProjectAddr.
+	ProjectName string
+	// ProjectBranch is the git branch of the git Addr.
+	ProjectBranch string
+	// ProjectTag is the git tag of the git Addr.
+	ProjectTag string
+	// ProjectInsecureSkipTLS skips TLS verification when the git addr is https.
+	ProjectInsecureSkipTLS bool
+	// ProjectToken is the token used to clone and pull the git project.
+	ProjectToken string
+	// Tags are the playbook tags to execute.
+	Tags []string
+	// SkipTags are the playbook tags to skip.
+	SkipTags []string
+}
+
+// NewKubeKeyRunOptions for newRunCommand
+func NewKubeKeyRunOptions() *KubeKeyRunOptions {
+ // add default values
+ o := &KubeKeyRunOptions{
+ commonOptions: newCommonOptions(),
+ }
+
+ return o
+}
+
+// Flags returns the flag sets for newRunCommand
+func (o *KubeKeyRunOptions) Flags() cliflag.NamedFlagSets {
+ fss := o.commonOptions.flags()
+ gitfs := fss.FlagSet("project")
+ gitfs.StringVar(&o.ProjectAddr, "project-addr", o.ProjectAddr, "the storage for executable packages (in Ansible format)."+
+ " When starting with http or https, it will be obtained from a Git repository."+
+ "When starting with file path, it will be obtained from the local path.")
+ gitfs.StringVar(&o.ProjectBranch, "project-branch", o.ProjectBranch, "the git branch of the remote Addr")
+ gitfs.StringVar(&o.ProjectTag, "project-tag", o.ProjectTag, "the git tag of the remote Addr")
+ gitfs.BoolVar(&o.ProjectInsecureSkipTLS, "project-insecure-skip-tls", o.ProjectInsecureSkipTLS, "skip tls or not when git addr is https.")
+ gitfs.StringVar(&o.ProjectToken, "project-token", o.ProjectToken, "the token for private project.")
+
+ tfs := fss.FlagSet("tags")
+	tfs.StringArrayVar(&o.Tags, "tags", o.Tags, "the playbook tags to execute")
+	tfs.StringArrayVar(&o.SkipTags, "skip-tags", o.SkipTags, "the playbook tags to skip")
+
+ return fss
+}
+
+// Complete completes the options and creates the Pipeline, Config and Inventory
+func (o *KubeKeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
+ pipeline := &kkcorev1.Pipeline{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "run-",
+ Namespace: o.Namespace,
+ Annotations: make(map[string]string),
+ },
+ }
+	// complete the playbook; currently only one playbook is supported
+ if len(args) != 1 {
+ return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
+ }
+ o.Playbook = args[0]
+
+ pipeline.Spec = kkcorev1.PipelineSpec{
+ Project: kkcorev1.PipelineProject{
+ Addr: o.ProjectAddr,
+ Name: o.ProjectName,
+ Branch: o.ProjectBranch,
+ Tag: o.ProjectTag,
+ InsecureSkipTLS: o.ProjectInsecureSkipTLS,
+ Token: o.ProjectToken,
+ },
+ Playbook: o.Playbook,
+ Tags: o.Tags,
+ SkipTags: o.SkipTags,
+ Debug: o.Debug,
+ }
+ config, inventory, err := o.completeRef(pipeline)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ return pipeline, config, inventory, nil
+}
diff --git a/feature/cmd/kk/app/pipeline.go b/feature/cmd/kk/app/pipeline.go
new file mode 100644
index 000000000..0ab3af60a
--- /dev/null
+++ b/feature/cmd/kk/app/pipeline.go
@@ -0,0 +1,83 @@
+package app
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ "k8s.io/client-go/rest"
+ "k8s.io/klog/v2"
+ ctrl "sigs.k8s.io/controller-runtime"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/manager"
+ "github.com/kubesphere/kubekey/v4/pkg/proxy"
+)
+
+func newPipelineCommand() *cobra.Command {
+ o := options.NewPipelineOptions()
+
+ cmd := &cobra.Command{
+ Use: "pipeline",
+ Short: "Executor a pipeline in kubernetes",
+ RunE: func(*cobra.Command, []string) error {
+ _const.SetWorkDir(o.WorkDir)
+ restconfig, err := ctrl.GetConfig()
+ if err != nil {
+ klog.Infof("kubeconfig in empty, store resources local")
+ restconfig = &rest.Config{}
+ }
+ restconfig, err = proxy.NewConfig(restconfig)
+ if err != nil {
+ return fmt.Errorf("could not get rest config: %w", err)
+ }
+
+ client, err := ctrlclient.New(restconfig, ctrlclient.Options{
+ Scheme: _const.Scheme,
+ })
+ if err != nil {
+ return fmt.Errorf("could not create client: %w", err)
+ }
+ // get pipeline
+ var pipeline = new(kkcorev1.Pipeline)
+ if err := client.Get(ctx, ctrlclient.ObjectKey{
+ Name: o.Name,
+ Namespace: o.Namespace,
+ }, pipeline); err != nil {
+ return err
+ }
+ // get config
+ var config = new(kkcorev1.Config)
+ if err := client.Get(ctx, ctrlclient.ObjectKey{
+ Name: pipeline.Spec.ConfigRef.Name,
+ Namespace: pipeline.Spec.ConfigRef.Namespace,
+ }, config); err != nil {
+ return err
+ }
+ // get inventory
+ var inventory = new(kkcorev1.Inventory)
+ if err := client.Get(ctx, ctrlclient.ObjectKey{
+ Name: pipeline.Spec.InventoryRef.Name,
+ Namespace: pipeline.Spec.InventoryRef.Namespace,
+ }, inventory); err != nil {
+ return err
+ }
+
+ return manager.NewCommandManager(manager.CommandManagerOptions{
+ Pipeline: pipeline,
+ Config: config,
+ Inventory: inventory,
+ Client: client,
+ }).Run(ctx)
+ },
+ }
+
+ fs := cmd.Flags()
+ for _, f := range o.Flags().FlagSets {
+ fs.AddFlagSet(f)
+ }
+
+ return cmd
+}
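+
+// Illustrative usage (a sketch; the pipeline name is a placeholder): execute a
+// previously created Pipeline resource, together with its Config and Inventory
+// references, in-cluster:
+//
+//	kk pipeline --name create-cluster-xxxxx -n default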
diff --git a/feature/cmd/kk/app/precheck.go b/feature/cmd/kk/app/precheck.go
new file mode 100644
index 000000000..fdaf84ea6
--- /dev/null
+++ b/feature/cmd/kk/app/precheck.go
@@ -0,0 +1,66 @@
+//go:build builtin
+// +build builtin
+
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+func newPreCheckCommand() *cobra.Command {
+ o := options.NewPreCheckOptions()
+
+ cmd := &cobra.Command{
+ Use: "precheck tags...",
+ Short: "Check if the nodes is eligible for cluster deployment.",
+ Long: "the tags can specify check items. support: etcd, os, network, cri, nfs.",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ pipeline, config, inventory, err := o.Complete(cmd, append(args, "playbooks/precheck.yaml"))
+ if err != nil {
+ return err
+ }
+ // set workdir
+ _const.SetWorkDir(o.WorkDir)
+			// create the workdir directory if it does not exist
+ if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
+ if err := os.MkdirAll(o.WorkDir, os.ModePerm); err != nil {
+ return err
+ }
+ }
+
+ return run(ctx, pipeline, config, inventory)
+ },
+ }
+
+ flags := cmd.Flags()
+ for _, f := range o.Flags().FlagSets {
+ flags.AddFlagSet(f)
+ }
+
+ return cmd
+}
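+
+// Illustrative usage (a sketch): restrict the check to the etcd and os items;
+// RunE above appends the playbook argument automatically:
+//
+//	kk precheck etcd os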
+
+func init() {
+ registerInternalCommand(newPreCheckCommand())
+}
diff --git a/feature/cmd/kk/app/root.go b/feature/cmd/kk/app/root.go
new file mode 100644
index 000000000..a4225731b
--- /dev/null
+++ b/feature/cmd/kk/app/root.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "github.com/spf13/cobra"
+ "sigs.k8s.io/controller-runtime/pkg/manager/signals"
+
+ "github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
+)
+
+// ctx cancel by shutdown signal
+var ctx = signals.SetupSignalHandler()
+
+var internalCommand = make([]*cobra.Command, 0)
+
+func registerInternalCommand(command *cobra.Command) {
+ for _, c := range internalCommand {
+ if c.Name() == command.Name() {
+			// command has already been registered; skip
+ return
+ }
+ }
+ internalCommand = append(internalCommand, command)
+}
+
+// NewRootCommand console command.
+func NewRootCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "kk",
+ Long: "kubekey is a daemon that execute command in a node",
+ PersistentPreRunE: func(*cobra.Command, []string) error {
+ if err := options.InitGOPS(); err != nil {
+ return err
+ }
+
+ return options.InitProfiling(ctx)
+ },
+ PersistentPostRunE: func(*cobra.Command, []string) error {
+ return options.FlushProfiling()
+ },
+ }
+ cmd.SetContext(ctx)
+
+ // add common flag
+ flags := cmd.PersistentFlags()
+ options.AddProfilingFlags(flags)
+ options.AddKlogFlags(flags)
+ options.AddGOPSFlags(flags)
+
+ cmd.AddCommand(newRunCommand())
+ cmd.AddCommand(newPipelineCommand())
+ cmd.AddCommand(newVersionCommand())
+ // internal command
+ cmd.AddCommand(internalCommand...)
+
+ return cmd
+}
diff --git a/feature/cmd/kk/app/run.go b/feature/cmd/kk/app/run.go
new file mode 100644
index 000000000..2f6d09751
--- /dev/null
+++ b/feature/cmd/kk/app/run.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+ "k8s.io/client-go/rest"
+ "k8s.io/klog/v2"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/manager"
+ "github.com/kubesphere/kubekey/v4/pkg/proxy"
+)
+
+func newRunCommand() *cobra.Command {
+ o := options.NewKubeKeyRunOptions()
+
+ cmd := &cobra.Command{
+ Use: "run [playbook]",
+ Short: "run a playbook by playbook file. the file source can be git or local",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ kk, config, inventory, err := o.Complete(cmd, args)
+ if err != nil {
+ return err
+ }
+ // set workdir
+ _const.SetWorkDir(o.WorkDir)
+			// create the workdir directory if it does not exist
+ if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
+ if err := os.MkdirAll(o.WorkDir, os.ModePerm); err != nil {
+ return err
+ }
+ }
+
+ return run(ctx, kk, config, inventory)
+ },
+ }
+
+ for _, f := range o.Flags().FlagSets {
+ cmd.Flags().AddFlagSet(f)
+ }
+
+ return cmd
+}
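+
+// Illustrative usage (a sketch; the repository URL and playbook path are
+// placeholders): run a playbook fetched from a remote Git project:
+//
+//	kk run --project-addr https://github.com/example/playbooks.git \
+//	  --project-branch main playbooks/demo.yaml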
+
+func run(ctx context.Context, pipeline *kkcorev1.Pipeline, config *kkcorev1.Config, inventory *kkcorev1.Inventory) error {
+ restconfig, err := proxy.NewConfig(&rest.Config{})
+ if err != nil {
+ return fmt.Errorf("could not get rest config: %w", err)
+ }
+ client, err := ctrlclient.New(restconfig, ctrlclient.Options{
+ Scheme: _const.Scheme,
+ })
+ if err != nil {
+ return fmt.Errorf("could not get runtime-client: %w", err)
+ }
+
+ // create config
+ if err := client.Create(ctx, config); err != nil {
+ klog.ErrorS(err, "Create config error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
+
+ return err
+ }
+ // create inventory
+ if err := client.Create(ctx, inventory); err != nil {
+ klog.ErrorS(err, "Create inventory error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
+
+ return err
+ }
+ // create pipeline
+ pipeline.Status.Phase = kkcorev1.PipelinePhaseRunning
+ if err := client.Create(ctx, pipeline); err != nil {
+ klog.ErrorS(err, "Create pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
+
+ return err
+ }
+
+ return manager.NewCommandManager(manager.CommandManagerOptions{
+ Pipeline: pipeline,
+ Config: config,
+ Inventory: inventory,
+ Client: client,
+ }).Run(ctx)
+}
diff --git a/feature/cmd/kk/app/version.go b/feature/cmd/kk/app/version.go
new file mode 100644
index 000000000..84a80c450
--- /dev/null
+++ b/feature/cmd/kk/app/version.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/kubesphere/kubekey/v4/version"
+)
+
+func newVersionCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "version",
+ Short: "Print the version of KubeSphere controller-manager",
+ Run: func(cmd *cobra.Command, _ []string) {
+ cmd.Println(version.Get())
+ },
+ }
+}
diff --git a/feature/cmd/kk/kubekey.go b/feature/cmd/kk/kubekey.go
new file mode 100644
index 000000000..a87ca768c
--- /dev/null
+++ b/feature/cmd/kk/kubekey.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "os"
+
+ "k8s.io/component-base/cli"
+
+ "github.com/kubesphere/kubekey/v4/cmd/kk/app"
+)
+
+func main() {
+ command := app.NewRootCommand()
+ code := cli.Run(command)
+ os.Exit(code)
+}
diff --git a/feature/config/kubekey/Chart.yaml b/feature/config/kubekey/Chart.yaml
new file mode 100644
index 000000000..34a9ea8ed
--- /dev/null
+++ b/feature/config/kubekey/Chart.yaml
@@ -0,0 +1,15 @@
+apiVersion: v2
+name: kubekey
+description: A Helm chart for kubekey
+
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 1.0.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+appVersion: "dev"
diff --git a/feature/config/kubekey/crds/kubekey.kubesphere.io_configs.yaml b/feature/config/kubekey/crds/kubekey.kubesphere.io_configs.yaml
new file mode 100644
index 000000000..ebee9237f
--- /dev/null
+++ b/feature/config/kubekey/crds/kubekey.kubesphere.io_configs.yaml
@@ -0,0 +1,44 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.15.0
+ name: configs.kubekey.kubesphere.io
+spec:
+ group: kubekey.kubesphere.io
+ names:
+ kind: Config
+ listKind: ConfigList
+ plural: configs
+ singular: config
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+      description: Config stores global vars for the playbook.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: true
diff --git a/feature/config/kubekey/crds/kubekey.kubesphere.io_inventories.yaml b/feature/config/kubekey/crds/kubekey.kubesphere.io_inventories.yaml
new file mode 100644
index 000000000..dc0ecaf85
--- /dev/null
+++ b/feature/config/kubekey/crds/kubekey.kubesphere.io_inventories.yaml
@@ -0,0 +1,74 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.15.0
+ name: inventories.kubekey.kubesphere.io
+spec:
+ group: kubekey.kubesphere.io
+ names:
+ kind: Inventory
+ listKind: InventoryList
+ plural: inventories
+ singular: inventory
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+      description: Inventory stores host vars for the playbook.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: InventorySpec of Inventory
+ properties:
+ groups:
+ additionalProperties:
+ description: InventoryGroup of Inventory
+ properties:
+ groups:
+ items:
+ type: string
+ type: array
+ hosts:
+ items:
+ type: string
+ type: array
+ vars:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+            description: Groups of nodes. A group may contain repeated nodes.
+ type: object
+ hosts:
+ additionalProperties:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+            description: Hosts contains all nodes
+ type: object
+ vars:
+            description: 'Vars for all hosts. the priority for vars is: host vars
+              > group vars > inventory vars'
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ type: object
+ served: true
+ storage: true
diff --git a/feature/config/kubekey/crds/kubekey.kubesphere.io_pipelines.yaml b/feature/config/kubekey/crds/kubekey.kubesphere.io_pipelines.yaml
new file mode 100644
index 000000000..775f66597
--- /dev/null
+++ b/feature/config/kubekey/crds/kubekey.kubesphere.io_pipelines.yaml
@@ -0,0 +1,2034 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.15.0
+ name: pipelines.kubekey.kubesphere.io
+spec:
+ group: kubekey.kubesphere.io
+ names:
+ kind: Pipeline
+ listKind: PipelineList
+ plural: pipelines
+ singular: pipeline
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.playbook
+ name: Playbook
+ type: string
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ - jsonPath: .status.taskResult.total
+ name: Total
+ type: integer
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+      description: Pipeline resource executes a playbook.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: PipelineSpec of pipeline.
+ properties:
+ configRef:
+ description: ConfigRef is the global variable configuration for playbook
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ TODO: this design is not final and this field is subject to change in the future.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
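+# Minimal sketch of a configRef (object names are hypothetical): only
+# the corev1.ObjectReference fields above are accepted, e.g.
+#
+#   configRef:
+#     kind: Config
+#     name: sample-config
+#     namespace: default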
+ debug:
+ description: |-
+ If Debug mode is true, It will retain runtime data after a successful execution of Pipeline,
+ which includes task execution status and parameters.
+ type: boolean
+ inventoryRef:
+ description: InventoryRef is the node configuration for playbook
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ TODO: this design is not final and this field is subject to change in the future.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ jobSpec:
+ description: when executed in Kubernetes, pipeline will create a Job or
+ CronJob to execute.
+ properties:
+ activeDeadlineSeconds:
+ description: |-
+ Specifies the duration in seconds relative to the startTime that the job
+ may be continuously active before the system tries to terminate it; value
+ must be positive integer. If a Job is suspended (at creation or through an
+ update), this timer will effectively be stopped and reset when the Job is
+ resumed again.
+ format: int64
+ type: integer
+ failedJobsHistoryLimit:
+ description: |-
+ The number of failed finished jobs to retain. Value must be non-negative integer.
+ Defaults to 1.
+ format: int32
+ type: integer
+ schedule:
+ description: |-
+ when Schedule is not empty, pipeline will create a CronJob; otherwise pipeline will create a Job.
+ The schedule is in Cron format, see https://en.wikipedia.org/wiki/Cron.
+ type: string
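+# Illustrative value (hypothetical): with a schedule set, the
+# controller creates a CronJob; with it empty, a one-shot Job.
+#
+#   schedule: "0 3 * * *"   # run the playbook daily at 03:00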
+ successfulJobsHistoryLimit:
+ description: |-
+ The number of successful finished jobs to retain. Value must be non-negative integer.
+ Defaults to 3.
+ format: int32
+ type: integer
+ suspend:
+ description: |-
+ suspend specifies whether the Job controller should create Pods or not. If
+ a Job is created with suspend set to true, no Pods are created by the Job
+ controller. If a Job is suspended after creation (i.e. the flag goes from
+ false to true), the Job controller will delete all active Pods associated
+ with this Job. Users must design their workload to gracefully handle this.
+ Suspending a Job will reset the StartTime field of the Job, effectively
+ resetting the ActiveDeadlineSeconds timer too. Defaults to false.
+ type: boolean
+ ttlSecondsAfterFinished:
+ description: |-
+ ttlSecondsAfterFinished limits the lifetime of a Job that has finished
+ execution (either Complete or Failed). If this field is set,
+ ttlSecondsAfterFinished after the Job finishes, it is eligible to be
+ automatically deleted. When the Job is being deleted, its lifecycle
+ guarantees (e.g. finalizers) will be honored. If this field is unset,
+ the Job won't be automatically deleted. If this field is set to zero,
+ the Job becomes eligible to be deleted immediately after it finishes.
+ format: int32
+ type: integer
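+# e.g. ttlSecondsAfterFinished: 300 would delete the finished Job
+# after five minutes (illustrative value).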
+ volumeMounts:
+ description: VolumeMounts in job pod.
+ items:
+ description: VolumeMount describes a mounting of a Volume within
+ a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
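+# Minimal sketch (hypothetical names): each volumeMounts entry must
+# match a workVolume entry (below) by name, e.g. an emptyDir work dir:
+#
+#   volumeMounts:
+#     - name: workdir
+#       mountPath: /kubekey
+#   workVolume:
+#     - name: workdir
+#       emptyDir: {}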
+ workVolume:
+ description: Volumes in job pod.
+ items:
+ description: Volume represents a named volume in a pod that
+ may be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly value true will force the readOnly setting in VolumeMounts.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: boolean
+ volumeID:
+ description: |-
+ volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ description: azureDisk represents an Azure Data Disk mount
+ on the host and bind mount to the pod.
+ properties:
+ cachingMode:
+ description: 'cachingMode is the Host Caching mode:
+ None, Read Only, Read Write.'
+ type: string
+ diskName:
+ description: diskName is the Name of the data disk in
+ the blob storage
+ type: string
+ diskURI:
+ description: diskURI is the URI of data disk in the
+ blob storage
+ type: string
+ fsType:
+ description: |-
+ fsType is Filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ kind:
+ description: 'kind expected values are Shared: multiple
+ blob disks per storage account; Dedicated: single
+ blob disk per storage account; Managed: azure managed
+ data disk (only in managed availability set). Defaults
+ to shared'
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ description: azureFile represents an Azure File Service
+ mount on the host and bind mount to the pod.
+ properties:
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretName:
+ description: secretName is the name of secret that
+ contains Azure Storage Account Name and Key
+ type: string
+ shareName:
+ description: shareName is the azure share Name
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ description: cephFS represents a Ceph FS mount on the host
+ that shares a pod's lifetime
+ properties:
+ monitors:
+ description: |-
+ monitors is Required: Monitors is a collection of Ceph monitors
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ path:
+ description: 'path is Optional: Used as the mounted
+ root, rather than the full Ceph tree, default is /'
+ type: string
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: boolean
+ secretFile:
+ description: |-
+ secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ secretRef:
+ description: |-
+ secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is optional: User is the rados user name, default is admin
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ description: |-
+ cinder represents a cinder volume attached and mounted on kubelets host machine.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is optional: points to a secret object containing parameters used to connect
+ to OpenStack.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeID:
+ description: |-
+ volumeID used to identify the volume in cinder.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ description: configMap represents a configMap that should
+ populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
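+# For example: octal 0644 and decimal 420 denote the same mode bits;
+# JSON manifests must use the decimal form (420).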
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a
+ volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers (Beta feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about the
+ pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: Items is a list of downward API volume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents information
+ to create the file containing the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of the
+ pod: only annotations, labels, name and namespace
+ are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative
+ path name of the file to be created. Must not
+ be absolute or contain the ''..'' path. Must
+ be utf-8 encoded. The first item of the relative
+ path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for
+ volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
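+# Example quantities accepted by the pattern above: "512Mi", "1Gi",
+# or a plain/scientific number of bytes such as "1e9".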
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description: Name is the name of resource
+ being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description: Name is the name of resource
+ being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over
+ volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource that
+ is attached to a kubelet's host machine and then exposed
+ to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target worldwide
+ names (WWNs)'
+ items:
+ type: string
+ type: array
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description: driver is the name of the driver to use
+ for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description: flocker represents a Flocker volume attached
+ to a kubelet's host machine. This depends on the Flocker
+ control service being running
+ properties:
+ datasetName:
+ description: |-
+ datasetName is the name of the dataset stored as metadata -> name on the dataset for Flocker;
+ it should be considered deprecated
+ type: string
+ datasetUUID:
+ description: datasetUUID is the UUID of the dataset.
+ This is unique identifier of a Flocker dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for the specified
+ revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ ---
+ TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ mount host directories as read/write.
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+ readOnly Will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ description: photonPersistentDisk represents a PhotonController
+ persistent disk attached and mounted on kubelets host
+ machine
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description: pdID is the ID that identifies Photon Controller
+ persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description: portworxVolume represents a portworx volume
+ attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fsType represents the filesystem type to mount
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description: volumeID uniquely identifies a Portworx
+ volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description: projected items for all in one resources secrets,
+ configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: sources is the list of volume projections
+ items:
+ description: Projection that may be projected along
+ with other supported volume types
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description: Relative path from the volume
+ root to write the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description: configMap information about the configMap
+ data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path
+ within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: optional specify whether the
+ ConfigMap or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description: downwardAPI information about the
+ downwardAPI data to project
+ properties:
+ items:
+ description: Items is a list of DownwardAPIVolume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing
+ the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field
+ of the pod: only annotations, labels,
+ name and namespace are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema
+ the FieldPath is written in terms
+ of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to
+ select in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the
+ relative path name of the file to
+ be created. Must not be absolute or
+ contain the ''..'' path. Must be utf-8
+ encoded. The first item of the relative
+ path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env
+ vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource
+ to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ secret:
+ description: secret information about the secret
+ data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path
+ within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: optional field specify whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ description: quobyte represents a Quobyte mount on the host
+ that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references an already
+ created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ pool:
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is the rados user name.
+ Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ description: scaleIO represents a ScaleIO persistent volume
+ attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs".
+ Default is "xfs".
+ type: string
+ gateway:
+ description: gateway is the host address of the ScaleIO
+ API Gateway.
+ type: string
+ protectionDomain:
+ description: protectionDomain is the name of the ScaleIO
+ Protection Domain for the configured storage.
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef references to the secret for ScaleIO user and other
+ sensitive information. If this is not provided, Login operation will fail.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ sslEnabled:
+ description: sslEnabled Flag enable/disable SSL communication
+ with Gateway, default false
+ type: boolean
+ storageMode:
+ description: |-
+ storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ Default is ThinProvisioned.
+ type: string
+ storagePool:
+ description: storagePool is the ScaleIO Storage Pool
+ associated with the protection domain.
+ type: string
+ system:
+ description: system is the name of the storage system
+ as configured in ScaleIO.
+ type: string
+ volumeName:
+ description: |-
+ volumeName is the name of a volume already created in the ScaleIO system
+ that is associated with this volume source.
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ description: |-
+ secret represents a secret that should populate this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values
+ for mode bits. Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items If unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a
+ volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ optional:
+              description: optional field specifies whether the Secret
+ or its keys must be defined
+ type: boolean
+ secretName:
+ description: |-
+ secretName is the name of the secret in the pod's namespace to use.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ type: string
+ type: object
+ storageos:
+ description: storageOS represents a StorageOS volume attached
+ and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef specifies the secret to use for obtaining the StorageOS API
+ credentials. If not specified, default values will be attempted.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeName:
+ description: |-
+ volumeName is the human-readable name of the StorageOS volume. Volume
+ names are only unique within a namespace.
+ type: string
+ volumeNamespace:
+ description: |-
+ volumeNamespace specifies the scope of the volume within StorageOS. If no
+ namespace is specified then the Pod's namespace will be used. This allows the
+ Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ Set VolumeName to any name to override the default behaviour.
+ Set to "default" if you are not using namespaces within StorageOS.
+ Namespaces that do not pre-exist within StorageOS will be created.
+ type: string
+ type: object
+ vsphereVolume:
+ description: vsphereVolume represents a vSphere volume attached
+                    and mounted on the kubelet's host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ storagePolicyID:
+ description: storagePolicyID is the storage Policy Based
+ Management (SPBM) profile ID associated with the StoragePolicyName.
+ type: string
+ storagePolicyName:
+ description: storagePolicyName is the storage Policy
+ Based Management (SPBM) profile name.
+ type: string
+ volumePath:
+ description: volumePath is the path that identifies
+ vSphere volume vmdk
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ playbook:
+          description: Playbook to execute.
+ type: string
+ project:
+          description: Project is the storage for executable packages
+ properties:
+ addr:
+ description: |-
+ Addr is the storage for executable packages (in Ansible file format).
+ When starting with http or https, it will be obtained from a Git repository.
+ When starting with file path, it will be obtained from the local path.
+ type: string
+ branch:
+ description: Branch is the git branch of the git Addr.
+ type: string
+ insecureSkipTLS:
+              description: InsecureSkipTLS skips TLS verification when the
+                git addr is https.
+ type: boolean
+ name:
+              description: Name is the project name.
+ type: string
+ tag:
+              description: Tag is the git tag of the git Addr.
+ type: string
+ token:
+              description: Token is the authorization token for the http request
+ type: string
+ type: object
+ skipTags:
+          description: SkipTags is the list of playbook tags to skip during execution
+ items:
+ type: string
+ type: array
+ tags:
+          description: Tags is the list of playbook tags to execute
+ items:
+ type: string
+ type: array
+ required:
+ - playbook
+ type: object
+ status:
+ description: PipelineStatus of Pipeline
+ properties:
+ failedDetail:
+ description: FailedDetail will record the failed tasks.
+ items:
+            description: PipelineFailedDetail stores the failure message when
+              the pipeline run fails.
+ properties:
+ hosts:
+                description: Hosts is the list of per-host results for the failed task.
+ items:
+                  description: PipelineFailedDetailHost details the failure
+                    message for each host.
+ properties:
+ host:
+ description: Host name of failed task.
+ type: string
+ stdErr:
+ description: StdErr of failed task.
+ type: string
+ stdout:
+ description: Stdout of failed task.
+ type: string
+ type: object
+ type: array
+ task:
+ description: Task name of failed task.
+ type: string
+ type: object
+ type: array
+ phase:
+ description: Phase of pipeline.
+ type: string
+ reason:
+          description: Reason of the pipeline failure.
+ type: string
+ taskResult:
+          description: TaskResult is the aggregated execution result of the related tasks.
+ properties:
+ failed:
+ description: Failed number of tasks.
+ type: integer
+ ignored:
+ description: Ignored number of tasks.
+ type: integer
+ success:
+ description: Success number of tasks.
+ type: integer
+ total:
+ description: Total number of tasks.
+ type: integer
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/feature/config/kubekey/templates/_helpers.tpl b/feature/config/kubekey/templates/_helpers.tpl
new file mode 100644
index 000000000..74356011a
--- /dev/null
+++ b/feature/config/kubekey/templates/_helpers.tpl
@@ -0,0 +1,45 @@
+{{/*
+Common labels
+*/}}
+{{- define "common.labels" -}}
+helm.sh/chart: {{ include "common.chart" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "common.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{- define "operator.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.operator.image "global" .Values.global "chart" .Chart )}}
+{{- end -}}
+{{- define "executor.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.executor.image "global" .Values.global "chart" .Chart ) }}
+{{- end -}}
+
+{{- define "common.images.image" -}}
+{{- $registryName := .global.imageRegistry -}}
+{{- $repositoryName := .imageRoot.repository -}}
+{{- $separator := ":" -}}
+{{- $termination := .chart.AppVersion | toString -}}
+{{- if .global.tag }}
+{{- $termination = .global.tag | toString -}}
+{{- end -}}
+{{- if .imageRoot.registry }}
+ {{- $registryName = .imageRoot.registry -}}
+{{- end -}}
+{{- if .imageRoot.tag }}
+ {{- $termination = .imageRoot.tag | toString -}}
+{{- end -}}
+{{- if .imageRoot.digest }}
+ {{- $separator = "@" -}}
+ {{- $termination = .imageRoot.digest | toString -}}
+{{- end -}}
+{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
+{{- end -}}
diff --git a/feature/config/kubekey/templates/_tplvalues.tpl b/feature/config/kubekey/templates/_tplvalues.tpl
new file mode 100644
index 000000000..2db166851
--- /dev/null
+++ b/feature/config/kubekey/templates/_tplvalues.tpl
@@ -0,0 +1,13 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains template.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+ {{- if typeIs "string" .value }}
+ {{- tpl .value .context }}
+ {{- else }}
+ {{- tpl (.value | toYaml) .context }}
+ {{- end }}
+{{- end -}}
diff --git a/feature/config/kubekey/templates/deployment.yaml b/feature/config/kubekey/templates/deployment.yaml
new file mode 100644
index 000000000..d0b11700c
--- /dev/null
+++ b/feature/config/kubekey/templates/deployment.yaml
@@ -0,0 +1,76 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels: {{ include "common.labels" . | nindent 4 }}
+ app: kk-operator
+ name: kk-operator
+ namespace: {{ .Release.Namespace }}
+spec:
+ strategy:
+ rollingUpdate:
+ maxSurge: 0
+ type: RollingUpdate
+ progressDeadlineSeconds: 600
+ replicas: {{ .Values.operator.replicaCount }}
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app: kk-operator
+ template:
+ metadata:
+ labels: {{ include "common.labels" . | nindent 8 }}
+ app: kk-operator
+ spec:
+ serviceAccountName: kk-operator
+ {{- if .Values.operator.pullSecrets }}
+      imagePullSecrets: {{- toYaml .Values.operator.pullSecrets | nindent 8 }}
+ {{- end }}
+ {{- if .Values.operator.nodeSelector }}
+      nodeSelector: {{- toYaml .Values.operator.nodeSelector | nindent 8 }}
+ {{- end }}
+ {{- if .Values.operator.affinity }}
+      affinity: {{- toYaml .Values.operator.affinity | nindent 8 }}
+ {{- end }}
+ {{- if .Values.operator.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.operator.tolerations "context" .) | nindent 8 }}
+ {{- end }}
+ dnsPolicy: {{ .Values.operator.dnsPolicy }}
+ restartPolicy: {{ .Values.operator.restartPolicy }}
+ schedulerName: {{ .Values.operator.schedulerName }}
+ terminationGracePeriodSeconds: {{ .Values.operator.terminationGracePeriodSeconds }}
+ containers:
+      - name: kk-controller-manager
+ image: {{ template "operator.image" . }}
+ imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
+ {{- if .Values.operator.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.operator.command "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ {{- if .Values.operator.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.operator.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ - name: EXECUTOR_IMAGE
+ value: {{ template "executor.image" . }}
+ - name: EXECUTOR_IMAGE_PULLPOLICY
+ value: {{ .Values.executor.image.pullPolicy }}
+ - name: EXECUTOR_SERVICEACCOUNT
+ value: kk-executor
+ {{- if .Values.operator.resources }}
+ resources: {{- toYaml .Values.operator.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - mountPath: /etc/localtime
+ name: host-time
+ readOnly: true
+ {{- if .Values.operator.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.operator.extraVolumeMounts "context" $) | nindent 12 }}
+ {{- end }}
+ volumes:
+ - hostPath:
+ path: /etc/localtime
+ type: ""
+ name: host-time
+ {{- if .Values.operator.extraVolumes }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.operator.extraVolumes "context" $) | nindent 8 }}
+ {{- end }}
diff --git a/feature/config/kubekey/templates/role.yaml b/feature/config/kubekey/templates/role.yaml
new file mode 100644
index 000000000..a70ec0cc1
--- /dev/null
+++ b/feature/config/kubekey/templates/role.yaml
@@ -0,0 +1,90 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: kk-operator
+ labels: {{- include "common.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - kubekey.kubesphere.io
+ resources:
+ - configs
+ - inventories
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kubekey.kubesphere.io
+ resources:
+ - pipelines
+ - pipelines/status
+ verbs:
+ - "*"
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - "*"
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - "*"
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+- apiGroups:
+ - "rbac.authorization.k8s.io"
+ resources:
+ - clusterrolebindings
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: kk-executor
+ labels: {{- include "common.labels" . | nindent 4 }}
+rules:
+ - apiGroups:
+ - kubekey.kubesphere.io
+ resources:
+ - configs
+ - inventories
+ - pipelines
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - kubekey.kubesphere.io
+ resources:
+ - pipelines/status
+ verbs:
+ - update
+ - patch
diff --git a/feature/config/kubekey/templates/serviceaccount.yaml b/feature/config/kubekey/templates/serviceaccount.yaml
new file mode 100644
index 000000000..9dd2d0ec3
--- /dev/null
+++ b/feature/config/kubekey/templates/serviceaccount.yaml
@@ -0,0 +1,23 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: kk-operator
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "common.labels" . | nindent 4}}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: kk-operator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kk-operator
+subjects:
+ - kind: ServiceAccount
+ name: kk-operator
+ namespace: {{ .Release.Namespace }}
+
diff --git a/feature/config/kubekey/values.yaml b/feature/config/kubekey/values.yaml
new file mode 100644
index 000000000..ffbf3bf5d
--- /dev/null
+++ b/feature/config/kubekey/values.yaml
@@ -0,0 +1,83 @@
+## @section Common parameters
+##
+global:
+ imageRegistry: "docker.io"
+ tag: ""
+ imagePullSecrets: []
+
+operator:
+ # tolerations of operator pod
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ key: node.kubernetes.io/not-ready
+ operator: Exists
+ tolerationSeconds: 60
+ - effect: NoExecute
+ key: node.kubernetes.io/unreachable
+ operator: Exists
+ tolerationSeconds: 60
+ # affinity of operator pod
+  affinity: {}
+  # nodeSelector of operator pod
+  nodeSelector: {}
+ # dnsPolicy of operator pod
+ dnsPolicy: Default
+ # restartPolicy of operator pod
+ restartPolicy: Always
+ # schedulerName of operator pod
+ schedulerName: default-scheduler
+ # terminationGracePeriodSeconds of operator pod
+ terminationGracePeriodSeconds: 30
+ # replica of operator deployment
+ replicaCount: 1
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## e.g:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ pullSecrets: []
+ image:
+ registry: ""
+ repository: kubesphere/kk-controller-manager
+ tag: ""
+ digest: ""
+ pullPolicy: IfNotPresent
+ ##
+ ## @param resources.limits The resources limits for the operator containers
+ ## @param resources.requests The requested resources for the operator containers
+ ##
+ resources:
+ limits:
+ cpu: 1
+ memory: 1000Mi
+ requests:
+ cpu: 30m
+ memory: 50Mi
+ ## @param command Override default container command (useful when using custom images)
+ ##
+ command:
+ - controller-manager
+ - --logtostderr=true
+ - --leader-election=true
+  ## @param extraEnvVars Array with extra environment variables to add to the operator container
+ ##
+ extraEnvVars: []
+  ## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the operator container(s)
+ ##
+ extraVolumeMounts: []
+  ## @param extraVolumes Optionally specify extra list of additional volumes for the operator pod(s)
+ ##
+ extraVolumes: []
+
+executor:
+ image:
+ registry: ""
+ repository: kubesphere/kk-executor
+ tag: ""
+ digest: ""
+ pullPolicy: IfNotPresent
diff --git a/feature/docs/zh/001-project.md b/feature/docs/zh/001-project.md
new file mode 100644
index 000000000..01acf678b
--- /dev/null
+++ b/feature/docs/zh/001-project.md
@@ -0,0 +1,39 @@
+# Project
+A project stores the task templates to be executed and consists of a set of yaml files.
+To make it easy to understand and get started quickly, kk borrows from the [ansible](https://github.com/ansible/ansible) task-orchestration conventions when abstracting tasks.
+## Directory layout
+```text
+|-- project
+|   |-- playbooks/
+|       |-- playbook1.yaml
+|       |-- playbook2.yaml
+|   |-- roles/
+|   |   |-- roleName1/
+|   |   |-- roleName2/
+...
+```
+**[playbooks](002-playbook.md)**: the execution entry point, holding a set of playbooks. A playbook can define multiple tasks or roles; each run executes them in the order they are defined.
+**[roles](003-role.md)**: a collection of roles. A role is a group of tasks.
+## Storage locations
+A project can be built in, stored locally, or hosted on a git server.
+### Built-in
+Built-in projects live in the `builtin` directory and are bundled into the kubekey commands.
+Example:
+```shell
+kk precheck
+```
+This runs the `playbooks/precheck.yaml` playbook file from the `builtin` directory.
+### Local
+Example command:
+```shell
+kk run playbooks/demo.yaml --project-addr=$(ProjectDir)
+```
+This runs the `playbooks/demo.yaml` playbook file from the `$(ProjectDir)` directory.
+### git
+Example command:
+```shell
+kk run playbooks/demo.yaml \
+  --project-addr=$(GIT_URL) \
+  --project-branch=$(GIT_BRANCH)
+```
+This runs the `playbooks/demo.yaml` playbook file from the git repository at `$(GIT_URL)` on branch `$(GIT_BRANCH)`.
diff --git a/feature/docs/zh/002-playbook.md b/feature/docs/zh/002-playbook.md
new file mode 100644
index 000000000..f4435d8ba
--- /dev/null
+++ b/feature/docs/zh/002-playbook.md
@@ -0,0 +1,62 @@
+# Playbook
+## File definition
+A playbook file executes multiple playbooks in their definition order; each playbook specifies which tasks run on which hosts.
+```yaml
+- import_playbook: others/playbook.yaml
+
+- name: Playbook Name
+  tags: ["always"]
+  hosts: ["host1", "host2"]
+  serial: 1
+  run_once: false
+  ignore_errors: false
+  gather_facts: false
+  vars: {a: b}
+  vars_files: ["vars/variables.yaml"]
+  pre_tasks:
+    - name: Task Name
+      debug:
+        msg: "I'm Task"
+  roles:
+    - role: role1
+      when: true
+  tasks:
+    - name: Task Name
+      debug:
+        msg: "I'm Task"
+  post_tasks:
+    - name: Task Name
+      debug:
+        msg: "I'm Task"
+```
+**import_playbook**: the name of a referenced playbook file, usually a relative path. The file lookup order is: `$(project_dir)/playbooks/`, `$(current_dir)/playbooks/`, `$(current_dir)/`.
+**name**: the playbook name. Optional.
+**tags**: tags of the playbook. Optional. They apply only to the playbook itself; the roles and tasks under it do not inherit them.
+When executing, command-line flags select which playbooks run. For example:
+- `kk run [playbook] --tags tag1 --tags tag2`: run playbooks tagged tag1 or tag2.
+- `kk run [playbook] --skip-tags tag1 --skip-tags tag2`: skip playbooks tagged tag1 or tag2.
+Playbooks tagged `always` always run; playbooks tagged `never` never run.
+Passing `all` selects every playbook; passing `tagged` selects only the playbooks that carry tags.
+**hosts**: which machines to run on. Required. All hosts except localhost must be defined in the `inventory`; both host names and group names are accepted.
+**serial**: run the playbook in batches. Optional; a single value (string or number) or a list of values. Defaults to one batch. See the sketch after this field list.
+- When serial is a list of numbers, `hosts` are grouped into batches of those fixed sizes; beyond the end of the list, the last value is reused.
+  For example, with serial [1, 2] and hosts [a, b, c, d], the playbook runs in 3 batches: the first on [a], the second on [b, c], the third on [d].
+- When serial contains percentages, the actual batch size is computed from each percentage (rounded down to an integer) before grouping; beyond the end of the list, the last value is reused.
+  For example, with serial [30%, 60%] and hosts [a, b, c, d], serial first evaluates to [1.2, 2.4], i.e. [1, 2].
+Percentages and numbers can be mixed.
+**run_once**: whether to run only once. Optional; defaults to false. When true, the playbook runs on the first host only.
+**ignore_errors**: whether to ignore failures of the tasks under this playbook. Optional; defaults to false.
+**gather_facts**: whether to collect host facts. Optional; defaults to false. The data collected depends on the connector:
+- localConnector: release (/etc/os-release), kernel_version (uname -r), hostname (hostname), architecture (arch). Currently linux only.
+- sshConnector: release (/etc/os-release), kernel_version (uname -r), hostname (hostname), architecture (arch). Currently linux only.
+- kubernetesConnector: nothing yet.
+**vars**: default parameters. Optional; yaml format.
+**vars_files**: default parameters loaded from yaml files. Optional. Keys defined in vars and vars_files must not overlap.
+**pre_tasks**: [tasks](004-task.md) to execute. Optional.
+**roles**: [roles](003-role.md) to execute. Optional.
+**tasks**: [tasks](004-task.md) to execute. Optional.
+**post_tasks**: [tasks](004-task.md) to execute. Optional.
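+
+As referenced in the `serial` field above, a minimal sketch of batched execution (host and service names are illustrative):
+```yaml
+- name: Rolling restart
+  hosts: ["node1", "node2", "node3", "node4"]
+  serial: [1, "50%"]   # batch 1: one host; each later batch: 50% of the hosts, rounded down
+  tasks:
+    - name: Restart a service
+      command: systemctl restart demo.service
+```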
+## Playbook execution order
+Across playbooks: they run in their definition order. `import_playbook` entries are expanded into playbooks from the referenced files.
+Within a playbook: tasks run in the order pre_tasks -> roles -> tasks -> post_tasks.
+When any task fails (excluding ignored failures), the playbook fails.
diff --git a/feature/docs/zh/003-role.md b/feature/docs/zh/003-role.md
new file mode 100644
index 000000000..78f359ed5
--- /dev/null
+++ b/feature/docs/zh/003-role.md
@@ -0,0 +1,41 @@
+# Role
+A role is a group of tasks.
+## Referencing a role in a playbook file
+```yaml
+- name: Playbook Name
+  #...
+  roles:
+    - name: Role Name
+      tags: ["always"]
+      when: true
+      run_once: false
+      ignore_errors: false
+      vars: {a: b}
+      role: Role-ref Name
+```
+**name**: the role name. Optional. This name is different from the reference name given in `role`.
+**tags**: tags of the role. Optional. They apply only to the role itself; the tasks under the role do not inherit them.
+**when**: execution condition; a single value (string) or a list of values. Optional; by default the role runs. Evaluated separately for each host.
+**run_once**: whether to run only once. Optional; defaults to false. When true, the role runs on the first host only.
+**ignore_errors**: whether to ignore failures of the tasks under this role. Optional; defaults to false.
+**role**: the reference name used in the playbook, matching a subdirectory of the roles directory. Required.
+**vars**: default parameters. Optional; yaml format.
+## Role directory layout
+```text
+|-- project
+|   |-- roles/
+|   |   |-- roleName/
+|   |   |   |-- defaults/
+|   |   |   |   |-- main.yaml
+|   |   |   |-- tasks/
+|   |   |   |   |-- main.yaml
+|   |   |   |-- templates/
+|   |   |   |   |-- template1
+|   |   |   |-- files/
+|   |   |   |   |-- file1
+```
+**roleName**: the reference name of the role; one or more directory levels.
+**defaults**: default parameter values for all tasks under the role, defined in the main.yaml file.
+**[tasks](004-task.md)**: the task templates of the role; a role can contain multiple tasks, defined in the main.yaml file.
+**templates**: template files, which usually reference variables; used by `template` tasks.
+**files**: raw files; used by `copy` tasks.
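+
+A minimal sketch of such a role layout (names and values are illustrative):
+```yaml
+# roles/demo/defaults/main.yaml: default variables for every task in the role
+greeting: hello
+```
+```yaml
+# roles/demo/tasks/main.yaml: the tasks executed when the role runs
+- name: Print greeting
+  debug:
+    msg: '{{ .greeting }}'
+```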
diff --git a/feature/docs/zh/004-task.md b/feature/docs/zh/004-task.md
new file mode 100644
index 000000000..066c9f1c2
--- /dev/null
+++ b/feature/docs/zh/004-task.md
@@ -0,0 +1,53 @@
+# Task
+Tasks come in two forms: single-level tasks and multi-level tasks.
+Single-level task: contains [module](005-module.md) fields and no block field. A task can contain only one module.
+Multi-level task: contains a block field and no [module](005-module.md) fields.
+When a task executes, it runs on each of the defined hosts.
+## File definition
+```yaml
+- include_tasks: other/task.yaml
+  tags: ["always"]
+  when: true
+  run_once: false
+  ignore_errors: false
+  vars: {a: b}
+
+- name: Block Name
+  tags: ["always"]
+  when: true
+  run_once: false
+  ignore_errors: false
+  vars: {a: b}
+  block:
+    - name: Task Name
+      # [module]
+  rescue:
+    - name: Task Name
+      # [module]
+  always:
+    - name: Task Name
+      # [module]
+
+- name: Task Name
+  tags: ["always"]
+  when: true
+  loop: [""]
+  #[module]
+```
+**include_tasks**: references another task template file from this task.
+**name**: the task name. Optional.
+**tags**: tags of the task. Optional. They apply only to this task; nested tasks do not inherit them.
+**when**: execution condition; a single value (string) or a list of values. Optional; by default the task runs. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**failed_when**: failure condition; a host that satisfies it is treated as failed. A single value (string) or a list of values. Optional. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**run_once**: whether to run only once. Optional; defaults to false. When true, the task runs on the first host only.
+**ignore_errors**: whether to ignore failures. Optional; defaults to false.
+**vars**: default parameters. Optional; yaml format.
+**[module fields](005-module.md)**: the actual operation the task performs. Optional (required when no block field is defined).
+**loop**: runs the module operation in a loop, passing each value to the module as `item: loop-value`. A single value (string) or a list of values. Optional. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**retries**: how many times to retry when the task fails.
+**register**: a string; stores the execution result in a [variable](201-variable.md) and passes it to later tasks (see the sketch after this field list). If the result is a json string, kk tries to parse it into a json structure in the variable (the key is the register value; the output value contains two fields, stderr and stdout):
+- stderr: failure output
+- stdout: success output
+**block**: a set of tasks. Optional (required when no module fields are defined). Always executed.
+**rescue**: a set of tasks. Optional. Executed when the block fails (the block fails as soon as any task in it fails).
+**always**: a set of tasks. Optional. Executed after block and rescue finish, whether they succeeded or failed.
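+
+A minimal sketch of register feeding a later task, based on the fields above (the command and variable names are illustrative, and the exact quoting of template expressions may vary):
+```yaml
+- name: Capture kubelet version
+  command: kubelet --version
+  register: kubelet_version
+
+- name: Print the captured output
+  when: '{{ ne .kubelet_version.stdout "" }}'
+  debug:
+    msg: 'kubelet reports: {{ .kubelet_version.stdout }}'
+```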
diff --git a/feature/docs/zh/005-module.md b/feature/docs/zh/005-module.md
new file mode 100644
index 000000000..264ba2628
--- /dev/null
+++ b/feature/docs/zh/005-module.md
@@ -0,0 +1,116 @@
+# Task modules
+A module defines the actual operation a task performs.
+## assert
+Asserts whether a variable on the host satisfies a condition.
+```yaml
+assert:
+  that: I'm assertion statement
+  success_msg: I'm success message
+  fail_msg: I'm failed message
+  msg: I'm failed message
+```
+**that**: the assertion statement. Required. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**success_msg**: output on success. Optional; defaults to "True".
+**fail_msg**: output on failure. Optional; defaults to "True".
+**msg**: output on failure. Optional; defaults to "True". `fail_msg` takes precedence.
+## command/shell
+Runs a command; command and shell are used in the same way.
+```yaml
+command: I'm command statement
+```
+The value is written in [template syntax](101-syntax.md) and evaluated separately for each host.
+
+## copy
+Copies a local file to the host.
+```yaml
+copy:
+  src: srcpath
+  content: srcpath
+  dest: destpath
+  mode: 0755
+```
+**src**: the source path; absolute or relative, a directory or a file. Optional (required when `content` is not defined). Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+- Absolute path: read from that absolute path on the machine running the command.
+- Relative path: resolved within `project_dir`, in this order: $(project_dir)/roles/roleName/files/$(srcpath) > $(project_dir)/playbooks/.../$(current_playbook)/roles/$(roleName)/files/$(srcpath) > $(project_dir)/files/$(srcpath).
+**content**: the source file content. Optional (required when `src` is not defined). Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**dest**: the destination; an absolute path on the host, a directory or a file (matching `src`; a directory must end with "/"). Required. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**mode**: the permission of the file copied to the host. Optional; defaults to the source file's permission.
+## fetch
+Fetches a file from the host to the local machine.
+```yaml
+fetch:
+  src: srcpath
+  dest: destpath
+```
+**src**: the source file; an absolute path on the host. Required. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**dest**: the destination; a local absolute path, a directory or a file (matching `src`; a directory must end with "/"). Required. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+## debug
+Prints information.
+```yaml
+debug:
+  var: I'm variable statement
+  msg: I'm message statement
+```
+**var**: the variable to print. Optional. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**msg**: the message to print. Optional. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+## template
+Files under templates are written in [template syntax](101-syntax.md) and evaluated separately for each host.
+The file content is rendered into an actual file and then copied to the host.
+```yaml
+template:
+  src: srcpath
+  dest: destpath
+  mode: 0755
+```
+**src**: the source path; absolute or relative, a directory or a file. Required. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+- Absolute path: read from that absolute path on the machine running the command.
+- Relative path: resolved within `project_dir`, in this order: $(project_dir)/roles/roleName/templates/$(srcpath) > $(project_dir)/playbooks/.../$(current_playbook)/roles/$(roleName)/templates/$(srcpath) > $(project_dir)/templates/$(srcpath).
+**dest**: the destination; an absolute path on the host, a directory or a file (matching `src`; a directory must end with "/"). Required. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**mode**: the permission of the file copied to the host. Optional; defaults to the source file's permission.
+## set_fact
+Sets variables for all hosts; the hierarchical structure is preserved.
+```yaml
+set_fact:
+  key: value
+```
+**key**: required; can be a multi-level structure (e.g. {k1: {k2: value}}).
+**value**: written in [template syntax](101-syntax.md) and evaluated separately for each host.
+## gen_cert
+Generates certificates in the work directory: $(work_dir)/kubekey/pki/
+```yaml
+gen_cert:
+  root_key: keypath
+  root_cert: certpath
+  date: 87600h
+  sans: ["ip1","dns1"]
+  cn: common name
+  out_key: keypath
+  out_cert: certpath
+```
+**root_key**: the absolute path of the parent key file, used to sign the child certificate. Optional. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**root_cert**: the absolute path of the parent cert file, used to sign the child certificate. Optional. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**date**: the certificate validity period, as a duration (units: s, m, h). Optional; defaults to 10 years.
+**sans**: Subject Alternate Names; an array, or a json string encoding an array. Optional.
+**cn**: Common Name. Required.
+**out_key**: the absolute path of the generated key file. Required. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**out_cert**: the absolute path of the generated cert file. Required. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+When root_key or root_cert is not defined, a self-signed certificate is generated.
+## image
+Pulls images into a local directory, or pushes images to a remote registry.
+```yaml
+image:
+  skip_tls_verify: true
+  pull: ["image1", "image2"]
+  push:
+    registry: local.kubekey
+    username: username
+    password: password
+    namespace_override: new_namespace
+```
+**skip_tls_verify**: skip TLS verification. Defaults to true.
+**pull**: images to pull into the local work directory. Optional. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**push**: pushes images from the work directory to a remote registry. Optional. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**registry**: the remote registry address. Required. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**username**: the user for registry authentication. Optional. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**password**: the password for registry authentication. Optional. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
+**namespace_override**: overrides the image's original namespace path with a new one. Optional. Written in [template syntax](101-syntax.md) and evaluated separately for each host.
diff --git a/feature/docs/zh/101-syntax.md b/feature/docs/zh/101-syntax.md
new file mode 100644
index 000000000..536004aca
--- /dev/null
+++ b/feature/docs/zh/101-syntax.md
@@ -0,0 +1,19 @@
+# Syntax
+The syntax follows the `go template` spec, extended with functions from [sprig](https://github.com/Masterminds/sprig).
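+For example, sprig's string functions can be used directly in any templated value (a sketch; `.hostname` is an illustrative variable):
+```yaml
+{{ .hostname | upper | trunc 8 }}
+```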
+# Custom functions
+## pow
+Exponentiation.
+```yaml
+# 2 to the power of 3, i.e. 2 ** 3
+{{ 2 | pow 3 }}
+```
+## toYaml
+Converts a value to a yaml string. The argument is the number of spaces to shift left; the result is a string.
+```yaml
+{{ .yaml_variable | toYaml }}
+```
+## ipInCIDR
+Gets the IP address at a given index within an IP range (cidr).
+```yaml
+{{ .cidr_variable | ipInCIDR 1 }}
+```
diff --git a/feature/docs/zh/201-variable.md b/feature/docs/zh/201-variable.md
new file mode 100644
index 000000000..f8d4e4e45
--- /dev/null
+++ b/feature/docs/zh/201-variable.md
@@ -0,0 +1,73 @@
+# Variables
+Variables fall into static variables (defined before a run) and dynamic variables (generated at runtime).
+Priority: dynamic variables > static variables.
+## Static variables
+Static variables include the inventory, the global config, and the parameters defined in templates.
+Priority: global config > inventory > parameters defined in templates.
+### Inventory
+A yaml file without template syntax, passed via the `-i` flag (`kk -i inventory.yaml ...`); it takes effect on every host.
+**Definition:**
+```yaml
+apiVersion: kubekey.kubesphere.io/v1
+kind: Inventory
+metadata:
+  name: default
+spec:
+  hosts:
+    - hostname1:
+      k1: v1
+      #...
+    - hostname2:
+      k2: v2
+      #...
+    - hostname3:
+      #...
+  groups:
+    groupname1:
+      groups:
+        - groupname2
+        # ...
+      hosts:
+        - hostname1
+        #...
+      vars:
+        k1: v1
+        #...
+    groupname2:
+      #...
+  vars:
+    k1: v1
+    #...
+```
+**hosts**: the key is a host name; the value holds the variables set for that host.
+**groups**: groups hosts. The key is a group name; the value contains groups, hosts and vars:
+- groups: the other groups contained in this group.
+- hosts: the hosts contained in this group.
+- vars: group-level variables, effective for every host in the group.
+The full host list of a group is the hosts of its `groups` plus the hosts listed in `hosts`.
+**vars**: global variables, effective for every host.
+Variable priority: $(host_variable) > $(group_variable) > $(global_variable)
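+
+A minimal sketch of this precedence (names and values are illustrative): `node1` resolves `k` to `from-host`, while `node2`, which only inherits from the group, resolves it to `from-group`:
+```yaml
+spec:
+  hosts:
+    - node1:
+      k: from-host
+    - node2:
+  groups:
+    workers:
+      hosts:
+        - node1
+        - node2
+      vars:
+        k: from-group
+  vars:
+    k: from-global
+```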
+### Global config
+A yaml file without template syntax, passed via the `-c` flag (`kk -c config.yaml ...`); it takes effect on every host.
+```yaml
+apiVersion: kubekey.kubesphere.io/v1
+kind: Config
+metadata:
+  name: default
+spec:
+  k: v
+  #...
+```
+Parameters of any type are accepted.
+### Parameters defined in templates
+The var parameters defined in templates include:
+- parameters defined by a playbook's `vars` and `vars_files` fields
+- parameters defined in a role's defaults/main.yaml
+- parameters defined by a role's `vars` field
+- parameters defined by a task's `vars` field
+## Dynamic variables
+Dynamic variables are generated while hosts execute, and include:
+- parameters produced by `gather_facts`
+- parameters produced by `register`
+- parameters produced by `set_fact`
+Priority follows definition order: a parameter defined later overrides one defined earlier.
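+
+A minimal sketch of definition-order precedence (the command and names are illustrative): after both tasks run, `kube_version` holds the registered output, because the later `register` overrides the earlier `set_fact`:
+```yaml
+- name: Set a default value
+  set_fact:
+    kube_version: v1.28.0
+
+- name: Detect the installed version
+  command: kubectl version
+  register: kube_version
+```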
diff --git a/feature/exp/README.md b/feature/exp/README.md
new file mode 100644
index 000000000..3808b322b
--- /dev/null
+++ b/feature/exp/README.md
@@ -0,0 +1,7 @@
+# Experimental
+
+⚠️ This package holds experimental code and API types. ⚠️
+
+## Compatibility notice
+
+This package does not adhere to any compatibility guarantees. Some portions may eventually be promoted out of this package and considered stable/GA, while others may be removed entirely.
\ No newline at end of file
diff --git a/feature/go.mod b/feature/go.mod
new file mode 100644
index 000000000..84ada6967
--- /dev/null
+++ b/feature/go.mod
@@ -0,0 +1,137 @@
+module github.com/kubesphere/kubekey/v4
+
+go 1.22
+
+require (
+ github.com/Masterminds/sprig/v3 v3.2.3
+ github.com/fsnotify/fsnotify v1.7.0
+ github.com/go-git/go-git/v5 v5.11.0
+ github.com/google/gops v0.3.28
+ github.com/opencontainers/image-spec v1.1.0
+ github.com/pkg/sftp v1.13.6
+ github.com/schollz/progressbar/v3 v3.14.5
+ github.com/spf13/cobra v1.8.0
+ github.com/spf13/pflag v1.0.5
+ github.com/stretchr/testify v1.8.4
+ golang.org/x/crypto v0.18.0
+ gopkg.in/yaml.v3 v3.0.1
+ k8s.io/api v0.29.1
+ k8s.io/apimachinery v0.29.1
+ k8s.io/apiserver v0.29.1
+ k8s.io/client-go v0.29.1
+ k8s.io/component-base v0.29.1
+ k8s.io/klog/v2 v2.120.1
+ k8s.io/utils v0.0.0-20240102154912-e7106e64919e
+ oras.land/oras-go/v2 v2.5.0
+ sigs.k8s.io/controller-runtime v0.17.0
+ sigs.k8s.io/structured-merge-diff/v4 v4.4.1
+ sigs.k8s.io/yaml v1.4.0
+)
+
+require (
+ dario.cat/mergo v1.0.0 // indirect
+ github.com/Masterminds/goutils v1.1.1 // indirect
+ github.com/Masterminds/semver/v3 v3.2.0 // indirect
+ github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/ProtonMail/go-crypto v1.0.0 // indirect
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/blang/semver/v4 v4.0.0 // indirect
+ github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cloudflare/circl v1.3.7 // indirect
+ github.com/coreos/go-semver v0.3.1 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/cyphar/filepath-securejoin v0.2.4 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/emicklei/go-restful/v3 v3.11.2 // indirect
+ github.com/emirpasic/gods v1.18.1 // indirect
+ github.com/evanphx/json-patch v5.8.1+incompatible // indirect
+ github.com/evanphx/json-patch/v5 v5.8.1 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
+ github.com/go-git/go-billy/v5 v5.5.0 // indirect
+ github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/go-openapi/jsonpointer v0.20.2 // indirect
+ github.com/go-openapi/jsonreference v0.20.4 // indirect
+ github.com/go-openapi/swag v0.22.7 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/google/cel-go v0.17.7 // indirect
+ github.com/google/gnostic-models v0.6.8 // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/gofuzz v1.2.0 // indirect
+ github.com/google/uuid v1.5.0 // indirect
+ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
+ github.com/huandu/xstrings v1.3.3 // indirect
+ github.com/imdario/mergo v0.3.16 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
+ github.com/kevinburke/ssh_config v1.2.0 // indirect
+ github.com/kr/fs v0.1.0 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
+ github.com/mitchellh/copystructure v1.0.0 // indirect
+ github.com/mitchellh/reflectwalk v1.0.0 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/opencontainers/go-digest v1.0.0 // indirect
+ github.com/pjbgf/sha1cd v0.3.0 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/client_golang v1.18.0 // indirect
+ github.com/prometheus/client_model v0.5.0 // indirect
+ github.com/prometheus/common v0.46.0 // indirect
+ github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
+ github.com/sergi/go-diff v1.3.1 // indirect
+ github.com/shopspring/decimal v1.2.0 // indirect
+ github.com/skeema/knownhosts v1.2.1 // indirect
+ github.com/spf13/cast v1.3.1 // indirect
+ github.com/stoewer/go-strcase v1.2.0 // indirect
+ github.com/xanzy/ssh-agent v0.3.3 // indirect
+ go.etcd.io/etcd/api/v3 v3.5.11 // indirect
+ go.etcd.io/etcd/client/pkg/v3 v3.5.11 // indirect
+ go.etcd.io/etcd/client/v3 v3.5.11 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect
+ go.opentelemetry.io/otel v1.22.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect
+ go.opentelemetry.io/otel/metric v1.22.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.22.0 // indirect
+ go.opentelemetry.io/otel/trace v1.22.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.1.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.26.0 // indirect
+ golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
+ golang.org/x/mod v0.14.0 // indirect
+ golang.org/x/net v0.20.0 // indirect
+ golang.org/x/oauth2 v0.16.0 // indirect
+ golang.org/x/sync v0.7.0 // indirect
+ golang.org/x/sys v0.22.0 // indirect
+ golang.org/x/term v0.22.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ golang.org/x/time v0.5.0 // indirect
+ golang.org/x/tools v0.17.0 // indirect
+ gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
+ google.golang.org/appengine v1.6.8 // indirect
+ google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect
+ google.golang.org/grpc v1.60.1 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
+ gopkg.in/inf.v0 v0.9.1 // indirect
+ gopkg.in/warnings.v0 v0.1.2 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ k8s.io/apiextensions-apiserver v0.29.1 // indirect
+ k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15 // indirect
+ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect
+ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+)
diff --git a/feature/go.sum b/feature/go.sum
new file mode 100644
index 000000000..b7d8786d3
--- /dev/null
+++ b/feature/go.sum
@@ -0,0 +1,441 @@
+cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM=
+cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
+cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
+github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
+github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
+github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU=
+github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
+github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
+github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
+github.com/evanphx/json-patch v5.8.1+incompatible h1:2toJaoe7/rNa1zpeQx0UnVEjqk6z2ecyA20V/zg8vTU=
+github.com/evanphx/json-patch v5.8.1+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.8.1 h1:iPEdwg0XayoS+E7Mth9JxwUtOgyVxnDTXHtKhZPlZxA=
+github.com/evanphx/json-patch/v5 v5.8.1/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
+github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
+github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
+github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
+github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
+github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
+github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
+github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
+github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
+github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
+github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
+github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8=
+github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
+github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ=
+github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark=
+github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
+github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
+github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=
+github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=
+github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
+github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
+github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY=
+github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw=
+github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
+github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
+github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
+github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
+github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/schollz/progressbar/v3 v3.14.5 h1:97RrSxbBASxQuZN9yemnyGrFZ/swnG6IrEe2R0BseX8=
+github.com/schollz/progressbar/v3 v3.14.5/go.mod h1:Nrzpuw3Nl0srLY0VlTvC4V6RL50pcEymjy6qyJAaLa0=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
+github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
+github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
+github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
+github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
+github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
+github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
+go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.etcd.io/etcd/api/v3 v3.5.11 h1:B54KwXbWDHyD3XYAwprxNzTe7vlhR69LuBgZnMVvS7E=
+go.etcd.io/etcd/api/v3 v3.5.11/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4=
+go.etcd.io/etcd/client/pkg/v3 v3.5.11 h1:bT2xVspdiCj2910T0V+/KHcVKjkUrCZVtk8J2JF2z1A=
+go.etcd.io/etcd/client/pkg/v3 v3.5.11/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4=
+go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
+go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
+go.etcd.io/etcd/client/v3 v3.5.11 h1:ajWtgoNSZJ1gmS8k+icvPtqsqEav+iUorF7b0qozgUU=
+go.etcd.io/etcd/client/v3 v3.5.11/go.mod h1:a6xQUEqFJ8vztO1agJh/KQKOMfFI8og52ZconzcDJwE=
+go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
+go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
+go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
+go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
+go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
+go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw=
+go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
+go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ=
+go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
+go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
+go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
+go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
+go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
+go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
+go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
+go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
+go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
+golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
+golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
+golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
+golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
+gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
+google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg=
+google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k=
+google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac h1:OZkkudMUu9LVQMCoRUbI/1p5VCo9BOrlvkqMvWtqa6s=
+google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA=
+google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
+google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw=
+k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ=
+k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw=
+k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU=
+k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc=
+k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU=
+k8s.io/apiserver v0.29.1 h1:e2wwHUfEmMsa8+cuft8MT56+16EONIEK8A/gpBSco+g=
+k8s.io/apiserver v0.29.1/go.mod h1:V0EpkTRrJymyVT3M49we8uh2RvXf7fWC5XLB0P3SwRw=
+k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A=
+k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks=
+k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw=
+k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc=
+k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
+k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15 h1:m6dl1pkxz3HuE2mP9MUYPCCGyy6IIFlv/vTlLBDxIwA=
+k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw=
+k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
+k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c=
+oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4=
+sigs.k8s.io/controller-runtime v0.17.0 h1:fjJQf8Ukya+VjogLO6/bNX9HE6Y2xpsO5+fyS26ur/s=
+sigs.k8s.io/controller-runtime v0.17.0/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/feature/hack/auto-update-version.py b/feature/hack/auto-update-version.py
new file mode 100755
index 000000000..f9fa93f99
--- /dev/null
+++ b/feature/hack/auto-update-version.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+
+# Copyright 2022 The KubeSphere Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import requests
+import re
+import json
+from natsort import natsorted
+import collections
+
+GITHUB_BASE_URL = "https://api.github.com"
+ORG = "kubernetes"
+REPO = "kubernetes"
+PER_PAGE = 15
+
+ARCH_LIST = ["amd64", "arm64"]
+K8S_COMPONENTS = ["kubeadm", "kubelet", "kubectl"]
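+
+# Shape of version/components.json assumed by the code below (keys illustrative):
+# {"kubeadm": {"amd64": {"v1.28.0": "<sha256>", ...}, "arm64": {...}},
+#  "kubelet": {...}, "kubectl": {...}}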
+
+
+def get_releases(org, repo, per_page=30):
+    try:
+        response = requests.get("{}/repos/{}/{}/releases?per_page={}".format(GITHUB_BASE_URL, org, repo, per_page),
+                                timeout=30)
+    except requests.exceptions.RequestException:
+        print("failed to fetch {}/{} releases".format(org, repo))
+        return []
+    else:
+        return response.json()
+
+
+def get_new_kubernetes_version(current_version):
+ new_versions = []
+
+ kubernetes_release = get_releases(org=ORG, repo=REPO, per_page=PER_PAGE)
+
+ for release in kubernetes_release:
+ tag = release['tag_name']
+        res = re.search(r"^v[0-9]+\.[0-9]+\.[0-9]+$", tag)
+ if res and tag not in current_version['kubeadm']['amd64'].keys():
+ new_versions.append(tag)
+
+ return new_versions
+
+
+def fetch_kubernetes_sha256(versions):
+ new_sha256 = {}
+
+ for version in versions:
+ for binary in K8S_COMPONENTS:
+ for arch in ARCH_LIST:
+                response = requests.get(
+                    "https://storage.googleapis.com/kubernetes-release/release/{}/bin/linux/{}/{}.sha256".format(
+                        version, arch, binary), timeout=30)
+ if response.status_code == 200:
+ new_sha256["{}-{}-{}".format(binary, arch, version)] = response.text
+
+ return new_sha256
+
+
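+# natsorted gives a natural ordering (v1.2.9 before v1.2.10), which a plain
+# lexicographic sort would get wrong for version strings.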
+def version_sort(data):
+ version_list = natsorted([*data])
+ sorted_data = collections.OrderedDict()
+
+ for v in version_list:
+ sorted_data[v] = data[v]
+
+ return sorted_data
+
+
+def main():
+ # get current support versions
+ with open("version/components.json", "r") as f:
+ data = json.load(f)
+
+ # get new kubernetes versions
+ new_versions = get_new_kubernetes_version(current_version=data)
+
+ if len(new_versions) > 0:
+ # fetch new kubernetes sha256
+ new_sha256 = fetch_kubernetes_sha256(new_versions)
+
+ if new_sha256:
+ for k, v in new_sha256.items():
+ info = k.split('-')
+ data[info[0]][info[1]][info[2]] = v
+
+ for binary in K8S_COMPONENTS:
+ for arch in ARCH_LIST:
+ data[binary][arch] = version_sort(data[binary][arch])
+
+ print(new_versions)
+ # update components.json
+ with open("version/components.json", 'w') as f:
+ json.dump(data, f, indent=4, ensure_ascii=False)
+
+ # set new version to tmp file
+ with open("version.tmp", 'w') as f:
+ f.write("\n".join(new_versions))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/feature/hack/boilerplate.go.txt b/feature/hack/boilerplate.go.txt
new file mode 100644
index 000000000..68fe49d39
--- /dev/null
+++ b/feature/hack/boilerplate.go.txt
@@ -0,0 +1,15 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
diff --git a/feature/hack/fetch-kubernetes-hash.sh b/feature/hack/fetch-kubernetes-hash.sh
new file mode 100755
index 000000000..22a4e510e
--- /dev/null
+++ b/feature/hack/fetch-kubernetes-hash.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+v22_patch_max=15
+v23_patch_max=13
+v24_patch_max=7
+v25_patch_max=3
+
+versions=()
+
+append_k8s_version() {
+ prefix=$1
+ max=$2
+ for i in $(seq 0 "$max");
+ do
+ versions+=("${prefix}${i}")
+ done
+}
+
+append_k8s_version "v1.22." $v22_patch_max
+append_k8s_version "v1.23." $v23_patch_max
+append_k8s_version "v1.24." $v24_patch_max
+append_k8s_version "v1.25." $v25_patch_max
+
+#versions=("v1.22.12" "v1.23.9" "v1.24.3")
+
+arches=("amd64" "arm64")
+apps=("kubeadm" "kubelet" "kubectl")
+json="{}"
+for app in "${apps[@]}";
+do
+ for arch in "${arches[@]}"
+ do
+ echo "${app}@${arch}"
+ for ver in "${versions[@]}"
+ do
+ url="https://storage.googleapis.com/kubernetes-release/release/${ver}/bin/linux/${arch}/${app}.sha256"
+ hash=$(wget --quiet -O - "$url")
+ echo "\"${ver}\": \"${hash}\","
+ json=$(echo "$json" | jq ".${app}.${arch} += {\"${ver}\":\"${hash}\"}")
+ done
+ done
+done
+
+file="kubernetes-hashes.json"
+echo "$json" | jq --indent 4 > "${file}" && echo -e "\n\nThe hash info have saved to file ${file}.\n\n"
diff --git a/feature/hack/gen-repository-iso/dockerfile.almalinux90 b/feature/hack/gen-repository-iso/dockerfile.almalinux90
new file mode 100644
index 000000000..a90c8b70a
--- /dev/null
+++ b/feature/hack/gen-repository-iso/dockerfile.almalinux90
@@ -0,0 +1,21 @@
+FROM almalinux:9.0 as almalinux90
+ARG TARGETARCH
+ARG BUILD_TOOLS="dnf-plugins-core createrepo mkisofs epel-release"
+ARG DIR=almalinux-9.0-${TARGETARCH}-rpms
+ARG PKGS=.common[],.rpms[],.almalinux[],.almalinux90[]
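+# PKGS is a yq path expression that unions the common, rpms, almalinux and
+# almalinux90 package lists from packages.yaml into one flat list.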
+
+RUN dnf install -q -y ${BUILD_TOOLS} \
+ && dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo \
+ && dnf makecache
+
+WORKDIR package
+COPY packages.yaml .
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+RUN yq eval ${PKGS} packages.yaml | sed '/^ceph-common$/d' > packages.list
+
+RUN sort -u packages.list | xargs dnf download --resolve --alldeps --downloaddir=${DIR} \
+ && createrepo -d ${DIR} \
+ && mkisofs -r -o ${DIR}.iso ${DIR}
+
+FROM scratch
+COPY --from=almalinux90 /package/*.iso /
diff --git a/feature/hack/gen-repository-iso/dockerfile.centos7 b/feature/hack/gen-repository-iso/dockerfile.centos7
new file mode 100644
index 000000000..e0ed0cfc8
--- /dev/null
+++ b/feature/hack/gen-repository-iso/dockerfile.centos7
@@ -0,0 +1,22 @@
+FROM centos:7 as centos7
+ARG TARGETARCH
+ENV OS=centos
+ENV OS_VERSION=7
+ARG BUILD_TOOLS="yum-utils createrepo mkisofs epel-release"
+ARG DIR=${OS}${OS_VERSION}-${TARGETARCH}-rpms
+
+RUN yum install -q -y ${BUILD_TOOLS} \
+ && yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo \
+ && yum makecache
+
+WORKDIR package
+COPY packages.yaml .
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+RUN yq eval ".common[],.rpms[],.${OS}[],.${OS}${OS_VERSION}[]" packages.yaml > packages.list
+
+RUN sort -u packages.list | xargs repotrack -p ${DIR} \
+ && createrepo -d ${DIR} \
+ && mkisofs -r -o ${DIR}.iso ${DIR}
+
+FROM scratch
+COPY --from=centos7 /package/*.iso /
diff --git a/feature/hack/gen-repository-iso/dockerfile.debian10 b/feature/hack/gen-repository-iso/dockerfile.debian10
new file mode 100644
index 000000000..635a124d2
--- /dev/null
+++ b/feature/hack/gen-repository-iso/dockerfile.debian10
@@ -0,0 +1,38 @@
+FROM debian:10 as debian10
+ARG TARGETARCH
+ARG OS_RELEASE=buster
+ARG OS_VERSION=10
+ARG DIR=debian-10-${TARGETARCH}-debs
+ARG PKGS=.common[],.debs[],.debian[],.debian10[]
+ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage dirmngr"
+ENV DEBIAN_FRONTEND=noninteractive
+
+# dump system package list
+RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
+RUN ARCH=$(dpkg --print-architecture) \
+ && apt update -qq \
+ && apt install -y --no-install-recommends $BUILD_TOOLS \
+ && if [ "$TARGETARCH" = "amd64" ]; then \
+ curl -fsSL https://download.gluster.org/pub/gluster/glusterfs/7/rsa.pub | apt-key add - ; \
+ echo deb https://download.gluster.org/pub/gluster/glusterfs/7/LATEST/Debian/${OS_VERSION}/amd64/apt ${OS_RELEASE} main > /etc/apt/sources.list.d/gluster.list ; \
+ fi \
+ && curl -fsSL "https://download.docker.com/linux/debian/gpg" | apt-key add -qq - \
+ && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/debian ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list \
+ && apt update -qq
+
+WORKDIR /package
+COPY packages.yaml .
+
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+RUN yq eval "${PKGS}" packages.yaml >> packages.list \
+ && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
+
+RUN mkdir -p ${DIR} \
+ && wget -q -x -P ${DIR} -i packages.urls \
+ && cd ${DIR} \
+ && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
+
+RUN genisoimage -r -o ${DIR}.iso ${DIR}
+
+FROM scratch
+COPY --from=debian10 /package/*.iso /
diff --git a/feature/hack/gen-repository-iso/dockerfile.debian11 b/feature/hack/gen-repository-iso/dockerfile.debian11
new file mode 100644
index 000000000..f99dd95f6
--- /dev/null
+++ b/feature/hack/gen-repository-iso/dockerfile.debian11
@@ -0,0 +1,41 @@
+FROM debian:11.6 as debian11
+ARG TARGETARCH
+ARG OS_RELEASE=bullseye
+ARG OS_VERSION=11
+ARG DIR=debian-11-${TARGETARCH}-debs
+ARG PKGS=.common[],.debs[],.debian[],.debian11[]
+ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage dirmngr"
+ENV DEBIAN_FRONTEND=noninteractive
+
+# dump system package list
+RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
+RUN ARCH=$(dpkg --print-architecture) \
+ && apt update -qq \
+ && apt install -y --no-install-recommends $BUILD_TOOLS \
+ && if [ "$TARGETARCH" = "amd64" ]; then \
+ curl -fsSL https://download.gluster.org/pub/gluster/glusterfs/7/rsa.pub | apt-key add - ; \
+ echo deb https://download.gluster.org/pub/gluster/glusterfs/7/LATEST/Debian/${OS_VERSION}/amd64/apt ${OS_RELEASE} main > /etc/apt/sources.list.d/gluster.list ; \
+ fi \
+ && curl -fsSL "https://download.docker.com/linux/debian/gpg" | apt-key add -qq - \
+ && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/debian ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list \
+ && apt update -qq \
+ && apt upgrade -y -qq
+
+WORKDIR /package
+COPY packages.yaml .
+
+COPY --from=mikefarah/yq:4.30.8 /usr/bin/yq /usr/bin/yq
+RUN yq eval "${PKGS}" packages.yaml >> packages.list \
+ && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
+
+RUN cat packages.urls
+
+RUN mkdir -p ${DIR} \
+ && wget -q -x -P ${DIR} -i packages.urls \
+ && cd ${DIR} \
+ && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
+
+RUN genisoimage -r -o ${DIR}.iso ${DIR}
+
+FROM scratch
+COPY --from=debian11 /package/*.iso /
diff --git a/feature/hack/gen-repository-iso/dockerfile.ubuntu1604 b/feature/hack/gen-repository-iso/dockerfile.ubuntu1604
new file mode 100644
index 000000000..719698198
--- /dev/null
+++ b/feature/hack/gen-repository-iso/dockerfile.ubuntu1604
@@ -0,0 +1,33 @@
+FROM ubuntu:16.04 as ubuntu1604
+ARG TARGETARCH
+ARG OS_RELEASE=xenial
+ARG DIR=ubuntu-16.04-${TARGETARCH}-debs
+ARG PKGS=.common[],.debs[],.ubuntu[],.ubuntu1604[]
+ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage"
+ENV DEBIAN_FRONTEND=noninteractive
+
+# dump system package list
+RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
+RUN apt update -qq \
+ && apt install -y --no-install-recommends $BUILD_TOOLS \
+ && add-apt-repository ppa:gluster/glusterfs-7 -y \
+ && curl -fsSL "https://download.docker.com/linux/ubuntu/gpg" | apt-key add -qq - \
+ && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/ubuntu ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list\
+ && apt update -qq
+
+WORKDIR /package
+COPY packages.yaml .
+
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+RUN yq eval "${PKGS}" packages.yaml >> packages.list \
+ && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
+
+RUN mkdir -p ${DIR} \
+ && wget -q -x -P ${DIR} -i packages.urls \
+ && cd ${DIR} \
+ && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
+
+RUN genisoimage -r -o ${DIR}.iso ${DIR}
+
+FROM scratch
+COPY --from=ubuntu1604 /package/*.iso /
diff --git a/feature/hack/gen-repository-iso/dockerfile.ubuntu1804 b/feature/hack/gen-repository-iso/dockerfile.ubuntu1804
new file mode 100644
index 000000000..f9852d8db
--- /dev/null
+++ b/feature/hack/gen-repository-iso/dockerfile.ubuntu1804
@@ -0,0 +1,34 @@
+FROM ubuntu:18.04 as ubuntu1804
+ARG TARGETARCH
+ARG OS_RELEASE=bionic
+ARG DIR=ubuntu-18.04-${TARGETARCH}-debs
+ARG PKGS=.common[],.debs[],.ubuntu[],.ubuntu1804[]
+ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage"
+ENV DEBIAN_FRONTEND=noninteractive
+
+# dump system package list
+RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
+RUN apt update -qq \
+ && apt install -y --no-install-recommends $BUILD_TOOLS \
+ && add-apt-repository ppa:gluster/glusterfs-7 -y \
+ && curl -fsSL "https://download.docker.com/linux/ubuntu/gpg" | apt-key add -qq - \
+ && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/ubuntu ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list\
+ && apt update -qq
+
+WORKDIR /package
+COPY packages.yaml .
+
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+RUN yq eval "${PKGS}" packages.yaml >> packages.list \
+ && dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 >> packages.list \
+ && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
+
+RUN mkdir -p ${DIR} \
+ && wget -q -x -P ${DIR} -i packages.urls \
+ && cd ${DIR} \
+ && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
+
+RUN genisoimage -r -o ${DIR}.iso ${DIR}
+
+FROM scratch
+COPY --from=ubuntu1804 /package/*.iso /
diff --git a/feature/hack/gen-repository-iso/dockerfile.ubuntu2004 b/feature/hack/gen-repository-iso/dockerfile.ubuntu2004
new file mode 100644
index 000000000..9cb4f0b2f
--- /dev/null
+++ b/feature/hack/gen-repository-iso/dockerfile.ubuntu2004
@@ -0,0 +1,33 @@
+FROM ubuntu:20.04 as ubuntu2004
+ARG TARGETARCH
+ARG OS_RELEASE=focal
+ARG DIR=ubuntu-20.04-${TARGETARCH}-debs
+ARG PKGS=.common[],.debs[],.ubuntu[],.ubuntu2004[]
+ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage"
+ENV DEBIAN_FRONTEND=noninteractive
+
+# dump system package list
+RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
+RUN apt update -qq \
+ && apt install -y --no-install-recommends $BUILD_TOOLS \
+ && add-apt-repository ppa:gluster/glusterfs-7 -y \
+ && curl -fsSL "https://download.docker.com/linux/ubuntu/gpg" | apt-key add -qq - \
+ && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/ubuntu ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list\
+ && apt update -qq
+
+WORKDIR /package
+COPY packages.yaml .
+
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+RUN yq eval "${PKGS}" packages.yaml >> packages.list \
+ && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
+
+RUN mkdir -p ${DIR} \
+ && wget -q -x -P ${DIR} -i packages.urls \
+ && cd ${DIR} \
+ && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
+
+RUN genisoimage -r -o ${DIR}.iso ${DIR}
+
+FROM scratch
+COPY --from=ubuntu2004 /package/*.iso /
diff --git a/feature/hack/gen-repository-iso/dockerfile.ubuntu2204 b/feature/hack/gen-repository-iso/dockerfile.ubuntu2204
new file mode 100644
index 000000000..7a92912a6
--- /dev/null
+++ b/feature/hack/gen-repository-iso/dockerfile.ubuntu2204
@@ -0,0 +1,33 @@
+FROM ubuntu:22.04 as ubuntu2204
+ARG TARGETARCH
+ARG OS_RELEASE=jammy
+ARG DIR=ubuntu-22.04-${TARGETARCH}-debs
+ARG PKGS=.common[],.debs[],.ubuntu[],.ubuntu2204[]
+ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage"
+ENV DEBIAN_FRONTEND=noninteractive
+
+# dump system package list
+RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
+RUN apt update -qq \
+ && apt install -y --no-install-recommends $BUILD_TOOLS \
+ #&& add-apt-repository ppa:gluster/glusterfs-7 -y \
+ && curl -fsSL "https://download.docker.com/linux/ubuntu/gpg" | apt-key add -qq - \
+ && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/ubuntu ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list\
+ && apt update -qq
+
+WORKDIR /package
+COPY packages.yaml .
+
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+RUN yq eval "${PKGS}" packages.yaml >> packages.list \
+ && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
+
+RUN mkdir -p ${DIR} \
+ && wget -q -x -P ${DIR} -i packages.urls \
+ && cd ${DIR} \
+ && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
+
+RUN genisoimage -r -o ${DIR}.iso ${DIR}
+
+FROM scratch
+COPY --from=ubuntu2204 /package/*.iso /
diff --git a/feature/hack/gen-repository-iso/download-pkgs.sh b/feature/hack/gen-repository-iso/download-pkgs.sh
new file mode 100644
index 000000000..ee0afa35e
--- /dev/null
+++ b/feature/hack/gen-repository-iso/download-pkgs.sh
@@ -0,0 +1,7 @@
+#! /bin/sh
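+# Downloads each package named in $PACKAGES, plus its direct Depends/Recommends/
+# Suggests, into the current directory; download errors are appended to errors.txt.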
+
+for p in ${PACKAGES} ; do
+ echo "\n Download $p ... \n"
+ sudo apt-get download $p 2>>errors.txt
+ for i in $(apt-cache depends $p | grep -E 'Depends|Recommends|Suggests' | cut -d ':' -f 2,3 | sed -e s/' '/''/); do sudo apt-get download $i 2>>errors.txt; done
+done
diff --git a/feature/hack/gen-repository-iso/packages.yaml b/feature/hack/gen-repository-iso/packages.yaml
new file mode 100644
index 000000000..65d89a3eb
--- /dev/null
+++ b/feature/hack/gen-repository-iso/packages.yaml
@@ -0,0 +1,88 @@
+---
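+# Consumed by the dockerfiles in this directory: each image unions `common` with
+# the matching package-format list (rpms/debs) and its OS/OS-version lists via yq.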
+common:
+ - curl
+ - ceph-common
+ - net-tools
+ - lvm2
+ - telnet
+ - tcpdump
+ - socat
+ - openssl
+ - chrony
+ - conntrack
+ - ipvsadm
+ - ipset
+ - psmisc
+ - bash-completion
+ - ebtables
+ - haproxy
+ - keepalived
+rpms:
+ - nfs-utils
+ - yum-utils
+ - bind-utils
+ - glusterfs-fuse
+ - lz4
+ - nss
+ - nss-sysinit
+ - nss-tools
+ - conntrack-tools
+debs:
+ - apt-transport-https
+ - ca-certificates
+ - dnsutils
+ - git
+ - glusterfs-client
+ - gnupg-agent
+ - nfs-common
+ - openssh-server
+ - software-properties-common
+ - sudo
+
+centos:
+ - containerd.io
+
+centos7:
+ - libselinux-python
+ - docker-ce-20.10.8
+ - docker-ce-cli-20.10.8
+
+debian:
+ - containerd.io
+
+debian10:
+ - docker-ce=5:20.10.8~3-0~debian-buster
+ - docker-ce-cli=5:20.10.8~3-0~debian-buster
+
+debian11:
+ - docker-ce=5:20.10.8~3-0~debian-bullseye
+ - docker-ce-cli=5:20.10.8~3-0~debian-bullseye
+
+ubuntu:
+ - containerd.io
+
+ubuntu1604:
+ - docker-ce=5:20.10.8~3-0~ubuntu-xenial
+ - docker-ce-cli=5:20.10.8~3-0~ubuntu-xenial
+
+ubuntu1804:
+ - docker-ce=5:20.10.8~3-0~ubuntu-bionic
+ - docker-ce-cli=5:20.10.8~3-0~ubuntu-bionic
+
+ubuntu2004:
+ - docker-ce=5:20.10.8~3-0~ubuntu-focal
+ - docker-ce-cli=5:20.10.8~3-0~ubuntu-focal
+
+# The minimum version of docker-ce on ubuntu 2204 is 20.10.13
+ubuntu2204:
+ - docker-ce=5:20.10.13~3-0~ubuntu-jammy
+ - docker-ce-cli=5:20.10.13~3-0~ubuntu-jammy
+
+almalinux:
+ - containerd.io
+ - docker-compose-plugin
+
+almalinux90:
+ - docker-ce-20.10.17
+ - docker-ce-cli-20.10.17
diff --git a/feature/hack/go_install.sh b/feature/hack/go_install.sh
new file mode 100755
index 000000000..a07b8e0f1
--- /dev/null
+++ b/feature/hack/go_install.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# Copyright 2021 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
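+
+# Usage: GOBIN=<bin dir> go_install.sh <module path> <binary name> <version>
+# Illustrative example (module path and version are placeholders):
+#   GOBIN=$(pwd)/bin ./hack/go_install.sh sigs.k8s.io/controller-tools/cmd/controller-gen controller-gen v0.14.0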
+
+if [ -z "${1}" ]; then
+ echo "must provide module as first parameter"
+ exit 1
+fi
+
+if [ -z "${2}" ]; then
+ echo "must provide binary name as second parameter"
+ exit 1
+fi
+
+if [ -z "${3}" ]; then
+ echo "must provide version as third parameter"
+ exit 1
+fi
+
+if [ -z "${GOBIN}" ]; then
+ echo "GOBIN is not set. Must set GOBIN to install the bin in a specified directory."
+ exit 1
+fi
+
+rm -f "${GOBIN}/${2}"* || true
+
+# install the golang module specified as the first argument
+go install "${1}@${3}"
+mv "${GOBIN}/${2}" "${GOBIN}/${2}-${3}"
+ln -sf "${GOBIN}/${2}-${3}" "${GOBIN}/${2}"
diff --git a/feature/hack/lib/golang.sh b/feature/hack/lib/golang.sh
new file mode 100755
index 000000000..721c8c142
--- /dev/null
+++ b/feature/hack/lib/golang.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+
+# This is a modified version of Kubernetes
+KUBE_GO_PACKAGE=kubesphere.io/kubesphere
+
+# Ensure the go tool exists and is a viable version.
+kube::golang::verify_go_version() {
+ if [[ -z "$(command -v go)" ]]; then
+    kube::log::usage_from_stdin <<EOF
+Can't find 'go' in PATH, please fix and retry.
+See http://golang.org/doc/install for installation instructions.
+EOF
+    return 2
+  fi
+}
+
+# This emulates "readlink -f" which is not available on MacOS X
+# Test:
+# T=/tmp/$$.$RANDOM
+# mkdir $T
+# touch $T/file
+# mkdir $T/dir
+# ln -s $T/file $T/linkfile
+# ln -s $T/dir $T/linkdir
+# function testone() {
+# X=$(readlink -f $1 2>&1)
+# Y=$(kube::readlinkdashf $1 2>&1)
+# if [ "$X" != "$Y" ]; then
+# echo readlinkdashf $1: expected "$X", got "$Y"
+# fi
+# }
+# testone /
+# testone /tmp
+# testone $T
+# testone $T/file
+# testone $T/dir
+# testone $T/linkfile
+# testone $T/linkdir
+# testone $T/nonexistent
+# testone $T/linkdir/file
+# testone $T/linkdir/dir
+# testone $T/linkdir/linkfile
+# testone $T/linkdir/linkdir
+function kube::readlinkdashf {
+ # run in a subshell for simpler 'cd'
+ (
+ if [[ -d "${1}" ]]; then # This also catch symlinks to dirs.
+ cd "${1}"
+ pwd -P
+ else
+ cd "$(dirname "${1}")"
+ local f
+ f=$(basename "${1}")
+ if [[ -L "${f}" ]]; then
+ readlink "${f}"
+ else
+ echo "$(pwd -P)/${f}"
+ fi
+ fi
+ )
+}
+
+# This emulates "realpath" which is not available on MacOS X
+# Test:
+# T=/tmp/$$.$RANDOM
+# mkdir $T
+# touch $T/file
+# mkdir $T/dir
+# ln -s $T/file $T/linkfile
+# ln -s $T/dir $T/linkdir
+# function testone() {
+# X=$(realpath $1 2>&1)
+# Y=$(kube::realpath $1 2>&1)
+# if [ "$X" != "$Y" ]; then
+# echo realpath $1: expected "$X", got "$Y"
+# fi
+# }
+# testone /
+# testone /tmp
+# testone $T
+# testone $T/file
+# testone $T/dir
+# testone $T/linkfile
+# testone $T/linkdir
+# testone $T/nonexistent
+# testone $T/linkdir/file
+# testone $T/linkdir/dir
+# testone $T/linkdir/linkfile
+# testone $T/linkdir/linkdir
+kube::realpath() {
+ if [[ ! -e "${1}" ]]; then
+ echo "${1}: No such file or directory" >&2
+ return 1
+ fi
+ kube::readlinkdashf "${1}"
+}
diff --git a/feature/hack/lib/logging.sh b/feature/hack/lib/logging.sh
new file mode 100755
index 000000000..ac44d0d44
--- /dev/null
+++ b/feature/hack/lib/logging.sh
@@ -0,0 +1,171 @@
+#!/usr/bin/env bash
+
+# Copyright 2014 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Controls verbosity of the script output and logging.
+KUBE_VERBOSE="${KUBE_VERBOSE:-5}"
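+# kube::log::info and kube::log::status print only when the caller-supplied
+# verbosity V is at most KUBE_VERBOSE.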
+
+# Handler for when we exit automatically on an error.
+# Borrowed from https://gist.github.com/ahendrix/7030300
+kube::log::errexit() {
+ local err="${PIPESTATUS[*]}"
+
+ # If the shell we are in doesn't have errexit set (common in subshells) then
+ # don't dump stacks.
+ set +o | grep -qe "-o errexit" || return
+
+ set +o xtrace
+ local code="${1:-1}"
+ # Print out the stack trace described by $function_stack
+ if [ ${#FUNCNAME[@]} -gt 2 ]
+ then
+ kube::log::error "Call tree:"
+ for ((i=1;i<${#FUNCNAME[@]}-1;i++))
+ do
+ kube::log::error " ${i}: ${BASH_SOURCE[${i}+1]}:${BASH_LINENO[${i}]} ${FUNCNAME[${i}]}(...)"
+ done
+ fi
+ kube::log::error_exit "Error in ${BASH_SOURCE[1]}:${BASH_LINENO[0]}. '${BASH_COMMAND}' exited with status ${err}" "${1:-1}" 1
+}
+
+kube::log::install_errexit() {
+ # trap ERR to provide an error handler whenever a command exits nonzero this
+ # is a more verbose version of set -o errexit
+ trap 'kube::log::errexit' ERR
+
+ # setting errtrace allows our ERR trap handler to be propagated to functions,
+ # expansions and subshells
+ set -o errtrace
+}
+
+# Print out the stack trace
+#
+# Args:
+# $1 The number of stack frames to skip when printing.
+kube::log::stack() {
+ local stack_skip=${1:-0}
+ stack_skip=$((stack_skip + 1))
+ if [[ ${#FUNCNAME[@]} -gt ${stack_skip} ]]; then
+ echo "Call stack:" >&2
+ local i
+ for ((i=1 ; i <= ${#FUNCNAME[@]} - stack_skip ; i++))
+ do
+ local frame_no=$((i - 1 + stack_skip))
+ local source_file=${BASH_SOURCE[${frame_no}]}
+ local source_lineno=${BASH_LINENO[$((frame_no - 1))]}
+ local funcname=${FUNCNAME[${frame_no}]}
+ echo " ${i}: ${source_file}:${source_lineno} ${funcname}(...)" >&2
+ done
+ fi
+}
+
+# Log an error and exit.
+# Args:
+# $1 Message to log with the error
+# $2 The error code to return
+# $3 The number of stack frames to skip when printing.
+kube::log::error_exit() {
+ local message="${1:-}"
+ local code="${2:-1}"
+ local stack_skip="${3:-0}"
+ stack_skip=$((stack_skip + 1))
+
+ if [[ ${KUBE_VERBOSE} -ge 4 ]]; then
+ local source_file=${BASH_SOURCE[${stack_skip}]}
+ local source_line=${BASH_LINENO[$((stack_skip - 1))]}
+ echo "!!! Error in ${source_file}:${source_line}" >&2
+ [[ -z ${1-} ]] || {
+ echo " ${1}" >&2
+ }
+
+ kube::log::stack ${stack_skip}
+
+ echo "Exiting with status ${code}" >&2
+ fi
+
+ exit "${code}"
+}
+
+# Log an error but keep going. Don't dump the stack or exit.
+kube::log::error() {
+ timestamp=$(date +"[%m%d %H:%M:%S]")
+ echo "!!! ${timestamp} ${1-}" >&2
+ shift
+ for message; do
+ echo " ${message}" >&2
+ done
+}
+
+# Print a usage message to stderr. The arguments are printed directly.
+kube::log::usage() {
+ echo >&2
+ local message
+ for message; do
+ echo "${message}" >&2
+ done
+ echo >&2
+}
+
+kube::log::usage_from_stdin() {
+ local messages=()
+ while read -r line; do
+ messages+=("${line}")
+ done
+
+ kube::log::usage "${messages[@]}"
+}
+
+# Print out some info that isn't a top level status line
+kube::log::info() {
+ local V="${V:-0}"
+  if [[ ${KUBE_VERBOSE} -lt ${V} ]]; then
+ return
+ fi
+
+ for message; do
+ echo "${message}"
+ done
+}
+
+# Just like kube::log::info, but no \n, so you can make a progress bar
+kube::log::progress() {
+ for message; do
+ echo -e -n "${message}"
+ done
+}
+
+kube::log::info_from_stdin() {
+ local messages=()
+ while read -r line; do
+ messages+=("${line}")
+ done
+
+ kube::log::info "${messages[@]}"
+}
+
+# Print a status line. Formatted to show up in a stream of output.
+kube::log::status() {
+ local V="${V:-0}"
+  if [[ ${KUBE_VERBOSE} -lt ${V} ]]; then
+ return
+ fi
+
+ timestamp=$(date +"[%m%d %H:%M:%S]")
+ echo "+++ ${timestamp} ${1}"
+ shift
+ for message; do
+ echo " ${message}"
+ done
+}
diff --git a/feature/hack/lib/util.sh b/feature/hack/lib/util.sh
new file mode 100755
index 000000000..2bb1a14ba
--- /dev/null
+++ b/feature/hack/lib/util.sh
@@ -0,0 +1,765 @@
+#!/usr/bin/env bash
+
+# Copyright 2014 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function kube::util::sourced_variable {
+ # Call this function to tell shellcheck that a variable is supposed to
+ # be used from other calling context. This helps quiet an "unused
+ # variable" warning from shellcheck and also document your code.
+ true
+}
+
+kube::util::sortable_date() {
+ date "+%Y%m%d-%H%M%S"
+}
+
+# arguments: target, item1, item2, item3, ...
+# returns 0 if target is in the given items, 1 otherwise.
+kube::util::array_contains() {
+ local search="$1"
+ local element
+ shift
+ for element; do
+ if [[ "${element}" == "${search}" ]]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
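+# Polls a URL with curl until it responds or the retry budget is exhausted.
+# Args: url, log prefix, seconds between tries (default 1), number of tries
+# (default 30), per-request curl --max-time in seconds (default 1).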
+kube::util::wait_for_url() {
+ local url=$1
+ local prefix=${2:-}
+ local wait=${3:-1}
+ local times=${4:-30}
+ local maxtime=${5:-1}
+
+ command -v curl >/dev/null || {
+ kube::log::usage "curl must be installed"
+ exit 1
+ }
+
+ local i
+ for i in $(seq 1 "${times}"); do
+ local out
+ if out=$(curl --max-time "${maxtime}" -gkfs "${url}" 2>/dev/null); then
+ kube::log::status "On try ${i}, ${prefix}: ${out}"
+ return 0
+ fi
+ sleep "${wait}"
+ done
+ kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each"
+ return 1
+}
+
+# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
+# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
+kube::util::trap_add() {
+ local trap_add_cmd
+ trap_add_cmd=$1
+ shift
+
+ for trap_add_name in "$@"; do
+ local existing_cmd
+ local new_cmd
+
+ # Grab the currently defined trap commands for this trap
+ existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}')
+
+ if [[ -z "${existing_cmd}" ]]; then
+ new_cmd="${trap_add_cmd}"
+ else
+ new_cmd="${trap_add_cmd};${existing_cmd}"
+ fi
+
+ # Assign the test. Disable the shellcheck warning telling that trap
+ # commands should be single quoted to avoid evaluating them at this
+ # point instead evaluating them at run time. The logic of adding new
+ # commands to a single trap requires them to be evaluated right away.
+ # shellcheck disable=SC2064
+ trap "${new_cmd}" "${trap_add_name}"
+ done
+}
+
+# Opposite of kube::util::ensure-temp-dir()
+kube::util::cleanup-temp-dir() {
+ rm -rf "${KUBE_TEMP}"
+}
+
+# Create a temp dir that'll be deleted at the end of this bash session.
+#
+# Vars set:
+# KUBE_TEMP
+kube::util::ensure-temp-dir() {
+ if [[ -z ${KUBE_TEMP-} ]]; then
+ KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX)
+ kube::util::trap_add kube::util::cleanup-temp-dir EXIT
+ fi
+}
+
+kube::util::host_os() {
+ local host_os
+ case "$(uname -s)" in
+ Darwin)
+ host_os=darwin
+ ;;
+ Linux)
+ host_os=linux
+ ;;
+ *)
+ kube::log::error "Unsupported host OS. Must be Linux or Mac OS X."
+ exit 1
+ ;;
+ esac
+ echo "${host_os}"
+}
+
+kube::util::host_arch() {
+ local host_arch
+ case "$(uname -m)" in
+ x86_64*)
+ host_arch=amd64
+ ;;
+ i?86_64*)
+ host_arch=amd64
+ ;;
+ amd64*)
+ host_arch=amd64
+ ;;
+ aarch64*)
+ host_arch=arm64
+ ;;
+ arm64*)
+ host_arch=arm64
+ ;;
+ arm*)
+ host_arch=arm
+ ;;
+ i?86*)
+ host_arch=x86
+ ;;
+ s390x*)
+ host_arch=s390x
+ ;;
+ ppc64le*)
+ host_arch=ppc64le
+ ;;
+ *)
+ kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le."
+ exit 1
+ ;;
+ esac
+ echo "${host_arch}"
+}
+
+# This figures out the host platform without relying on golang. We need this as
+# we don't want a golang install to be a prerequisite to building yet we need
+# this info to figure out where the final binaries are placed.
+kube::util::host_platform() {
+ echo "$(kube::util::host_os)/$(kube::util::host_arch)"
+}
+
+# looks for $1 in well-known output locations for the platform ($2)
+# $KUBE_ROOT must be set
+kube::util::find-binary-for-platform() {
+ local -r lookfor="$1"
+ local -r platform="$2"
+ local locations=(
+ "${KUBE_ROOT}/_output/bin/${lookfor}"
+ "${KUBE_ROOT}/_output/dockerized/bin/${platform}/${lookfor}"
+ "${KUBE_ROOT}/_output/local/bin/${platform}/${lookfor}"
+ "${KUBE_ROOT}/platforms/${platform}/${lookfor}"
+ )
+ # Also search for binary in bazel build tree.
+ # The bazel go rules place some binaries in subtrees like
+ # "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure
+ # the platform name is matched in the path.
+ while IFS=$'\n' read -r location; do
+ locations+=("$location");
+ done < <(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \
+ \( -path "*/${platform/\//_}*/${lookfor}" -o -path "*/${lookfor}" \) 2>/dev/null || true)
+
+ # List most recently-updated location.
+ local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
+ echo -n "${bin}"
+}
+
+# looks for $1 in well-known output locations for the host platform
+# $KUBE_ROOT must be set
+kube::util::find-binary() {
+ kube::util::find-binary-for-platform "$1" "$(kube::util::host_platform)"
+}
+
+# Run all known doc generators (today gendocs and genman for kubectl)
+# $1 is the directory to put those generated documents
+kube::util::gen-docs() {
+ local dest="$1"
+
+ # Find binary
+ gendocs=$(kube::util::find-binary "gendocs")
+ genkubedocs=$(kube::util::find-binary "genkubedocs")
+ genman=$(kube::util::find-binary "genman")
+ genyaml=$(kube::util::find-binary "genyaml")
+ genfeddocs=$(kube::util::find-binary "genfeddocs")
+
+ # TODO: If ${genfeddocs} is not used from anywhere (it isn't used at
+ # least from k/k tree), remove it completely.
+ kube::util::sourced_variable "${genfeddocs}"
+
+ mkdir -p "${dest}/docs/user-guide/kubectl/"
+ "${gendocs}" "${dest}/docs/user-guide/kubectl/"
+ mkdir -p "${dest}/docs/admin/"
+ "${genkubedocs}" "${dest}/docs/admin/" "kube-apiserver"
+ "${genkubedocs}" "${dest}/docs/admin/" "kube-controller-manager"
+ "${genkubedocs}" "${dest}/docs/admin/" "kube-proxy"
+ "${genkubedocs}" "${dest}/docs/admin/" "kube-scheduler"
+ "${genkubedocs}" "${dest}/docs/admin/" "kubelet"
+ "${genkubedocs}" "${dest}/docs/admin/" "kubeadm"
+
+ mkdir -p "${dest}/docs/man/man1/"
+ "${genman}" "${dest}/docs/man/man1/" "kube-apiserver"
+ "${genman}" "${dest}/docs/man/man1/" "kube-controller-manager"
+ "${genman}" "${dest}/docs/man/man1/" "kube-proxy"
+ "${genman}" "${dest}/docs/man/man1/" "kube-scheduler"
+ "${genman}" "${dest}/docs/man/man1/" "kubelet"
+ "${genman}" "${dest}/docs/man/man1/" "kubectl"
+ "${genman}" "${dest}/docs/man/man1/" "kubeadm"
+
+ mkdir -p "${dest}/docs/yaml/kubectl/"
+ "${genyaml}" "${dest}/docs/yaml/kubectl/"
+
+ # create the list of generated files
+ pushd "${dest}" > /dev/null || return 1
+ touch docs/.generated_docs
+ find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs
+ popd > /dev/null || return 1
+}
+
+# Removes previously generated docs-- we don't want to check them in. $KUBE_ROOT
+# must be set.
+kube::util::remove-gen-docs() {
+ if [ -e "${KUBE_ROOT}/docs/.generated_docs" ]; then
+ # remove all of the old docs; we don't want to check them in.
+ while read -r file; do
+ rm "${KUBE_ROOT}/${file}" 2>/dev/null || true
+ done <"${KUBE_ROOT}/docs/.generated_docs"
+ # The docs/.generated_docs file lists itself, so we don't need to explicitly
+ # delete it.
+ fi
+}
+
+# Takes a group/version and returns the path to its location on disk, sans
+# "pkg". E.g.:
+# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1
+# * default behavior for only a group: experimental -> apis/experimental
+# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned
+# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1
+# * Very special handling for when both group and version are "": / -> api
+#
+# $KUBE_ROOT must be set.
+kube::util::group-version-to-pkg-path() {
+ local group_version="$1"
+
+ while IFS=$'\n' read -r api; do
+ if [[ "${api}" = "${group_version/.*k8s.io/}" ]]; then
+ echo "vendor/k8s.io/api/${group_version/.*k8s.io/}"
+ return
+ fi
+ done < <(cd "${KUBE_ROOT}/staging/src/k8s.io/api" && find . -name types.go -exec dirname {} \; | sed "s|\./||g" | sort)
+
+ # "v1" is the API GroupVersion
+ if [[ "${group_version}" == "v1" ]]; then
+ echo "vendor/k8s.io/api/core/v1"
+ return
+ fi
+
+ # Special cases first.
+ # TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
+ # moving the results to pkg/apis/api.
+ case "${group_version}" in
+ # both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API.
+ __internal)
+ echo "pkg/apis/core"
+ ;;
+ meta/v1)
+ echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1"
+ ;;
+ meta/v1beta1)
+ echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+ ;;
+ *.k8s.io)
+ echo "pkg/apis/${group_version%.*k8s.io}"
+ ;;
+ *.k8s.io/*)
+ echo "pkg/apis/${group_version/.*k8s.io/}"
+ ;;
+ *)
+ echo "pkg/apis/${group_version%__internal}"
+ ;;
+ esac
+}
+
+# Takes a group/version and returns the swagger-spec file name.
+# default behavior: extensions/v1beta1 -> extensions_v1beta1
+# special case for v1: v1 -> v1
+kube::util::gv-to-swagger-name() {
+ local group_version="$1"
+ case "${group_version}" in
+ v1)
+ echo "v1"
+ ;;
+ *)
+ echo "${group_version%/*}_${group_version#*/}"
+ ;;
+ esac
+}
+
+# Returns the name of the upstream remote repository name for the local git
+# repo, e.g. "upstream" or "origin".
+kube::util::git_upstream_remote_name() {
+ git remote -v | grep fetch |\
+ grep -E 'github.com[/:]kubernetes/kubernetes|k8s.io/kubernetes' |\
+ head -n 1 | awk '{print $1}'
+}
+
+# Exits script if working directory is dirty. If it's run interactively in the terminal
+# the user can commit changes in a second terminal. This script will wait.
+kube::util::ensure_clean_working_dir() {
+ while ! git diff HEAD --exit-code &>/dev/null; do
+ echo -e "\nUnexpected dirty working directory:\n"
+ if tty -s; then
+ git status -s
+ else
+ git diff -a # be more verbose in log files without tty
+ exit 1
+ fi | sed 's/^/ /'
+ echo -e "\nCommit your changes in another terminal and then continue here by pressing enter."
+ read -r
+ done 1>&2
+}
+
+# Find the base commit using:
+# $PULL_BASE_SHA if set (from Prow)
+# current ref from the remote upstream branch
+kube::util::base_ref() {
+ local -r git_branch=$1
+
+ if [[ -n ${PULL_BASE_SHA:-} ]]; then
+ echo "${PULL_BASE_SHA}"
+ return
+ fi
+
+ full_branch="$(kube::util::git_upstream_remote_name)/${git_branch}"
+
+ # make sure the branch is valid, otherwise the check will pass erroneously.
+ if ! git describe "${full_branch}" >/dev/null; then
+ # abort!
+ exit 1
+ fi
+
+ echo "${full_branch}"
+}
+
+# Checks whether there are any files matching pattern $2 changed between the
+# current branch and upstream branch named by $1.
+# Returns 1 (false) if there are no changes
+# 0 (true) if there are changes detected.
+kube::util::has_changes() {
+ local -r git_branch=$1
+ local -r pattern=$2
+ local -r not_pattern=${3:-totallyimpossiblepattern}
+
+ local base_ref
+ base_ref=$(kube::util::base_ref "${git_branch}")
+ echo "Checking for '${pattern}' changes against '${base_ref}'"
+
+ # notice this uses ... to find the first shared ancestor
+ if git diff --name-only "${base_ref}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
+ return 0
+ fi
+ # also check for pending changes
+ if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
+ echo "Detected '${pattern}' uncommitted changes."
+ return 0
+ fi
+ echo "No '${pattern}' changes detected."
+ return 1
+}
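+
+# Illustrative usage (hypothetical pattern and follow-up command; requires an
+# upstream kubernetes remote for base-ref detection):
+#   if kube::util::has_changes master "\.go$"; then make test; fi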
+
+kube::util::download_file() {
+ local -r url=$1
+ local -r destination_file=$2
+
+ rm "${destination_file}" 2&> /dev/null || true
+
+ for i in $(seq 5)
+ do
+ if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then
+ echo "Downloading ${url} failed. $((5-i)) retries left."
+ sleep 1
+ else
+ echo "Downloading ${url} succeed"
+ return 0
+ fi
+ done
+ return 1
+}
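+
+# Illustrative usage (hypothetical URL and destination path):
+#   kube::util::download_file "https://example.com/a.tgz" "/tmp/a.tgz" || exit 1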
+
+# Test whether openssl is installed.
+# Sets:
+# OPENSSL_BIN: The path to the openssl binary to use
+function kube::util::test_openssl_installed {
+ if ! openssl version >& /dev/null; then
+ echo "Failed to run openssl. Please ensure openssl is installed"
+ exit 1
+ fi
+
+ OPENSSL_BIN=$(command -v openssl)
+}
+
+# creates a client CA, args are sudo, dest-dir, ca-id, purpose
+# purpose is dropped in after "key encipherment", you usually want
+# '"client auth"'
+# '"server auth"'
+# '"client auth","server auth"'
+function kube::util::create_signing_certkey {
+ local sudo=$1
+ local dest_dir=$2
+ local id=$3
+ local purpose=$4
+ # Create client ca
+ ${sudo} /usr/bin/env bash -e <<EOF
+ rm -f "${dest_dir}/${id}-ca.crt" "${dest_dir}/${id}-ca.key"
+ ${OPENSSL_BIN} req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout "${dest_dir}/${id}-ca.key" -out "${dest_dir}/${id}-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
+ echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment",${purpose}]}}}' > "${dest_dir}/${id}-ca-config.json"
+EOF
+}
+
+# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups...
+function kube::util::create_client_certkey {
+ local sudo=$1
+ local dest_dir=$2
+ local ca=$3
+ local id=$4
+ local cn=${5:-$4}
+ local groups=""
+ local SEP=""
+ shift 5
+ while [ -n "${1:-}" ]; do
+ groups+="${SEP}{\"O\":\"$1\"}"
+ SEP=","
+ shift 1
+ done
+ ${sudo} /usr/bin/env bash -e <<EOF
+ cd ${dest_dir}
+ echo '{"CN":"${cn}","hosts":[""],"names":[${groups}],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare client-${id}
+ mv "client-${id}-key.pem" "client-${id}.key"
+ mv "client-${id}.pem" "client-${id}.crt"
+ rm -f "client-${id}.csr"
+EOF
+}
+
+# writes a kubeconfig file that has a token or a client cert
+# args are sudo, dest-dir, ca file, host, port, client id, token(optional)
+function kube::util::write_client_kubeconfig {
+ local sudo=$1
+ local dest_dir=$2
+ local ca_file=$3
+ local api_host=$4
+ local api_port=$5
+ local client_id=$6
+ local token=${7:-}
+ cat <<EOF | ${sudo} tee "${dest_dir}"/"${client_id}".kubeconfig > /dev/null
+apiVersion: v1
+kind: Config
+clusters:
+ - cluster:
+ certificate-authority: ${ca_file}
+ server: https://${api_host}:${api_port}/
+ name: local-up-cluster
+users:
+ - user:
+ token: ${token}
+ client-certificate: ${dest_dir}/client-${client_id}.crt
+ client-key: ${dest_dir}/client-${client_id}.key
+ name: local-up-cluster
+contexts:
+ - context:
+ cluster: local-up-cluster
+ user: local-up-cluster
+ name: local-up-cluster
+current-context: local-up-cluster
+EOF
+
+ # flatten the kubeconfig files to make them self contained
+ username=$(whoami)
+ ${sudo} /usr/bin/env bash -e < "/tmp/${client_id}.kubeconfig"
+ mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig"
+ chown ${username} "${dest_dir}/${client_id}.kubeconfig"
+EOF
+}
+
+# list_staging_repos outputs a sorted list of repos in staging/src/kubesphere.io
+# each entry will just be the $repo portion of staging/src/kubesphere.io/$repo/...
+# $KUBE_ROOT must be set.
+function kube::util::list_staging_repos() {
+ (
+ cd "${KUBE_ROOT}/staging/src/kubesphere.io" && \
+ find . -mindepth 1 -maxdepth 1 -type d | cut -c 3- | sort
+ )
+}
+
+
+# Determines if docker can be run, failures may simply require that the user be added to the docker group.
+function kube::util::ensure_docker_daemon_connectivity {
+ IFS=" " read -ra DOCKER <<< "${DOCKER_OPTS}"
+ # Expand ${DOCKER[@]} only if it's not unset. This is to work around
+ # Bash 3 issue with unbound variable.
+ DOCKER=(docker ${DOCKER[@]:+"${DOCKER[@]}"})
+ if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then
+ cat <<'EOF' >&2
+Can't connect to 'docker' daemon. please fix and retry.
+
+Possible causes:
+ - Docker Daemon not started
+ - Linux: confirm via your init system
+ - macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start <name>`
+ - macOS w/ Docker for Mac: Check the menu bar and start the Docker application
+ - DOCKER_HOST hasn't been set or is set incorrectly
+ - Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
+ - macOS w/ docker-machine: run `eval "$(docker-machine env <name>)"`
+ - macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
+ - Other things to check:
+ - Linux: User isn't in 'docker' group. Add and relogin.
+ - Something like 'sudo usermod -a -G docker ${USER}'
+ - RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8
+EOF
+ return 1
+ fi
+}
+
+# Wait for background jobs to finish. Return with
+# an error status if any of the jobs failed.
+kube::util::wait-for-jobs() {
+ local fail=0
+ local job
+ for job in $(jobs -p); do
+ wait "${job}" || fail=$((fail + 1))
+ done
+ return ${fail}
+}
+
+# kube::util::join
+# Concatenates the list elements with the delimiter passed as first parameter
+#
+# Ex: kube::util::join , a b c
+# -> a,b,c
+function kube::util::join {
+ local IFS="$1"
+ shift
+ echo "$*"
+}
+
+# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH
+#
+# Assumed vars:
+# $1 (cfssl directory) (optional)
+#
+# Sets:
+# CFSSL_BIN: The path of the installed cfssl binary
+# CFSSLJSON_BIN: The path of the installed cfssljson binary
+#
+function kube::util::ensure-cfssl {
+ if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then
+ CFSSL_BIN=$(command -v cfssl)
+ CFSSLJSON_BIN=$(command -v cfssljson)
+ return 0
+ fi
+
+ host_arch=$(kube::util::host_arch)
+
+ if [[ "${host_arch}" != "amd64" ]]; then
+ echo "Cannot download cfssl on non-amd64 hosts and cfssl does not appear to be installed."
+ echo "Please install cfssl and cfssljson and verify they are in \$PATH."
+ echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
+ exit 1
+ fi
+
+ # Create a temp dir for cfssl if no directory was given
+ local cfssldir=${1:-}
+ if [[ -z "${cfssldir}" ]]; then
+ kube::util::ensure-temp-dir
+ cfssldir="${KUBE_TEMP}/cfssl"
+ fi
+
+ mkdir -p "${cfssldir}"
+ pushd "${cfssldir}" > /dev/null || return 1
+
+ echo "Unable to successfully run 'cfssl' from ${PATH}; downloading instead..."
+ kernel=$(uname -s)
+ case "${kernel}" in
+ Linux)
+ curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
+ curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
+ ;;
+ Darwin)
+ curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64
+ curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64
+ ;;
+ *)
+ echo "Unknown, unsupported platform: ${kernel}." >&2
+ echo "Supported platforms: Linux, Darwin." >&2
+ exit 2
+ esac
+
+ chmod +x cfssl || true
+ chmod +x cfssljson || true
+
+ CFSSL_BIN="${cfssldir}/cfssl"
+ CFSSLJSON_BIN="${cfssldir}/cfssljson"
+ if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then
+ echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH."
+ echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
+ exit 1
+ fi
+ popd > /dev/null || return 1
+}
+
+# kube::util::ensure_dockerized
+# Confirms that the script is being run inside a kube-build image
+#
+function kube::util::ensure_dockerized {
+ if [[ -f /kube-build-image ]]; then
+ return 0
+ else
+ echo "ERROR: This script is designed to be run inside a kube-build container"
+ exit 1
+ fi
+}
+
+# kube::util::ensure-gnu-sed
+# Determines which sed binary is gnu-sed on linux/darwin
+#
+# Sets:
+# SED: The name of the gnu-sed binary
+#
+function kube::util::ensure-gnu-sed {
+ if LANG=C sed --help 2>&1 | grep -q GNU; then
+ SED="sed"
+ elif command -v gsed &>/dev/null; then
+ SED="gsed"
+ else
+ kube::log::error "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2
+ return 1
+ fi
+ kube::util::sourced_variable "${SED}"
+}
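+
+# Illustrative usage (hypothetical file): detect GNU sed, then edit in place:
+#   kube::util::ensure-gnu-sed
+#   "${SED}" -i 's/old/new/' file.txt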
+
+# kube::util::check-file-in-alphabetical-order
+# Check that the file is in alphabetical order
+#
+function kube::util::check-file-in-alphabetical-order {
+ local failure_file="$1"
+ if ! diff -u "${failure_file}" <(LC_ALL=C sort "${failure_file}"); then
+ {
+ echo
+ echo "${failure_file} is not in alphabetical order. Please sort it:"
+ echo
+ echo " LC_ALL=C sort -o ${failure_file} ${failure_file}"
+ echo
+ } >&2
+ false
+ fi
+}
+
+# kube::util::require-jq
+# Checks whether jq is installed.
+function kube::util::require-jq {
+ if ! command -v jq &>/dev/null; then
+ echo "jq not found. Please install." 1>&2
+ return 1
+ fi
+}
+
+# outputs md5 hash of $1, works on macOS and Linux
+function kube::util::md5() {
+ if which md5 >/dev/null 2>&1; then
+ md5 -q "$1"
+ else
+ md5sum "$1" | awk '{ print $1 }'
+ fi
+}
+
+# kube::util::read-array
+# Reads in stdin and adds it line by line to the array provided. This can be
+# used instead of "mapfile -t", and is bash 3 compatible.
+#
+# Assumed vars:
+# $1 (name of array to create/modify)
+#
+# Example usage:
+# kube::util::read-array files < <(ls -1)
+#
+function kube::util::read-array {
+ local i=0
+ unset -v "$1"
+ while IFS= read -r "$1[i++]"; do :; done
+ eval "[[ \${$1[--i]} ]]" || unset "$1[i]" # ensures last element isn't empty
+}
+
+# Some useful colors.
+if [[ -z "${color_start-}" ]]; then
+ declare -r color_start="\033["
+ declare -r color_red="${color_start}0;31m"
+ declare -r color_yellow="${color_start}0;33m"
+ declare -r color_green="${color_start}0;32m"
+ declare -r color_blue="${color_start}1;34m"
+ declare -r color_cyan="${color_start}1;36m"
+ declare -r color_norm="${color_start}0m"
+
+ kube::util::sourced_variable "${color_start}"
+ kube::util::sourced_variable "${color_red}"
+ kube::util::sourced_variable "${color_yellow}"
+ kube::util::sourced_variable "${color_green}"
+ kube::util::sourced_variable "${color_blue}"
+ kube::util::sourced_variable "${color_cyan}"
+ kube::util::sourced_variable "${color_norm}"
+fi
+
+# ex: ts=2 sw=2 et filetype=sh
diff --git a/feature/hack/sync-components.sh b/feature/hack/sync-components.sh
new file mode 100755
index 000000000..d6585e89c
--- /dev/null
+++ b/feature/hack/sync-components.sh
@@ -0,0 +1,340 @@
+#!/usr/bin/env bash
+
+# Copyright 2022 The KubeSphere Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#####################################################################
+#
+# Usage:
+# Specify the component version through environment variables.
+#
+# For example:
+#
+# KUBERNETES_VERSION=v1.25.3 bash hack/sync-components.sh
+#
+####################################################################
+
+set -e
+
+KUBERNETES_VERSION=${KUBERNETES_VERSION}
+NODE_LOCAL_DNS_VERSION=${NODE_LOCAL_DNS_VERSION}
+COREDNS_VERSION=${COREDNS_VERSION}
+CALICO_VERSION=${CALICO_VERSION}
+KUBE_OVN_VERSION=${KUBE_OVN_VERSION}
+CILIUM_VERSION=${CILIUM_VERSION}
+OPENEBS_VERSION=${OPENEBS_VERSION}
+KUBEVIP_VERSION=${KUBEVIP_VERSION}
+HAPROXY_VERSION=${HAPROXY_VERSION}
+HELM_VERSION=${HELM_VERSION}
+CNI_VERSION=${CNI_VERSION}
+ETCD_VERSION=${ETCD_VERSION}
+CRICTL_VERSION=${CRICTL_VERSION}
+K3S_VERSION=${K3S_VERSION}
+CONTAINERD_VERSION=${CONTAINERD_VERSION}
+RUNC_VERSION=${RUNC_VERSION}
+COMPOSE_VERSION=${COMPOSE_VERSION}
+
+# qsctl
+QSCTL_ACCESS_KEY_ID=${QSCTL_ACCESS_KEY_ID}
+QSCTL_SECRET_ACCESS_KEY=${QSCTL_SECRET_ACCESS_KEY}
+
+# docker.io
+DOCKERHUB_USERNAME=${DOCKERHUB_USERNAME}
+DOCKERHUB_PASSWORD=${DOCKERHUB_PASSWORD}
+
+# registry.cn-beijing.aliyuncs.com
+ALIYUNCS_USERNAME=${ALIYUNCS_USERNAME}
+ALIYUNCS_PASSWORD=${ALIYUNCS_PASSWORD}
+
+DOCKERHUB_NAMESPACE="kubesphere"
+ALIYUNCS_NAMESPACE="kubesphereio"
+
+BINARIES=("kubeadm" "kubelet" "kubectl")
+ARCHS=("amd64" "arm64")
+
+# Generate qsctl config
+if [ $QSCTL_ACCESS_KEY_ID ] && [ $QSCTL_SECRET_ACCESS_KEY ];then
+ echo "access_key_id: $QSCTL_ACCESS_KEY_ID" > qsctl-config.yaml
+ echo "secret_access_key: $QSCTL_SECRET_ACCESS_KEY" >> qsctl-config.yaml
+fi
+
+# Login docker.io
+if [ $DOCKERHUB_USERNAME ] && [ $DOCKERHUB_PASSWORD ];then
+ skopeo login docker.io -u $DOCKERHUB_USERNAME -p $DOCKERHUB_PASSWORD
+fi
+
+# Login registry.cn-beijing.aliyuncs.com
+if [ $ALIYUNCS_USERNAME ] && [ $ALIYUNCS_PASSWORD ];then
+ skopeo login registry.cn-beijing.aliyuncs.com -u $ALIYUNCS_USERNAME -p $ALIYUNCS_PASSWORD
+fi
+
+# Sync Kubernetes Binaries and Images
+if [ $KUBERNETES_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/kube/$KUBERNETES_VERSION/$arch
+ for binary in ${BINARIES[@]}
+ do
+ echo "Synchronizing $binary-$arch"
+
+ curl -L -o binaries/kube/$KUBERNETES_VERSION/$arch/$binary \
+ https://storage.googleapis.com/kubernetes-release/release/$KUBERNETES_VERSION/bin/linux/$arch/$binary
+
+ qsctl cp binaries/kube/$KUBERNETES_VERSION/$arch/$binary \
+ qs://kubernetes-release/release/$KUBERNETES_VERSION/bin/linux/$arch/$binary \
+ -c qsctl-config.yaml
+ done
+ done
+
+ chmod +x binaries/kube/$KUBERNETES_VERSION/amd64/kubeadm
+ binaries/kube/$KUBERNETES_VERSION/amd64/kubeadm config images list | while read -r image; do
+ skopeo copy --all docker://$image docker://docker.io/$DOCKERHUB_NAMESPACE/${image##*/}
+ skopeo copy --all docker://$image docker://registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/${image##*/}
+ done
+
+ rm -rf binaries
+fi
+
+# Sync Helm Binary
+if [ $HELM_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/helm/$HELM_VERSION/$arch
+ echo "Synchronizing helm-$arch"
+
+ curl -L -o binaries/helm/$HELM_VERSION/$arch/helm-$HELM_VERSION-linux-$arch.tar.gz \
+ https://get.helm.sh/helm-$HELM_VERSION-linux-$arch.tar.gz
+
+ tar -zxf binaries/helm/$HELM_VERSION/$arch/helm-$HELM_VERSION-linux-$arch.tar.gz -C binaries/helm/$HELM_VERSION/$arch
+
+ qsctl cp binaries/helm/$HELM_VERSION/$arch/linux-$arch/helm \
+ qs://kubernetes-helm/linux-$arch/$HELM_VERSION/helm \
+ -c qsctl-config.yaml
+
+ qsctl cp binaries/helm/$HELM_VERSION/$arch/helm-$HELM_VERSION-linux-$arch.tar.gz \
+ qs://kubernetes-helm/linux-$arch/$HELM_VERSION/helm-$HELM_VERSION-linux-$arch.tar.gz \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync ETCD Binary
+if [ $ETCD_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/etcd/$ETCD_VERSION/$arch
+ echo "Synchronizing etcd-$arch"
+
+ curl -L -o binaries/etcd/$ETCD_VERSION/$arch/etcd-$ETCD_VERSION-linux-$arch.tar.gz \
+ https://github.com/coreos/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-$arch.tar.gz
+
+ qsctl cp binaries/etcd/$ETCD_VERSION/$arch/etcd-$ETCD_VERSION-linux-$arch.tar.gz \
+ qs://kubernetes-release/etcd/release/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-$arch.tar.gz \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync CNI Binary
+if [ $CNI_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/cni/$CNI_VERSION/$arch
+ echo "Synchronizing cni-$arch"
+
+ curl -L -o binaries/cni/$CNI_VERSION/$arch/cni-plugins-linux-$arch-$CNI_VERSION.tgz \
+ https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-$arch-$CNI_VERSION.tgz
+
+ qsctl cp binaries/cni/$CNI_VERSION/$arch/cni-plugins-linux-$arch-$CNI_VERSION.tgz \
+ qs://containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-$arch-$CNI_VERSION.tgz \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync CALICOCTL Binary
+if [ $CALICO_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/calicoctl/$CALICO_VERSION/$arch
+ echo "Synchronizing calicoctl-$arch"
+
+ curl -L -o binaries/calicoctl/$CALICO_VERSION/$arch/calicoctl-linux-$arch \
+ https://github.com/projectcalico/calico/releases/download/$CALICO_VERSION/calicoctl-linux-$arch
+
+ qsctl cp binaries/calicoctl/$CALICO_VERSION/$arch/calicoctl-linux-$arch \
+ qs://kubernetes-release/projectcalico/calico/releases/download/$CALICO_VERSION/calicoctl-linux-$arch \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync crictl Binary
+if [ $CRICTL_VERSION ]; then
+ echo "access_key_id: $ACCESS_KEY_ID" > qsctl-config.yaml
+ echo "secret_access_key: $SECRET_ACCESS_KEY" >> qsctl-config.yaml
+
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/crictl/$CRICTL_VERSION/$arch
+ echo "Synchronizing crictl-$arch"
+
+ curl -L -o binaries/crictl/$CRICTL_VERSION/$arch/crictl-$CRICTL_VERSION-linux-$arch.tar.gz \
+ https://github.com/kubernetes-sigs/cri-tools/releases/download/$CRICTL_VERSION/crictl-$CRICTL_VERSION-linux-$arch.tar.gz
+
+ qsctl cp binaries/crictl/$CRICTL_VERSION/$arch/crictl-$CRICTL_VERSION-linux-$arch.tar.gz \
+ qs://kubernetes-release/cri-tools/releases/download/$CRICTL_VERSION/crictl-$CRICTL_VERSION-linux-$arch.tar.gz \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync k3s Binary
+if [ $K3S_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/k3s/$K3S_VERSION/$arch
+ echo "Synchronizing k3s-$arch"
+ if [ $arch != "amd64" ]; then
+ curl -L -o binaries/k3s/$K3S_VERSION/$arch/k3s \
+ https://github.com/rancher/k3s/releases/download/$K3S_VERSION+k3s1/k3s-$arch
+ else
+ curl -L -o binaries/k3s/$K3S_VERSION/$arch/k3s \
+ https://github.com/rancher/k3s/releases/download/$K3S_VERSION+k3s1/k3s
+ fi
+ qsctl cp binaries/k3s/$K3S_VERSION/$arch/k3s \
+ qs://kubernetes-release/k3s/releases/download/$K3S_VERSION+k3s1/linux/$arch/k3s \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync containerd Binary
+if [ $CONTAINERD_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/containerd/$CONTAINERD_VERSION/$arch
+ echo "Synchronizing containerd-$arch"
+
+ curl -L -o binaries/containerd/$CONTAINERD_VERSION/$arch/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz \
+ https://github.com/containerd/containerd/releases/download/v$CONTAINERD_VERSION/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz
+
+ qsctl cp binaries/containerd/$CONTAINERD_VERSION/$arch/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz \
+ qs://kubernetes-release/containerd/containerd/releases/download/v$CONTAINERD_VERSION/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync runc Binary
+if [ $RUNC_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/runc/$RUNC_VERSION/$arch
+ echo "Synchronizing runc-$arch"
+
+ curl -L -o binaries/runc/$RUNC_VERSION/$arch/runc.$arch \
+ https://github.com/opencontainers/runc/releases/download/$RUNC_VERSION/runc.$arch
+
+ qsctl cp binaries/runc/$RUNC_VERSION/$arch/runc.$arch \
+ qs://kubernetes-release/opencontainers/runc/releases/download/$RUNC_VERSION/runc.$arch \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync docker-compose Binary
+if [ $COMPOSE_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/compose/$COMPOSE_VERSION/$arch
+ echo "Synchronizing runc-$arch"
+ if [ $arch == "amd64" ]; then
+ curl -L -o binaries/compose/$COMPOSE_VERSION/$arch/docker-compose-linux-x86_64 \
+ https://github.com/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-linux-x86_64
+
+ qsctl cp binaries/compose/$COMPOSE_VERSION/$arch/docker-compose-linux-x86_64 \
+ qs://kubernetes-release/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-linux-x86_64 \
+ -c qsctl-config.yaml
+
+ elif [ $arch == "arm64" ]; then
+ curl -L -o binaries/compose/$COMPOSE_VERSION/$arch/docker-compose-linux-aarch64 \
+ https://github.com/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-linux-aarch64
+
+ qsctl cp binaries/compose/$COMPOSE_VERSION/$arch/docker-compose-linux-aarch64 \
+ qs://kubernetes-release/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-linux-aarch64 \
+ -c qsctl-config.yaml
+
+ fi
+ done
+
+ rm -rf binaries
+fi
+
+rm -rf qsctl-config.yaml
+
+# Sync NodeLocalDns Images
+if [ $NODE_LOCAL_DNS_VERSION ]; then
+ skopeo sync --src docker --dest docker registry.k8s.io/dns/k8s-dns-node-cache:$NODE_LOCAL_DNS_VERSION docker.io/$DOCKERHUB_NAMESPACE/k8s-dns-node-cache:$NODE_LOCAL_DNS_VERSION --all
+ skopeo sync --src docker --dest docker registry.k8s.io/dns/k8s-dns-node-cache:$NODE_LOCAL_DNS_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/k8s-dns-node-cache:$NODE_LOCAL_DNS_VERSION --all
+fi
+
+# Sync Coredns Images
+if [ $COREDNS_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/coredns/coredns:$COREDNS_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/coredns:$COREDNS_VERSION --all
+fi
+
+# Sync Calico Images
+if [ $CALICO_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/calico/kube-controllers:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/kube-controllers:$CALICO_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/calico/cni:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/cni:$CALICO_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/calico/node:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/node:$CALICO_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/calico/pod2daemon-flexvol:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/pod2daemon-flexvol:$CALICO_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/calico/typha:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/typha:$CALICO_VERSION --all
+fi
+
+# Sync Kube-OVN Images
+if [ $KUBE_OVN_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/kubeovn/kube-ovn:$KUBE_OVN_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/kube-ovn:$KUBE_OVN_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/kubeovn/vpc-nat-gateway:$KUBE_OVN_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/vpc-nat-gateway:$KUBE_OVN_VERSION --all
+fi
+
+# Sync Cilium Images
+if [ $CILIUM_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/cilium/cilium:$CILIUM_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/cilium:$CILIUM_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/cilium/cilium-operator-generic:$CILIUM_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/cilium-operator-generic:$CILIUM_VERSION --all
+fi
+
+# Sync OpenEBS Images
+if [ $OPENEBS_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/openebs/provisioner-localpv:$OPENEBS_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/provisioner-localpv:$OPENEBS_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/openebs/linux-utils:$OPENEBS_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/linux-utils:$OPENEBS_VERSION --all
+fi
+
+# Sync Haproxy Images
+if [ $HAPROXY_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/library/haproxy:$HAPROXY_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/haproxy:$HAPROXY_VERSION --all
+fi
+
+# Sync Kube-vip Images
+if [ $KUBEVIP_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/plndr/kubevip:$KUBEVIP_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/kubevip:$KUBEVIP_VERSION --all
+fi
diff --git a/feature/hack/update-goimports.sh b/feature/hack/update-goimports.sh
new file mode 100755
index 000000000..53f428992
--- /dev/null
+++ b/feature/hack/update-goimports.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+
+# Copyright 2020 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+source "${KUBE_ROOT}/hack/lib/init.sh"
+source "${KUBE_ROOT}/hack/lib/util.sh"
+
+kube::golang::verify_go_version
+
+# Ensure that we find the binaries we build before anything else.
+export GOBIN="${KUBE_OUTPUT_BINPATH}"
+PATH="${GOBIN}:${PATH}"
+
+# Explicitly opt into go modules, even though we're inside a GOPATH directory
+export GO111MODULE=on
+
+if ! command -v goimports ; then
+# Install goimports
+ echo 'installing goimports'
+ GO111MODULE=auto go install -mod=mod golang.org/x/tools/cmd/goimports@v0.7.0
+fi
+
+cd "${KUBE_ROOT}" || exit 1
+
+IFS=$'\n' read -r -d '' -a files < <( find . -type f -name '*.go' -not -path "./vendor/*" -not -path "./pkg/client/*" -not -name "zz_generated.deepcopy.go" && printf '\0' )
+
+"goimports" -w -local github.com/kubesphere/kubekey/v4 "${files[@]}"
diff --git a/feature/hack/verify-dockerfiles.sh b/feature/hack/verify-dockerfiles.sh
new file mode 100644
index 000000000..e69de29bb
diff --git a/feature/hack/verify-goimports.sh b/feature/hack/verify-goimports.sh
new file mode 100755
index 000000000..527aa3c08
--- /dev/null
+++ b/feature/hack/verify-goimports.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+# Copyright 2020 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+source "${KUBE_ROOT}/hack/lib/init.sh"
+source "${KUBE_ROOT}/hack/lib/util.sh"
+
+kube::golang::verify_go_version
+
+# Ensure that we find the binaries we build before anything else.
+export GOBIN="${KUBE_OUTPUT_BINPATH}"
+PATH="${GOBIN}:${PATH}"
+
+# Explicitly opt into go modules, even though we're inside a GOPATH directory
+export GO111MODULE=on
+
+if ! command -v goimports ; then
+# Install goimports
+ echo 'installing goimports'
+ GO111MODULE=auto go install -mod=mod golang.org/x/tools/cmd/goimports@v0.7.0
+fi
+
+cd "${KUBE_ROOT}" || exit 1
+
+IFS=$'\n' read -r -d '' -a files < <( find . -type f -name '*.go' -not -path "./vendor/*" -not -path "./pkg/apis/*" -not -path "./pkg/client/*" -not -name "zz_generated.deepcopy.go" && printf '\0' )
+
+output=$(goimports -local github.com/kubesphere/kubekey/v4 -l "${files[@]}")
+
+if [ "${output}" != "" ]; then
+ echo "The following files are not import formatted"
+ printf '%s\n' "${output[@]}"
+ echo "Please run the following command:"
+ echo "make goimports"
+ exit 1
+fi
diff --git a/feature/hack/version.sh b/feature/hack/version.sh
new file mode 100755
index 000000000..aeeb88626
--- /dev/null
+++ b/feature/hack/version.sh
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+# Copyright 2020 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+if [[ "${TRACE-0}" == "1" ]]; then
+ set -o xtrace
+fi
+
+version::get_version_vars() {
+ # shellcheck disable=SC1083
+ GIT_COMMIT="$(git rev-parse HEAD^{commit})"
+
+ if git_status=$(git status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
+ GIT_TREE_STATE="clean"
+ else
+ GIT_TREE_STATE="dirty"
+ fi
+
+ # stolen from k8s.io/hack/lib/version.sh
+ # Use git describe to find the version based on annotated tags.
+ if [[ -n ${GIT_VERSION-} ]] || GIT_VERSION=$(git describe --tags --abbrev=14 --match "v[0-9]*" "${GIT_COMMIT}" 2>/dev/null); then
+ # This translates the "git describe" to an actual semver.org
+ # compatible semantic version that looks something like this:
+ # v1.1.0-alpha.0.6+84c76d1142ea4d
+ #
+ # TODO: We continue calling this "git version" because so many
+ # downstream consumers are expecting it there.
+ # shellcheck disable=SC2001
+ DASHES_IN_VERSION=$(echo "${GIT_VERSION}" | sed "s/[^-]//g")
+ if [[ "${DASHES_IN_VERSION}" == "---" ]] ; then
+ # We have distance to subversion (v1.1.0-subversion-1-gCommitHash)
+ # shellcheck disable=SC2001
+ GIT_VERSION=$(echo "${GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\-\2/")
+ elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then
+ # We have distance to base tag (v1.1.0-1-gCommitHash)
+ # shellcheck disable=SC2001
+ GIT_VERSION=$(echo "${GIT_VERSION}" | sed "s/-g\([0-9a-f]\{14\}\)$/-\1/")
+ fi
+ if [[ "${GIT_TREE_STATE}" == "dirty" ]]; then
+ # git describe --dirty only considers changes to existing files, but
+ # that is problematic since new untracked .go files affect the build,
+ # so use our idea of "dirty" from git status instead.
+ GIT_VERSION+="-dirty"
+ fi
+
+
+ # Try to match the "git describe" output to a regex to try to extract
+ # the "major" and "minor" versions and whether this is the exact tagged
+ # version or whether the tree is between two tagged versions.
+ if [[ "${GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?([+].*)?$ ]]; then
+ GIT_MAJOR=${BASH_REMATCH[1]}
+ GIT_MINOR=${BASH_REMATCH[2]}
+ fi
+
+ # If GIT_VERSION is not a valid Semantic Version, then refuse to build.
+ if ! [[ "${GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then
+ echo "GIT_VERSION should be a valid Semantic Version. Current value: ${GIT_VERSION}"
+ echo "Please see more details here: https://semver.org"
+ exit 1
+ fi
+ fi
+
+ GIT_RELEASE_TAG=$(git describe --abbrev=0 --tags)
+ GIT_RELEASE_COMMIT=$(git rev-list -n 1 "${GIT_RELEASE_TAG}")
+}
+
+# stolen from k8s.io/hack/lib/version.sh and modified
+# Prints the value that needs to be passed to the -ldflags parameter of go build
+version::ldflags() {
+ version::get_version_vars
+
+ local -a ldflags
+ function add_ldflag() {
+ local key=${1}
+ local val=${2}
+ ldflags+=(
+ "-X 'github.com/kubesphere/kubekey/v4/version.${key}=${val}'"
+ )
+ }
+
+ add_ldflag "buildDate" "$(date ${SOURCE_DATE_EPOCH:+"--date=@${SOURCE_DATE_EPOCH}"} -u +'%Y-%m-%dT%H:%M:%SZ')"
+ add_ldflag "gitCommit" "${GIT_COMMIT}"
+ add_ldflag "gitTreeState" "${GIT_TREE_STATE}"
+ add_ldflag "gitMajor" "${GIT_MAJOR}"
+ add_ldflag "gitMinor" "${GIT_MINOR}"
+ add_ldflag "gitVersion" "${GIT_VERSION}"
+ add_ldflag "gitReleaseCommit" "${GIT_RELEASE_COMMIT}"
+
+ # The -ldflags parameter takes a single string, so join the output.
+ echo "${ldflags[*]-}"
+}
+
+version::ldflags
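+
+# Illustrative usage (the package path is hypothetical): embed version info
+# at build time by passing this script's output to go build:
+#   go build -ldflags "$(hack/version.sh)" ./cmd/kk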
diff --git a/feature/pkg/apis/core/v1/config_types.go b/feature/pkg/apis/core/v1/config_types.go
new file mode 100644
index 000000000..3d3a1a616
--- /dev/null
+++ b/feature/pkg/apis/core/v1/config_types.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/json"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=true
+// +kubebuilder:resource:scope=Namespaced
+
+// Config stores global variables for a playbook.
+type Config struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ Spec runtime.RawExtension `json:"spec,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ConfigList of Config
+type ConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Config `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&Config{}, &ConfigList{})
+}
+
+// SetValue sets a value in the config.
+// If the key contains "." (e.g. "a.b"), nested maps are created along the path and the value is set (a: b: value).
+func (c *Config) SetValue(key string, value any) error {
+ configMap := make(map[string]any)
+ if c.Spec.Raw != nil {
+ if err := json.Unmarshal(c.Spec.Raw, &configMap); err != nil {
+ return err
+ }
+ }
+ // set value
+ var f func(input map[string]any, key []string, value any) any
+ f = func(input map[string]any, key []string, value any) any {
+ if len(key) == 0 {
+ return input
+ }
+
+ firstKey := key[0]
+ if len(key) == 1 {
+ input[firstKey] = value
+
+ return input
+ }
+
+ // Handle nested maps
+ if v, ok := input[firstKey]; ok && reflect.TypeOf(v).Kind() == reflect.Map {
+ if vd, ok := v.(map[string]any); ok {
+ input[firstKey] = f(vd, key[1:], value)
+ }
+ } else {
+ input[firstKey] = f(make(map[string]any), key[1:], value)
+ }
+
+ return input
+ }
+ data, err := json.Marshal(f(configMap, strings.Split(key, "."), value))
+ if err != nil {
+ return err
+ }
+ c.Spec.Raw = data
+
+ return nil
+}
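+
+// Illustrative usage (hypothetical cfg variable; mirrors this package's
+// tests, where a spec of {"a":1} becomes nested):
+//
+//   _ = cfg.SetValue("a.b", 2) // spec becomes {"a":{"b":2}}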
+
+// GetValue gets a value by key.
+// If the key contains "." (e.g. "a.b"), the value is looked up along the key path (a: b: value in the config).
+func (c *Config) GetValue(key string) (any, error) {
+ configMap := make(map[string]any)
+ if err := json.Unmarshal(c.Spec.Raw, &configMap); err != nil {
+ return nil, err
+ }
+ // get all value
+ if key == "" {
+ return configMap, nil
+ }
+ // get value
+ var result any = configMap
+ for _, k := range strings.Split(key, ".") {
+ r, ok := result.(map[string]any)
+ if !ok {
+ // cannot find value
+ return nil, fmt.Errorf("cannot find key: %s", key)
+ }
+ result = r[k]
+ }
+
+ return result, nil
+}
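+
+// Illustrative usage (hypothetical cfg whose spec is {"a":{"b":1}}; JSON
+// numbers unmarshal to int64 under the apimachinery json package):
+//
+//   v, _ := cfg.GetValue("a.b") // v == int64(1)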
diff --git a/feature/pkg/apis/core/v1/config_types_test.go b/feature/pkg/apis/core/v1/config_types_test.go
new file mode 100644
index 000000000..52a4ed44d
--- /dev/null
+++ b/feature/pkg/apis/core/v1/config_types_test.go
@@ -0,0 +1,112 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestSetValue(t *testing.T) {
+ testcases := []struct {
+ name string
+ key string
+ val any
+ except Config
+ }{
+ {
+ name: "one level",
+ key: "a",
+ val: 2,
+ except: Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":2}`)}},
+ },
+ {
+ name: "two level repeat key",
+ key: "a.b",
+ val: 2,
+ except: Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":{"b":2}}`)}},
+ },
+ {
+ name: "two level no-repeat key",
+ key: "b.c",
+ val: 2,
+ except: Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":1,"b":{"c":2}}`)}},
+ },
+ }
+
+ for _, tc := range testcases {
+ in := Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":1}`)}}
+ t.Run(tc.name, func(t *testing.T) {
+ err := in.SetValue(tc.key, tc.val)
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, tc.except, in)
+ })
+ }
+}
+
+func TestGetValue(t *testing.T) {
+ testcases := []struct {
+ name string
+ key string
+ config Config
+ except any
+ }{
+ {
+ name: "all value",
+ key: "",
+ config: Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":1}`)}},
+ except: map[string]any{
+ "a": int64(1),
+ },
+ },
+ {
+ name: "none value",
+ key: "b",
+ config: Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":1}`)}},
+ except: nil,
+ },
+ {
+ name: "none multi value",
+ key: "b.c",
+ config: Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":1}`)}},
+ except: nil,
+ },
+ {
+ name: "find one value",
+ key: "a",
+ config: Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":1}`)}},
+ except: int64(1),
+ },
+ {
+ name: "find mulit value",
+ key: "a.b",
+ config: Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":{"b":1}}`)}},
+ except: int64(1),
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ value, _ := tc.config.GetValue(tc.key)
+ assert.Equal(t, tc.except, value)
+ })
+ }
+}
diff --git a/feature/pkg/apis/core/v1/inventory_types.go b/feature/pkg/apis/core/v1/inventory_types.go
new file mode 100644
index 000000000..a2d5879af
--- /dev/null
+++ b/feature/pkg/apis/core/v1/inventory_types.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// InventoryHost of Inventory
+type InventoryHost map[string]runtime.RawExtension
+
+// InventoryGroup of Inventory
+type InventoryGroup struct {
+ Groups []string `json:"groups,omitempty"`
+ Hosts []string `json:"hosts,omitempty"`
+ Vars runtime.RawExtension `json:"vars,omitempty"`
+}
+
+// InventorySpec of Inventory
+type InventorySpec struct {
+ // Hosts contains all nodes.
+ Hosts InventoryHost `json:"hosts,omitempty"`
+ // Vars for all hosts. The priority of vars is: host vars > group vars > inventory vars.
+ // +optional
+ // +kubebuilder:pruning:PreserveUnknownFields
+ Vars runtime.RawExtension `json:"vars,omitempty"`
+ // Groups of nodes. A node may be repeated across groups.
+ // +optional
+ Groups map[string]InventoryGroup `json:"groups,omitempty"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=true
+// +kubebuilder:resource:scope=Namespaced
+
+// Inventory stores host vars for a playbook.
+type Inventory struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec InventorySpec `json:"spec,omitempty"`
+}
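+
+// An illustrative (not authoritative) Inventory manifest; the host name and
+// its vars are hypothetical, since host vars are free-form RawExtension data:
+//
+//   apiVersion: kubekey.kubesphere.io/v1
+//   kind: Inventory
+//   metadata:
+//     name: default
+//   spec:
+//     hosts:
+//       node1:
+//         ssh_host: 10.0.0.1
+//     groups:
+//       k8s_cluster:
+//         hosts: ["node1"]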
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InventoryList of Inventory
+type InventoryList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Inventory `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&Inventory{}, &InventoryList{})
+}
diff --git a/feature/pkg/apis/core/v1/pipeline_types.go b/feature/pkg/apis/core/v1/pipeline_types.go
new file mode 100644
index 000000000..fa2d93e43
--- /dev/null
+++ b/feature/pkg/apis/core/v1/pipeline_types.go
@@ -0,0 +1,223 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// PipelinePhase of Pipeline
+type PipelinePhase string
+
+const (
+ // PipelinePhasePending of Pipeline. The Pipeline has been created but not yet processed.
+ PipelinePhasePending PipelinePhase = "Pending"
+ // PipelinePhaseRunning of Pipeline. The Pipeline is being processed.
+ PipelinePhaseRunning PipelinePhase = "Running"
+ // PipelinePhaseFailed of Pipeline. At least one Task has failed.
+ PipelinePhaseFailed PipelinePhase = "Failed"
+ // PipelinePhaseSucceed of Pipeline. All Tasks ran successfully.
+ PipelinePhaseSucceed PipelinePhase = "Succeed"
+)
+
+const (
+ // BuiltinsProjectAnnotation use builtins project of KubeKey
+ BuiltinsProjectAnnotation = "kubekey.kubesphere.io/builtins-project"
+)
+
+// PipelineSpec of pipeline.
+type PipelineSpec struct {
+ // Project is storage for executable packages
+ // +optional
+ Project PipelineProject `json:"project,omitempty"`
+ // Playbook which to execute.
+ Playbook string `json:"playbook"`
+ // InventoryRef is the node configuration for playbook
+ // +optional
+ InventoryRef *corev1.ObjectReference `json:"inventoryRef,omitempty"`
+ // ConfigRef is the global variable configuration for playbook
+ // +optional
+ ConfigRef *corev1.ObjectReference `json:"configRef,omitempty"`
+ // Tags is the tags of playbook which to execute
+ // +optional
+ Tags []string `json:"tags,omitempty"`
+ // SkipTags is the tags of playbook which skip execute
+ // +optional
+ SkipTags []string `json:"skipTags,omitempty"`
+ // If Debug mode is true, runtime data is retained after a successful execution of the Pipeline,
+ // including task execution status and parameters.
+ // +optional
+ Debug bool `json:"debug,omitempty"`
+ // When executed in Kubernetes, the pipeline will create a Job or CronJob to execute it.
+ // +optional
+ JobSpec PipelineJobSpec `json:"jobSpec,omitempty"`
+}
+
+// PipelineJobSpec sets the spec of the job that runs the pipeline.
+// Each time the pipeline is executed, usually only one pod is created, and it is not re-executed after failure.
+type PipelineJobSpec struct {
+ // When Schedule is not empty, the pipeline will create a CronJob; otherwise it will create a Job.
+ // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
+ // +optional
+ Schedule string `json:"schedule,omitempty"`
+
+ // The number of successful finished jobs to retain. Value must be non-negative integer.
+ // Defaults to 3.
+ // +optional
+ SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"`
+
+ // The number of failed finished jobs to retain. Value must be non-negative integer.
+ // Defaults to 1.
+ // +optional
+ FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"`
+
+ // suspend specifies whether the Job controller should create Pods or not. If
+ // a Job is created with suspend set to true, no Pods are created by the Job
+ // controller. If a Job is suspended after creation (i.e. the flag goes from
+ // false to true), the Job controller will delete all active Pods associated
+ // with this Job. Users must design their workload to gracefully handle this.
+ // Suspending a Job will reset the StartTime field of the Job, effectively
+ // resetting the ActiveDeadlineSeconds timer too. Defaults to false.
+ //
+ // +optional
+ Suspend *bool `json:"suspend,omitempty"`
+
+ // Specifies the duration in seconds relative to the startTime that the job
+ // may be continuously active before the system tries to terminate it; value
+ // must be positive integer. If a Job is suspended (at creation or through an
+ // update), this timer will effectively be stopped and reset when the Job is
+ // resumed again.
+ // +optional
+ ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
+
+ // ttlSecondsAfterFinished limits the lifetime of a Job that has finished
+ // execution (either Complete or Failed). If this field is set,
+ // ttlSecondsAfterFinished after the Job finishes, it is eligible to be
+ // automatically deleted. When the Job is being deleted, its lifecycle
+ // guarantees (e.g. finalizers) will be honored. If this field is unset,
+ // the Job won't be automatically deleted. If this field is set to zero,
+ // the Job becomes eligible to be deleted immediately after it finishes.
+ // +optional
+ TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty"`
+
+ // Volumes in job pod.
+ // +optional
+ Volumes []corev1.Volume `json:"workVolume,omitempty"`
+
+ // VolumeMounts in job pod.
+ // +optional
+ VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
+}
+
+// PipelineProject specifies where the playbook is stored.
+type PipelineProject struct {
+ // Addr is the storage for executable packages (in Ansible file format).
+ // When starting with http or https, it will be obtained from a Git repository.
+ // When starting with file path, it will be obtained from the local path.
+ // +optional
+ Addr string `json:"addr,omitempty"`
+ // Name is the name of the project.
+ // +optional
+ Name string `json:"name,omitempty"`
+ // Branch is the git branch of the git Addr.
+ // +optional
+ Branch string `json:"branch,omitempty"`
+ // Tag is the git tag of the git Addr.
+ // +optional
+ Tag string `json:"tag,omitempty"`
+ // InsecureSkipTLS skips TLS verification when the git Addr uses https.
+ // +optional
+ InsecureSkipTLS bool `json:"insecureSkipTLS,omitempty"`
+ // Token is the authorization token for the http request.
+ // +optional
+ Token string `json:"token,omitempty"`
+}
+
+// PipelineStatus of Pipeline
+type PipelineStatus struct {
+ // TaskResult total related tasks execute result.
+ TaskResult PipelineTaskResult `json:"taskResult,omitempty"`
+ // Phase of pipeline.
+ Phase PipelinePhase `json:"phase,omitempty"`
+ // Reason is the failure reason of the pipeline.
+ Reason string `json:"reason,omitempty"`
+ // FailedDetail will record the failed tasks.
+ FailedDetail []PipelineFailedDetail `json:"failedDetail,omitempty"`
+}
+
+// PipelineTaskResult of Pipeline
+type PipelineTaskResult struct {
+ // Total number of tasks.
+ Total int `json:"total,omitempty"`
+ // Success number of tasks.
+ Success int `json:"success,omitempty"`
+ // Failed number of tasks.
+ Failed int `json:"failed,omitempty"`
+ // Ignored number of tasks.
+ Ignored int `json:"ignored,omitempty"`
+}
+
+// PipelineFailedDetail store failed message when pipeline run failed.
+type PipelineFailedDetail struct {
+ // Task name of failed task.
+ Task string `json:"task,omitempty"`
+ // Hosts lists the per-host results of the failed task.
+ Hosts []PipelineFailedDetailHost `json:"hosts,omitempty"`
+}
+
+// PipelineFailedDetailHost detail failed message for each host.
+type PipelineFailedDetailHost struct {
+ // Host name of failed task.
+ Host string `json:"host,omitempty"`
+ // Stdout of failed task.
+ Stdout string `json:"stdout,omitempty"`
+ // StdErr of failed task.
+ StdErr string `json:"stdErr,omitempty"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=true
+// +kubebuilder:resource:scope=Namespaced
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Playbook",type="string",JSONPath=".spec.playbook"
+// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase"
+// +kubebuilder:printcolumn:name="Total",type="integer",JSONPath=".status.taskResult.total"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+
+// Pipeline is the resource that executes a playbook.
+type Pipeline struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec PipelineSpec `json:"spec,omitempty"`
+ Status PipelineStatus `json:"status,omitempty"`
+}
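+
+// An illustrative manifest (the playbook path and inventory reference below
+// are hypothetical; see PipelineSpec for the full field list):
+//
+//	apiVersion: kubekey.kubesphere.io/v1
+//	kind: Pipeline
+//	metadata:
+//	  name: create-cluster
+//	spec:
+//	  playbook: playbooks/create_cluster.yaml
+//	  inventoryRef:
+//	    name: default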
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PipelineList of Pipeline
+type PipelineList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Pipeline `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&Pipeline{}, &PipelineList{})
+}
diff --git a/feature/pkg/apis/core/v1/register.go b/feature/pkg/apis/core/v1/register.go
new file mode 100644
index 000000000..7eb22a496
--- /dev/null
+++ b/feature/pkg/apis/core/v1/register.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1 contains API Schema definitions for the kubekey v1 API group
+// +k8s:deepcopy-gen=package,register
+// +groupName=kubekey.kubesphere.io
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // SchemeGroupVersion is group version used to register these objects
+ SchemeGroupVersion = schema.GroupVersion{Group: "kubekey.kubesphere.io", Version: "v1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/feature/pkg/apis/core/v1/zz_generated.deepcopy.go b/feature/pkg/apis/core/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..7acd5add1
--- /dev/null
+++ b/feature/pkg/apis/core/v1/zz_generated.deepcopy.go
@@ -0,0 +1,457 @@
+//go:build !ignore_autogenerated
+
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+ if in == nil {
+ return nil
+ }
+ out := new(Config)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Config) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigList) DeepCopyInto(out *ConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Config, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList.
+func (in *ConfigList) DeepCopy() *ConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Inventory) DeepCopyInto(out *Inventory) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Inventory.
+func (in *Inventory) DeepCopy() *Inventory {
+ if in == nil {
+ return nil
+ }
+ out := new(Inventory)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Inventory) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InventoryGroup) DeepCopyInto(out *InventoryGroup) {
+ *out = *in
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Hosts != nil {
+ in, out := &in.Hosts, &out.Hosts
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.Vars.DeepCopyInto(&out.Vars)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryGroup.
+func (in *InventoryGroup) DeepCopy() *InventoryGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(InventoryGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in InventoryHost) DeepCopyInto(out *InventoryHost) {
+ {
+ in := &in
+ *out = make(InventoryHost, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryHost.
+func (in InventoryHost) DeepCopy() InventoryHost {
+ if in == nil {
+ return nil
+ }
+ out := new(InventoryHost)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InventoryList) DeepCopyInto(out *InventoryList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Inventory, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryList.
+func (in *InventoryList) DeepCopy() *InventoryList {
+ if in == nil {
+ return nil
+ }
+ out := new(InventoryList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InventoryList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InventorySpec) DeepCopyInto(out *InventorySpec) {
+ *out = *in
+ if in.Hosts != nil {
+ in, out := &in.Hosts, &out.Hosts
+ *out = make(InventoryHost, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ in.Vars.DeepCopyInto(&out.Vars)
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make(map[string]InventoryGroup, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventorySpec.
+func (in *InventorySpec) DeepCopy() *InventorySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(InventorySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Pipeline) DeepCopyInto(out *Pipeline) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipeline.
+func (in *Pipeline) DeepCopy() *Pipeline {
+ if in == nil {
+ return nil
+ }
+ out := new(Pipeline)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Pipeline) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineFailedDetail) DeepCopyInto(out *PipelineFailedDetail) {
+ *out = *in
+ if in.Hosts != nil {
+ in, out := &in.Hosts, &out.Hosts
+ *out = make([]PipelineFailedDetailHost, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineFailedDetail.
+func (in *PipelineFailedDetail) DeepCopy() *PipelineFailedDetail {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineFailedDetail)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineFailedDetailHost) DeepCopyInto(out *PipelineFailedDetailHost) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineFailedDetailHost.
+func (in *PipelineFailedDetailHost) DeepCopy() *PipelineFailedDetailHost {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineFailedDetailHost)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineJobSpec) DeepCopyInto(out *PipelineJobSpec) {
+ *out = *in
+ if in.SuccessfulJobsHistoryLimit != nil {
+ in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit
+ *out = new(int32)
+ **out = **in
+ }
+ if in.FailedJobsHistoryLimit != nil {
+ in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Suspend != nil {
+ in, out := &in.Suspend, &out.Suspend
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ActiveDeadlineSeconds != nil {
+ in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.TTLSecondsAfterFinished != nil {
+ in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make([]corev1.Volume, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.VolumeMounts != nil {
+ in, out := &in.VolumeMounts, &out.VolumeMounts
+ *out = make([]corev1.VolumeMount, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineJobSpec.
+func (in *PipelineJobSpec) DeepCopy() *PipelineJobSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineJobSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineList) DeepCopyInto(out *PipelineList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Pipeline, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineList.
+func (in *PipelineList) DeepCopy() *PipelineList {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PipelineList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineProject) DeepCopyInto(out *PipelineProject) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineProject.
+func (in *PipelineProject) DeepCopy() *PipelineProject {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineProject)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) {
+ *out = *in
+ out.Project = in.Project
+ if in.InventoryRef != nil {
+ in, out := &in.InventoryRef, &out.InventoryRef
+ *out = new(corev1.ObjectReference)
+ **out = **in
+ }
+ if in.ConfigRef != nil {
+ in, out := &in.ConfigRef, &out.ConfigRef
+ *out = new(corev1.ObjectReference)
+ **out = **in
+ }
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SkipTags != nil {
+ in, out := &in.SkipTags, &out.SkipTags
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.JobSpec.DeepCopyInto(&out.JobSpec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineSpec.
+func (in *PipelineSpec) DeepCopy() *PipelineSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineStatus) DeepCopyInto(out *PipelineStatus) {
+ *out = *in
+ out.TaskResult = in.TaskResult
+ if in.FailedDetail != nil {
+ in, out := &in.FailedDetail, &out.FailedDetail
+ *out = make([]PipelineFailedDetail, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStatus.
+func (in *PipelineStatus) DeepCopy() *PipelineStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineTaskResult) DeepCopyInto(out *PipelineTaskResult) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskResult.
+func (in *PipelineTaskResult) DeepCopy() *PipelineTaskResult {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineTaskResult)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/feature/pkg/apis/core/v1alpha1/conversion.go b/feature/pkg/apis/core/v1alpha1/conversion.go
new file mode 100644
index 000000000..66ec17119
--- /dev/null
+++ b/feature/pkg/apis/core/v1alpha1/conversion.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// TaskOwnerField is the field name of the owner reference in the task.
+// It is defined in the proxy transport and is not applicable in kube-apiserver.
+const TaskOwnerField = "ownerReferences:pipeline"
+
+// AddConversionFuncs adds the conversion functions to the given scheme.
+// NOTE: ownerReferences:pipeline is valid in proxy client.
+func AddConversionFuncs(scheme *runtime.Scheme) error {
+ return scheme.AddFieldLabelConversionFunc(
+ SchemeGroupVersion.WithKind("Task"),
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name", "metadata.namespace", TaskOwnerField:
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label %q not supported for Task", label)
+ }
+ },
+ )
+}
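+
+// A sketch of how the label is consumed: with a controller-runtime client
+// backed by the proxy transport (the client variable and pipeline name are
+// assumptions), the Tasks owned by a Pipeline can be listed by field selector:
+//
+//	var tasks TaskList
+//	err := cl.List(ctx, &tasks,
+//		client.InNamespace("default"),
+//		client.MatchingFields{TaskOwnerField: "my-pipeline"})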
diff --git a/feature/pkg/apis/core/v1alpha1/register.go b/feature/pkg/apis/core/v1alpha1/register.go
new file mode 100644
index 000000000..986c483a3
--- /dev/null
+++ b/feature/pkg/apis/core/v1alpha1/register.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 is the internal version and should not be registered in Kubernetes
+// +k8s:deepcopy-gen=package,register
+// +groupName=kubekey.kubesphere.io
+// +kubebuilder:skip
+package v1alpha1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // SchemeGroupVersion is group version used to register these objects
+ SchemeGroupVersion = schema.GroupVersion{Group: "kubekey.kubesphere.io", Version: "v1alpha1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/feature/pkg/apis/core/v1alpha1/task_types.go b/feature/pkg/apis/core/v1alpha1/task_types.go
new file mode 100644
index 000000000..9fa1dd1dc
--- /dev/null
+++ b/feature/pkg/apis/core/v1alpha1/task_types.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// TaskPhase of Task
+type TaskPhase string
+
+const (
+	// TaskPhasePending of Task. The task has been created but not yet processed.
+	TaskPhasePending TaskPhase = "Pending"
+	// TaskPhaseRunning of Task. The task is being processed.
+	TaskPhaseRunning TaskPhase = "Running"
+	// TaskPhaseSuccess of Task. The task's module ran successfully on every host.
+	TaskPhaseSuccess TaskPhase = "Success"
+	// TaskPhaseFailed of Task. The task failed on at least one host.
+	TaskPhaseFailed TaskPhase = "Failed"
+	// TaskPhaseIgnored of Task. The task failed on at least one host, but ignore_errors is set.
+	TaskPhaseIgnored TaskPhase = "Ignored"
+)
+
+const (
+	// TaskAnnotationRole is the absolute directory of the task within the project.
+ TaskAnnotationRole = "kubesphere.io/role"
+)
+
+// TaskSpec of Task
+type TaskSpec struct {
+ Name string `json:"name,omitempty"`
+ Hosts []string `json:"hosts,omitempty"`
+ IgnoreError *bool `json:"ignoreError,omitempty"`
+ Retries int `json:"retries,omitempty"`
+
+ When []string `json:"when,omitempty"`
+ FailedWhen []string `json:"failedWhen,omitempty"`
+ Loop runtime.RawExtension `json:"loop,omitempty"`
+
+ Module Module `json:"module,omitempty"`
+ Register string `json:"register,omitempty"`
+}
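+
+// An illustrative spec (hosts, module, and args below are hypothetical):
+//
+//	spec := TaskSpec{
+//		Name:     "enable kubelet",
+//		Hosts:    []string{"node1", "node2"},
+//		When:     []string{"kube_version is defined"},
+//		Module:   Module{Name: "command", Args: runtime.RawExtension{Raw: []byte(`{"cmd": "systemctl enable kubelet"}`)}},
+//		Register: "enable_result",
+//	}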
+
+// Module of Task
+type Module struct {
+ Name string `json:"name,omitempty"`
+ Args runtime.RawExtension `json:"args,omitempty"`
+}
+
+// TaskStatus of Task
+type TaskStatus struct {
+ RestartCount int `json:"restartCount,omitempty"`
+ Phase TaskPhase `json:"phase,omitempty"`
+ HostResults []TaskHostResult `json:"hostResults,omitempty"`
+}
+
+// TaskHostResult each host result for task
+type TaskHostResult struct {
+ Host string `json:"host,omitempty"`
+ Stdout string `json:"stdout,omitempty"`
+ StdErr string `json:"stdErr,omitempty"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:scope=Namespaced
+
+// Task is a single unit of work executed by a Pipeline.
+type Task struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec TaskSpec `json:"spec,omitempty"`
+ Status TaskStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TaskList for Task
+type TaskList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Task `json:"items"`
+}
+
+// IsComplete reports whether the task has either succeeded or failed.
+func (t Task) IsComplete() bool {
+ return t.IsSucceed() || t.IsFailed()
+}
+
+// IsSucceed reports whether Task.Status.Phase is TaskPhaseSuccess or TaskPhaseIgnored.
+func (t Task) IsSucceed() bool {
+ return t.Status.Phase == TaskPhaseSuccess || t.Status.Phase == TaskPhaseIgnored
+}
+
+// IsFailed reports whether Task.Status.Phase is TaskPhaseFailed and the retries have been exhausted.
+func (t Task) IsFailed() bool {
+ return t.Status.Phase == TaskPhaseFailed && t.Spec.Retries <= t.Status.RestartCount
+}
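+
+// For example (values hypothetical): a task with Retries=3 that has failed
+// twice is retried rather than marked failed:
+//
+//	t := Task{
+//		Spec:   TaskSpec{Retries: 3},
+//		Status: TaskStatus{Phase: TaskPhaseFailed, RestartCount: 2},
+//	}
+//	t.IsFailed()   // false: the retry budget is not yet exhausted
+//	t.IsComplete() // false: the task is neither succeeded nor failed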
+
+func init() {
+ SchemeBuilder.Register(&Task{}, &TaskList{})
+}
diff --git a/feature/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go b/feature/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..b00b302a5
--- /dev/null
+++ b/feature/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,172 @@
+//go:build !ignore_autogenerated
+
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Module) DeepCopyInto(out *Module) {
+ *out = *in
+ in.Args.DeepCopyInto(&out.Args)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Module.
+func (in *Module) DeepCopy() *Module {
+ if in == nil {
+ return nil
+ }
+ out := new(Module)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Task) DeepCopyInto(out *Task) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Task.
+func (in *Task) DeepCopy() *Task {
+ if in == nil {
+ return nil
+ }
+ out := new(Task)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Task) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TaskHostResult) DeepCopyInto(out *TaskHostResult) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskHostResult.
+func (in *TaskHostResult) DeepCopy() *TaskHostResult {
+ if in == nil {
+ return nil
+ }
+ out := new(TaskHostResult)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TaskList) DeepCopyInto(out *TaskList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Task, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskList.
+func (in *TaskList) DeepCopy() *TaskList {
+ if in == nil {
+ return nil
+ }
+ out := new(TaskList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TaskList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TaskSpec) DeepCopyInto(out *TaskSpec) {
+ *out = *in
+ if in.Hosts != nil {
+ in, out := &in.Hosts, &out.Hosts
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.IgnoreError != nil {
+ in, out := &in.IgnoreError, &out.IgnoreError
+ *out = new(bool)
+ **out = **in
+ }
+ if in.When != nil {
+ in, out := &in.When, &out.When
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.FailedWhen != nil {
+ in, out := &in.FailedWhen, &out.FailedWhen
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.Loop.DeepCopyInto(&out.Loop)
+ in.Module.DeepCopyInto(&out.Module)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskSpec.
+func (in *TaskSpec) DeepCopy() *TaskSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TaskSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TaskStatus) DeepCopyInto(out *TaskStatus) {
+ *out = *in
+ if in.HostResults != nil {
+ in, out := &in.HostResults, &out.HostResults
+ *out = make([]TaskHostResult, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskStatus.
+func (in *TaskStatus) DeepCopy() *TaskStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(TaskStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/feature/pkg/apis/project/v1/base.go b/feature/pkg/apis/project/v1/base.go
new file mode 100644
index 000000000..275670bd9
--- /dev/null
+++ b/feature/pkg/apis/project/v1/base.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// Base defined in project.
+type Base struct {
+ Name string `yaml:"name,omitempty"`
+
+ // connection/transport
+ Connection string `yaml:"connection,omitempty"`
+ Port int `yaml:"port,omitempty"`
+ RemoteUser string `yaml:"remote_user,omitempty"`
+
+ // variables
+ Vars map[string]any `yaml:"vars,omitempty"`
+
+ // module default params
+ //ModuleDefaults []map[string]map[string]any `yaml:"module_defaults,omitempty"`
+
+ // flags and misc. settings
+ Environment []map[string]string `yaml:"environment,omitempty"`
+ NoLog bool `yaml:"no_log,omitempty"`
+ RunOnce bool `yaml:"run_once,omitempty"`
+ IgnoreErrors *bool `yaml:"ignore_errors,omitempty"`
+ CheckMode bool `yaml:"check_mode,omitempty"`
+ Diff bool `yaml:"diff,omitempty"`
+ AnyErrorsFatal bool `yaml:"any_errors_fatal,omitempty"`
+ Throttle int `yaml:"throttle,omitempty"`
+ Timeout int `yaml:"timeout,omitempty"`
+
+ // Debugger invoke a debugger on tasks
+ Debugger string `yaml:"debugger,omitempty"`
+
+ // privilege escalation
+ Become bool `yaml:"become,omitempty"`
+ BecomeMethod string `yaml:"become_method,omitempty"`
+ BecomeUser string `yaml:"become_user,omitempty"`
+ BecomeFlags string `yaml:"become_flags,omitempty"`
+ BecomeExe string `yaml:"become_exe,omitempty"`
+}
diff --git a/feature/pkg/apis/project/v1/block.go b/feature/pkg/apis/project/v1/block.go
new file mode 100644
index 000000000..24659d7f4
--- /dev/null
+++ b/feature/pkg/apis/project/v1/block.go
@@ -0,0 +1,190 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "reflect"
+ "strings"
+
+ "k8s.io/klog/v2"
+)
+
+// Block defined in project.
+type Block struct {
+ BlockBase
+	// If BlockInfo is set, Task should be empty.
+ Task
+ IncludeTasks string `yaml:"include_tasks,omitempty"`
+
+ BlockInfo
+}
+
+// BlockBase defined in project.
+type BlockBase struct {
+ Base `yaml:",inline"`
+ Conditional `yaml:",inline"`
+ CollectionSearch `yaml:",inline"`
+ Taggable `yaml:",inline"`
+ Notifiable `yaml:",inline"`
+ Delegatable `yaml:",inline"`
+}
+
+// BlockInfo defined in project.
+type BlockInfo struct {
+ Block []Block `yaml:"block,omitempty"`
+ Rescue []Block `yaml:"rescue,omitempty"`
+ Always []Block `yaml:"always,omitempty"`
+}
+
+// Task defined in project.
+type Task struct {
+ AsyncVal int `yaml:"async,omitempty"`
+ ChangedWhen When `yaml:"changed_when,omitempty"`
+ Delay int `yaml:"delay,omitempty"`
+ FailedWhen When `yaml:"failed_when,omitempty"`
+ Loop any `yaml:"loop,omitempty"`
+ LoopControl LoopControl `yaml:"loop_control,omitempty"`
+ Poll int `yaml:"poll,omitempty"`
+ Register string `yaml:"register,omitempty"`
+ Retries int `yaml:"retries,omitempty"`
+ Until When `yaml:"until,omitempty"`
+
+ // deprecated, used to be loop and loop_args but loop has been repurposed
+ //LoopWith string `yaml:"loop_with"`
+
+	// UnknownField stores fields that match no defined field.
+ UnknownField map[string]any `yaml:"-"`
+}
+
+// UnmarshalYAML decodes a block from YAML, dispatching between include_tasks, nested blocks, and plain tasks.
+func (b *Block) UnmarshalYAML(unmarshal func(any) error) error {
+ // fill baseInfo
+ var bb BlockBase
+ if err := unmarshal(&bb); err == nil {
+ b.BlockBase = bb
+ }
+
+ var m map[string]any
+ if err := unmarshal(&m); err != nil {
+ klog.Errorf("unmarshal data to map error: %v", err)
+
+ return err
+ }
+
+ if includeTasks, ok := handleIncludeTasks(m); ok {
+ // Set the IncludeTasks field if "include_tasks" exists and is valid.
+ b.IncludeTasks = includeTasks
+
+ return nil
+ }
+
+ switch {
+ case m["block"] != nil:
+ // If the "block" key exists, unmarshal it into BlockInfo and set the BlockInfo field.
+ bi, err := handleBlock(m, unmarshal)
+ if err != nil {
+ return err
+ }
+ b.BlockInfo = bi
+ default:
+ // If neither "include_tasks" nor "block" are present, treat the data as a task.
+ t, err := handleTask(m, unmarshal)
+ if err != nil {
+ return err
+ }
+ b.Task = t
+ // Set any remaining unknown fields to the Task's UnknownField.
+ b.UnknownField = m
+ }
+
+ return nil
+}
+
+// handleIncludeTasks checks if the "include_tasks" key exists in the map and is of type string.
+// If so, it returns the string value and true, otherwise it returns an empty string and false.
+func handleIncludeTasks(m map[string]any) (string, bool) {
+ if v, ok := m["include_tasks"]; ok {
+ if it, ok := v.(string); ok {
+ return it, true
+ }
+ }
+
+ return "", false
+}
+
+// handleBlock attempts to unmarshal the block data into a BlockInfo structure.
+// If successful, it returns the BlockInfo and nil. If an error occurs, it logs the error and returns it.
+func handleBlock(_ map[string]any, unmarshal func(any) error) (BlockInfo, error) {
+ var bi BlockInfo
+ if err := unmarshal(&bi); err != nil {
+ klog.Errorf("unmarshal data to block error: %v", err)
+
+ return bi, err
+ }
+
+ return bi, nil
+}
+
+// handleTask attempts to unmarshal the task data into a Task structure.
+// On success it removes all known Block fields from the map, so that only
+// unknown fields remain; on failure it logs and returns the error.
+func handleTask(m map[string]any, unmarshal func(any) error) (Task, error) {
+ var t Task
+ if err := unmarshal(&t); err != nil {
+ klog.Errorf("unmarshal data to task error: %v", err)
+
+ return t, err
+ }
+ deleteExistField(reflect.TypeOf(Block{}), m)
+
+ return t, nil
+}
+
+// deleteExistField removes every key in m that matches a defined field
+// (including embedded ones) of the given struct type, so that only
+// unknown fields remain in the map.
+func deleteExistField(rt reflect.Type, m map[string]any) {
+	for i := range rt.NumField() {
+		field := rt.Field(i)
+		if field.Anonymous {
+			deleteExistField(field.Type, m)
+		} else {
+			// Keep scanning instead of stopping at the first match, so that
+			// every known field is removed from the map.
+			deleteField(field, m)
+		}
+	}
+}
+
+// deleteField looks up the field's yaml key (or its field name) in the map and deletes it, returning true if found.
+func deleteField(field reflect.StructField, m map[string]any) bool {
+ yamlTag := field.Tag.Get("yaml")
+ if yamlTag != "" {
+ for _, t := range strings.Split(yamlTag, ",") {
+ if _, ok := m[t]; ok {
+ delete(m, t)
+
+ return true
+ }
+ }
+ } else {
+ t := strings.ToUpper(field.Name[:1]) + field.Name[1:]
+ if _, ok := m[t]; ok {
+ delete(m, t)
+
+ return true
+ }
+ }
+
+ return false
+}
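+
+// A minimal sketch of how the three YAML shapes decode (inputs illustrative):
+//
+//	- include_tasks: tasks.yaml    -> Block{IncludeTasks: "tasks.yaml"}
+//	- block: [...]                 -> Block{BlockInfo: ...}
+//	- copy: {src: a, dest: b}      -> Block{Task: ...}; keys that match no
+//	                                  defined field land in Task.UnknownField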
diff --git a/feature/pkg/apis/project/v1/collectionsearch.go b/feature/pkg/apis/project/v1/collectionsearch.go
new file mode 100644
index 000000000..3dd2ce621
--- /dev/null
+++ b/feature/pkg/apis/project/v1/collectionsearch.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// CollectionSearch defined in project.
+type CollectionSearch struct {
+ Collections []string `yaml:"collections,omitempty"`
+}
diff --git a/feature/pkg/apis/project/v1/conditional.go b/feature/pkg/apis/project/v1/conditional.go
new file mode 100644
index 000000000..e30e827c5
--- /dev/null
+++ b/feature/pkg/apis/project/v1/conditional.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "errors"
+)
+
+// Conditional defined in project.
+type Conditional struct {
+ When When `yaml:"when,omitempty"`
+}
+
+// When defined in project.
+type When struct {
+ Data []string
+}
+
+// UnmarshalYAML decodes a when clause from either a single string or a list of strings.
+func (w *When) UnmarshalYAML(unmarshal func(any) error) error {
+ var s string
+ if err := unmarshal(&s); err == nil {
+ w.Data = []string{s}
+
+ return nil
+ }
+
+ var a []string
+ if err := unmarshal(&a); err == nil {
+ w.Data = a
+
+ return nil
+ }
+
+	return errors.New("unsupported type, expected string or array of strings")
+}
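+
+// Both YAML forms decode into the same structure (expressions illustrative):
+//
+//	when: inventory_hostname == "node1"          -> Data: one expression
+//	when: ["os == 'linux'", "arch == 'amd64'"]   -> Data: two expressions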
diff --git a/feature/pkg/apis/project/v1/delegatable.go b/feature/pkg/apis/project/v1/delegatable.go
new file mode 100644
index 000000000..feacf29ac
--- /dev/null
+++ b/feature/pkg/apis/project/v1/delegatable.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// Delegatable defined in project.
+type Delegatable struct {
+ DelegateTo string `yaml:"delegate_to,omitempty"`
+ DelegateFacts bool `yaml:"delegate_facts,omitempty"`
+}
diff --git a/feature/pkg/apis/project/v1/docs.go b/feature/pkg/apis/project/v1/docs.go
new file mode 100644
index 000000000..72082c649
--- /dev/null
+++ b/feature/pkg/apis/project/v1/docs.go
@@ -0,0 +1,188 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// Playbook keyword in ansible: https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html#playbook-keywords
+// support list (based on ansible 2.15.5)
+
+/**
+Play
++------+------------------------+------------+
+| Row | Keyword | Support |
++------+------------------------+------------+
+| 1 | any_errors_fatal | ✘ |
+| 2 | become | ✘ |
+| 3 | become_exe | ✘ |
+| 4 | become_flags | ✘ |
+| 5 | become_method | ✘ |
+| 6 | become_user | ✘ |
+| 7 | check_mode | ✘ |
+| 8 | collections | ✘ |
+| 9 | connection | ✔︎ |
+| 10 | debugger | ✘ |
+| 11 | diff | ✘ |
+| 12 | environment | ✘ |
+| 13 | fact_path | ✘ |
+| 14 | force_handlers | ✘ |
+| 15 | gather_facts | ✔︎ |
+| 16 | gather_subset | ✘ |
+| 17 | gather_timeout | ✘ |
+| 18 | handlers | ✘ |
+| 19 | hosts | ✔︎ |
+| 20 | ignore_errors | ✔︎ |
+| 21 | ignore_unreachable | ✘ |
+| 22 | max_fail_percentage | ✘ |
+| 23 | module_defaults | ✘ |
+| 24 | name | ✔︎ |
+| 25 | no_log | ✘ |
+| 26 | order | ✘ |
+| 27 | port | ✘ |
+| 28   | post_tasks             | ✔︎         |
+| 29 | pre_tasks | ✔︎ |
+| 30 | remote_user | ✘ |
+| 31 | roles | ✔︎ |
+| 32 | run_once | ✔︎ |
+| 33 | serial | ✔︎ |
+| 34 | strategy | ✘ |
+| 35 | tags | ✔︎ |
+| 36 | tasks | ✔︎ |
+| 37 | throttle | ✘ |
+| 38 | timeout | ✘ |
+| 39 | vars | ✔︎ |
+| 40 | vars_files | ✘ |
+| 41 | vars_prompt | ✘ |
++------+------------------------+------------+
+
+Role
++------+------------------------+------------+
+| Row | Keyword | Support |
++------+------------------------+------------+
+| 1 | any_errors_fatal | ✘ |
+| 2 | become | ✘ |
+| 3 | become_exe | ✘ |
+| 4 | become_flags | ✘ |
+| 5 | become_method | ✘ |
+| 6 | become_user | ✘ |
+| 7 | check_mode | ✘ |
+| 8 | collections | ✘ |
+| 9 | connection | ✘ |
+| 10 | debugger | ✘ |
+| 11 | delegate_facts | ✘ |
+| 12 | delegate_to | ✘ |
+| 13 | diff | ✘ |
+| 14 | environment | ✘ |
+| 15 | ignore_errors | ✔︎ |
+| 16 | ignore_unreachable | ✘ |
+| 17 | max_fail_percentage | ✘ |
+| 18 | module_defaults | ✘ |
+| 19 | name | ✔︎ |
+| 20 | no_log | ✘ |
+| 21 | port | ✘ |
+| 22 | remote_user | ✘ |
+| 23 | run_once | ✔︎ |
+| 24 | tags | ✔︎ |
+| 25 | throttle | ✘ |
+| 26 | timeout | ✘ |
+| 27 | vars | ✔︎ |
+| 28 | when | ✔︎ |
++------+------------------------+------------+
+
+Block
++------+------------------------+------------+
+| Row | Keyword | Support |
++------+------------------------+------------+
+| 1 | always | ✔︎ |
+| 2 | any_errors_fatal | ✘ |
+| 3 | become | ✘ |
+| 4 | become_exe | ✘ |
+| 5 | become_flags | ✘ |
+| 6 | become_method | ✘ |
+| 7 | become_user | ✘ |
+| 8 | block | ✔︎ |
+| 9 | check_mode | ✘ |
+| 10 | collections | ✘ |
+| 11 | debugger | ✘ |
+| 12 | delegate_facts | ✘ |
+| 13 | delegate_to | ✘ |
+| 14 | diff | ✘ |
+| 15 | environment | ✘ |
+| 16 | ignore_errors | ✔︎ |
+| 17 | ignore_unreachable | ✘ |
+| 18 | max_fail_percentage | ✘ |
+| 19 | module_defaults | ✘ |
+| 20 | name | ✔︎ |
+| 21 | no_log | ✘ |
+| 22 | notify | ✘ |
+| 23 | port | ✘ |
+| 24 | remote_user | ✘ |
+| 25 | rescue | ✔︎ |
+| 26 | run_once | ✘ |
+| 27 | tags | ✔︎ |
+| 28 | throttle | ✘ |
+| 29 | timeout | ✘ |
+| 30 | vars | ✔︎ |
+| 31 | when | ✔︎ |
++------+------------------------+------------+
+
+
+Task
++------+------------------------+------------+
+| Row | Keyword | Support |
++------+------------------------+------------+
+| 1 | action | ✔︎ |
+| 2 | any_errors_fatal | ✘ |
+| 3 | args | ✔︎ |
+| 4 | async | ✘ |
+| 5 | become | ✘ |
+| 6 | become_exe | ✘ |
+| 7 | become_flags | ✘ |
+| 8 | become_method | ✘ |
+| 9 | become_user | ✘ |
+| 10 | changed_when | ✘ |
+| 11 | check_mode | ✘ |
+| 12 | collections | ✘ |
+| 13 | debugger | ✘ |
+| 14 | delay | ✘ |
+| 15 | delegate_facts | ✘ |
+| 16 | delegate_to | ✘ |
+| 17 | diff | ✘ |
+| 18 | environment | ✘ |
+| 19 | failed_when | ✔︎ |
+| 20 | ignore_errors | ✔︎ |
+| 21 | ignore_unreachable | ✘ |
+| 22 | local_action | ✘ |
+| 23 | loop | ✔︎ |
+| 24 | loop_control | ✘ |
+| 25 | module_defaults | ✘ |
+| 26 | name | ✔︎ |
+| 27 | no_log | ✘ |
+| 28 | notify | ✘ |
+| 29 | poll | ✘ |
+| 30 | port | ✘ |
+| 31 | register | ✔︎ |
+| 32 | remote_user | ✘ |
+| 33 | retries | ✘ |
+| 34 | run_once | ✘ |
+| 35 | tags | ✔︎ |
+| 36 | throttle | ✘ |
+| 37 | timeout | ✘ |
+| 38 | until | ✘ |
+| 39 | vars | ✔︎ |
+| 40 | when | ✔︎ |
+| 41 | with_ | ✔︎ |
++------+------------------------+------------+
+*/
diff --git a/feature/pkg/apis/project/v1/handler.go b/feature/pkg/apis/project/v1/handler.go
new file mode 100644
index 000000000..84da4d443
--- /dev/null
+++ b/feature/pkg/apis/project/v1/handler.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// Handler defined in project.
+type Handler struct {
+ //Task
+
+ Listen []string `yaml:"listen,omitempty"`
+}
diff --git a/feature/pkg/apis/project/v1/loop.go b/feature/pkg/apis/project/v1/loop.go
new file mode 100644
index 000000000..e180d297a
--- /dev/null
+++ b/feature/pkg/apis/project/v1/loop.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// LoopControl defined in project.
+type LoopControl struct {
+ LoopVar string `yaml:"loop_var,omitempty"`
+ IndexVar string `yaml:"index_var,omitempty"`
+ Label string `yaml:"label,omitempty"`
+ Pause float32 `yaml:"pause,omitempty"`
+ Extended bool `yaml:"extended,omitempty"`
+ ExtendedAllitems bool `yaml:"extended_allitems,omitempty"`
+}
diff --git a/feature/pkg/apis/project/v1/notifiable.go b/feature/pkg/apis/project/v1/notifiable.go
new file mode 100644
index 000000000..ea59c0649
--- /dev/null
+++ b/feature/pkg/apis/project/v1/notifiable.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// Notifiable defined in project.
+type Notifiable struct {
+ Notify string `yaml:"notify,omitempty"`
+}
diff --git a/feature/pkg/apis/project/v1/play.go b/feature/pkg/apis/project/v1/play.go
new file mode 100644
index 000000000..5107902ef
--- /dev/null
+++ b/feature/pkg/apis/project/v1/play.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "errors"
+)
+
+// Play defined in project.
+type Play struct {
+ ImportPlaybook string `yaml:"import_playbook,omitempty"`
+
+ Base `yaml:",inline"`
+ Taggable `yaml:",inline"`
+ CollectionSearch `yaml:",inline"`
+
+ PlayHost PlayHost `yaml:"hosts,omitempty"`
+
+ // Facts
+ GatherFacts bool `yaml:"gather_facts,omitempty"`
+
+ // defaults to be deprecated, should be 'None' in future
+ //GatherSubset []GatherSubset
+ //GatherTimeout int
+ //FactPath string
+
+ // Variable Attribute
+ VarsFiles []string `yaml:"vars_files,omitempty"`
+ //VarsPrompt []string `yaml:"vars_prompt,omitempty"`
+
+ // Role Attributes
+ Roles []Role `yaml:"roles,omitempty"`
+
+ // Block (Task) Lists Attributes
+ Handlers []Block `yaml:"handlers,omitempty"`
+ PreTasks []Block `yaml:"pre_tasks,omitempty"`
+ PostTasks []Block `yaml:"post_tasks,omitempty"`
+ Tasks []Block `yaml:"tasks,omitempty"`
+
+ // Flag/Setting Attributes
+ ForceHandlers bool `yaml:"force_handlers,omitempty"`
+ MaxFailPercentage float32 `yaml:"percent,omitempty"`
+ Serial PlaySerial `yaml:"serial,omitempty"`
+ Strategy string `yaml:"strategy,omitempty"`
+ Order string `yaml:"order,omitempty"`
+}
+
+// PlaySerial defined in project.
+type PlaySerial struct {
+ Data []any
+}
+
+// UnmarshalYAML decodes a serial value from either a single scalar or a list.
+func (s *PlaySerial) UnmarshalYAML(unmarshal func(any) error) error {
+ var as []any
+ if err := unmarshal(&as); err == nil {
+ s.Data = as
+
+ return nil
+ }
+
+ var a any
+ if err := unmarshal(&a); err == nil {
+ s.Data = []any{a}
+
+ return nil
+ }
+
+	return errors.New("unsupported type, expected a scalar or an array")
+}
+
+// PlayHost defined in project.
+type PlayHost struct {
+ Hosts []string
+}
+
+// UnmarshalYAML decodes hosts from either a single string or a list of strings.
+func (p *PlayHost) UnmarshalYAML(unmarshal func(any) error) error {
+ var hs []string
+ if err := unmarshal(&hs); err == nil {
+ p.Hosts = hs
+
+ return nil
+ }
+
+ var h string
+ if err := unmarshal(&h); err == nil {
+ p.Hosts = []string{h}
+
+ return nil
+ }
+
+	return errors.New("unsupported type, expected string or array of strings")
+}
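+
+// Accepted YAML forms (host names illustrative):
+//
+//	hosts: localhost                   -> Hosts: ["localhost"]
+//	hosts: ["control-plane", "worker"] -> Hosts: ["control-plane", "worker"]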
diff --git a/feature/pkg/apis/project/v1/play_test.go b/feature/pkg/apis/project/v1/play_test.go
new file mode 100644
index 000000000..f5b5957cd
--- /dev/null
+++ b/feature/pkg/apis/project/v1/play_test.go
@@ -0,0 +1,223 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/yaml.v3"
+)
+
+func TestUnmarshalYaml(t *testing.T) {
+ testcases := []struct {
+ name string
+ data []byte
+		expected []Play
+ }{
+ {
+ name: "Unmarshal hosts with single value",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+`),
+			expected: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{[]string{"localhost"}},
+ },
+ },
+ },
+ {
+ name: "Unmarshal hosts with multiple value",
+ data: []byte(`---
+- name: test play
+ hosts: ["control-plane", "worker"]
+`),
+			expected: []Play{
+ {
+ Base: Base{
+ Name: "test play",
+ },
+ PlayHost: PlayHost{[]string{"control-plane", "worker"}},
+ },
+ },
+ },
+ {
+ name: "Unmarshal role with single value",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+ roles:
+ - test
+`),
+			expected: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{
+ []string{"localhost"},
+ },
+ Roles: []Role{
+ {
+ RoleInfo{
+ Role: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Unmarshal role with map value",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+ roles:
+ - role: test
+`),
+			expected: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{
+ []string{"localhost"},
+ },
+ Roles: []Role{
+ {
+ RoleInfo{
+ Role: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Unmarshal when with single value",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+ roles:
+ - role: test
+ when: "true"
+`),
+			expected: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{
+ []string{"localhost"},
+ },
+ Roles: []Role{
+ {
+ RoleInfo{
+ Conditional: Conditional{When: When{Data: []string{"true"}}},
+ Role: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Unmarshal when with multiple value",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+ roles:
+ - role: test
+ when: ["true","false"]
+`),
+			expected: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{
+ []string{"localhost"},
+ },
+ Roles: []Role{
+ {
+ RoleInfo{
+ Conditional: Conditional{When: When{Data: []string{"true", "false"}}},
+ Role: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Unmarshal single level block",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+ tasks:
+ - name: test
+ custom-module: abc
+`),
+			expected: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{Hosts: []string{"localhost"}},
+ Tasks: []Block{
+ {
+ BlockBase: BlockBase{Base: Base{Name: "test"}},
+ Task: Task{UnknownField: map[string]any{"custom-module": "abc"}},
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Unmarshal multi level block",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+ tasks:
+ - name: test
+ block:
+ - name: test | test
+ custom-module: abc
+`),
+			expected: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{Hosts: []string{"localhost"}},
+ Tasks: []Block{
+ {
+ BlockBase: BlockBase{Base: Base{Name: "test"}},
+ BlockInfo: BlockInfo{
+ Block: []Block{{
+ BlockBase: BlockBase{Base: Base{Name: "test | test"}},
+ Task: Task{UnknownField: map[string]any{"custom-module": "abc"}},
+ }},
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ var pb []Play
+ err := yaml.Unmarshal(tc.data, &pb)
+ if err != nil {
+ t.Fatal(err)
+ }
+			assert.Equal(t, tc.expected, pb)
+ })
+ }
+}
diff --git a/feature/pkg/apis/project/v1/playbook.go b/feature/pkg/apis/project/v1/playbook.go
new file mode 100644
index 000000000..8a2fcae1f
--- /dev/null
+++ b/feature/pkg/apis/project/v1/playbook.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "errors"
+)
+
+// Playbook defined in project.
+type Playbook struct {
+ Play []Play
+}
+
+// Validate the playbook: plays that only import another playbook have already been expanded, so they are dropped; every remaining play must declare hosts.
+func (p *Playbook) Validate() error {
+ var newPlay = make([]Play, 0)
+ for _, play := range p.Play {
+		// import_playbook is only a link to another playbook; skip it.
+ if play.ImportPlaybook != "" {
+ continue
+ }
+
+ if len(play.PlayHost.Hosts) == 0 {
+ return errors.New("playbook's hosts must not be empty")
+ }
+ newPlay = append(newPlay, play)
+ }
+ p.Play = newPlay
+
+ return nil
+}
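+
+// A sketch of the resulting behavior (plays illustrative): a play that only
+// imports another playbook is dropped, a play without hosts is an error, and
+// everything else is kept:
+//
+//	pb := Playbook{Play: []Play{
+//		{ImportPlaybook: "other.yaml"},                     // dropped
+//		{PlayHost: PlayHost{Hosts: []string{"localhost"}}}, // kept
+//	}}
+//	_ = pb.Validate() // nil; pb.Play now holds a single play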
diff --git a/feature/pkg/apis/project/v1/playbook_test.go b/feature/pkg/apis/project/v1/playbook_test.go
new file mode 100644
index 000000000..39310071f
--- /dev/null
+++ b/feature/pkg/apis/project/v1/playbook_test.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidate(t *testing.T) {
+ testcases := []struct {
+ name string
+ playbook Playbook
+ }{
+ {
+ name: "host is empty",
+ playbook: Playbook{Play: []Play{
+ {
+ Base: Base{
+ Name: "test",
+ },
+ },
+ }},
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Error(t, tc.playbook.Validate())
+ })
+ }
+}
diff --git a/feature/pkg/apis/project/v1/role.go b/feature/pkg/apis/project/v1/role.go
new file mode 100644
index 000000000..86fd3bc9b
--- /dev/null
+++ b/feature/pkg/apis/project/v1/role.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// Role defined in project.
+type Role struct {
+ RoleInfo
+}
+
+// RoleInfo defined in project.
+type RoleInfo struct {
+ Base `yaml:",inline"`
+ Conditional `yaml:",inline"`
+ Taggable `yaml:",inline"`
+ CollectionSearch `yaml:",inline"`
+
+ // Role ref in playbook
+ Role string `yaml:"role,omitempty"`
+
+ Block []Block
+}
+
+// UnmarshalYAML accepts either a bare role name string or a full RoleInfo mapping.
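+//
+// Both YAML forms below unmarshal into a Role (illustrative sketch; field
+// names follow the yaml tags declared above):
+//
+//	roles:
+//	  - some-role            # bare string: only Role is set
+//	  - role: some-role      # mapping: the full RoleInfo is set
+//	    tags: ["always"]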
+func (r *Role) UnmarshalYAML(unmarshal func(any) error) error {
+ var s string
+ if err := unmarshal(&s); err == nil {
+ r.Role = s
+
+ return nil
+ }
+
+ var info RoleInfo
+ if err := unmarshal(&info); err != nil {
+ return err
+ }
+ r.RoleInfo = info
+
+ return nil
+}
diff --git a/feature/pkg/apis/project/v1/taggable.go b/feature/pkg/apis/project/v1/taggable.go
new file mode 100644
index 000000000..7f36d84ba
--- /dev/null
+++ b/feature/pkg/apis/project/v1/taggable.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import "slices"
+
+// special tags with fixed semantics
+const (
+ // AlwaysTag marks a block that always runs
+ AlwaysTag = "always"
+ // NeverTag marks a block that never runs
+ NeverTag = "never"
+ // AllTag matches every tag
+ AllTag = "all"
+ // TaggedTag matches any block that has tags
+ TaggedTag = "tagged"
+)
+
+// Taggable decides, based on tags, whether a block should be executed.
+type Taggable struct {
+ Tags []string `yaml:"tags,omitempty"`
+}
+
+// IsEnabled check if the block should be executed
+func (t Taggable) IsEnabled(onlyTags []string, skipTags []string) bool {
+ shouldRun := true
+
+ if len(onlyTags) > 0 {
+ switch {
+ case slices.Contains(t.Tags, AlwaysTag):
+ shouldRun = true
+ case slices.Contains(onlyTags, AllTag) && !slices.Contains(t.Tags, NeverTag):
+ shouldRun = true
+ case slices.Contains(onlyTags, TaggedTag) && !slices.Contains(t.Tags, NeverTag):
+ shouldRun = true
+ case !isdisjoint(onlyTags, t.Tags):
+ shouldRun = true
+ default:
+ shouldRun = false
+ }
+ }
+
+ if shouldRun && len(skipTags) > 0 {
+ switch {
+ case slices.Contains(skipTags, AllTag) &&
+ (!slices.Contains(t.Tags, AlwaysTag) || slices.Contains(skipTags, AlwaysTag)):
+ shouldRun = false
+ case !isdisjoint(skipTags, t.Tags):
+ shouldRun = false
+ case slices.Contains(skipTags, TaggedTag) && len(t.Tags) > 0:
+ shouldRun = false
+ }
+ }
+
+ return shouldRun
+}
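+
+// Illustrative examples for a block tagged ["etcd"] (these follow from the
+// rules above):
+//
+//	Taggable{Tags: []string{"etcd"}}.IsEnabled([]string{"all"}, nil)  // true
+//	Taggable{Tags: []string{"etcd"}}.IsEnabled([]string{"cni"}, nil)  // false
+//	Taggable{Tags: []string{"etcd"}}.IsEnabled(nil, []string{"etcd"}) // false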
+
+// JoinTag merges the parent's tags into the child; the "always" tag is not inherited.
+func JoinTag(child, parent Taggable) Taggable {
+ for _, tag := range parent.Tags {
+ if tag == AlwaysTag { // skip inherit "always" tag
+ continue
+ }
+
+ if !slices.Contains(child.Tags, tag) {
+ child.Tags = append(child.Tags, tag)
+ }
+ }
+
+ return child
+}
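+
+// For example (illustrative): JoinTag(Taggable{Tags: []string{"a"}},
+// Taggable{Tags: []string{"b", "always"}}) returns Tags ["a", "b"].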
+
+// isdisjoint returns true if a and b have no elements in common.
+func isdisjoint(a, b []string) bool {
+ for _, s := range a {
+ if slices.Contains(b, s) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/feature/pkg/connector/connector.go b/feature/pkg/connector/connector.go
new file mode 100644
index 000000000..cf6f8f796
--- /dev/null
+++ b/feature/pkg/connector/connector.go
@@ -0,0 +1,190 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package connector
+
+import (
+ "context"
+ "io"
+ "io/fs"
+ "net"
+ "os"
+
+ "k8s.io/klog/v2"
+ "k8s.io/utils/exec"
+ "k8s.io/utils/ptr"
+
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// connectedType for connector
+const (
+ connectedSSH = "ssh"
+ connectedLocal = "local"
+ connectedKubernetes = "kubernetes"
+)
+
+// Connector is the interface for connecting to a remote host
+type Connector interface {
+ // Init initializes the connection
+ Init(ctx context.Context) error
+ // Close closes the connection
+ Close(ctx context.Context) error
+ // PutFile writes the src content to the file dst on the target with the given mode.
+ PutFile(ctx context.Context, src []byte, dst string, mode fs.FileMode) error
+ // FetchFile copies a file from src to dst writer.
+ FetchFile(ctx context.Context, src string, dst io.Writer) error
+ // ExecuteCommand executes a command on the remote host
+ ExecuteCommand(ctx context.Context, cmd string) ([]byte, error)
+}
+
+// NewConnector creates a new connector.
+// If the connector type is "local", use the local connector.
+// If the connector type is "ssh", use the ssh connector.
+// If the connector type is "kubernetes", use the kubernetes connector.
+// If the connector type is not set: when the host is localhost use the local connector, otherwise use the ssh connector.
+// connectorVars contains all inventory variables for the host. It is best to define the connector info in the inventory file.
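+//
+// An inventory host entry that selects the ssh connector might look like
+// this (illustrative sketch; the keys are the VariableConnector* constants
+// from pkg/const):
+//
+//	node1:
+//	  connector:
+//	    type: ssh
+//	    host: 10.0.0.1
+//	    port: 22
+//	    user: root
+//	    private_key: /root/.ssh/id_rsa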
+func NewConnector(host string, connectorVars map[string]any) (Connector, error) {
+ connectedType, _ := variable.StringVar(nil, connectorVars, _const.VariableConnectorType)
+
+ switch connectedType {
+ case connectedLocal:
+ return &localConnector{Cmd: exec.New()}, nil
+ case connectedSSH:
+ // get host from the connector variables. if empty, fall back to the inventory host name.
+ hostParam, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorHost)
+ if err != nil {
+ klog.V(4).Infof("connector host is empty use: %s", host)
+ hostParam = host
+ }
+ // get port in connector variable. if empty, set default port: 22.
+ portParam, err := variable.IntVar(nil, connectorVars, _const.VariableConnectorPort)
+ if err != nil {
+ klog.V(4).Infof("connector port is empty use: %v", defaultSSHPort)
+ portParam = ptr.To(defaultSSHPort)
+ }
+ // get user in connector variable. if empty, set default user: root.
+ userParam, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorUser)
+ if err != nil {
+ klog.V(4).Infof("connector user is empty use: %s", defaultSSHUser)
+ userParam = defaultSSHUser
+ }
+ // get password from the connector variables. if empty, fall back to private key authentication.
+ passwdParam, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorPassword)
+ if err != nil {
+ klog.V(4).InfoS("connector password is empty, trying private key auth")
+ }
+ // get private key path from the connector variables. if empty, set default path: /root/.ssh/id_rsa.
+ keyParam, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorPrivateKey)
+ if err != nil {
+ klog.V(4).Infof("ssh private key is empty, use: %s", defaultSSHPrivateKey)
+ keyParam = defaultSSHPrivateKey
+ }
+
+ return &sshConnector{
+ Host: hostParam,
+ Port: *portParam,
+ User: userParam,
+ Password: passwdParam,
+ PrivateKey: keyParam,
+ }, nil
+ case connectedKubernetes:
+ kubeconfig, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorKubeconfig)
+ if err != nil && host != _const.VariableLocalHost {
+ return nil, err
+ }
+
+ return &kubernetesConnector{Cmd: exec.New(), clusterName: host, kubeconfig: kubeconfig}, nil
+ default:
+ localHost, _ := os.Hostname()
+ // get host from the connector variables. if empty, fall back to the inventory host name.
+ hostParam, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorHost)
+ if err != nil {
+ klog.V(4).Infof("connector host is empty use: %s", host)
+ hostParam = host
+ }
+ if host == _const.VariableLocalHost || host == localHost || isLocalIP(hostParam) {
+ return &localConnector{Cmd: exec.New()}, nil
+ }
+ // get port in connector variable. if empty, set default port: 22.
+ portParam, err := variable.IntVar(nil, connectorVars, _const.VariableConnectorPort)
+ if err != nil {
+ klog.V(4).Infof("connector port is empty use: %v", defaultSSHPort)
+ portParam = ptr.To(defaultSSHPort)
+ }
+ // get user in connector variable. if empty, set default user: root.
+ userParam, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorUser)
+ if err != nil {
+ klog.V(4).Infof("connector user is empty use: %s", defaultSSHUser)
+ userParam = defaultSSHUser
+ }
+ // get password from the connector variables. if empty, fall back to private key authentication.
+ passwdParam, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorPassword)
+ if err != nil {
+ klog.V(4).InfoS("connector password is empty, trying private key auth")
+ }
+ // get private key path from the connector variables. if empty, set default path: /root/.ssh/id_rsa.
+ keyParam, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorPrivateKey)
+ if err != nil {
+ klog.V(4).Infof("ssh private key is empty, use: %s", defaultSSHPrivateKey)
+ keyParam = defaultSSHPrivateKey
+ }
+
+ return &sshConnector{
+ Host: hostParam,
+ Port: *portParam,
+ User: userParam,
+ Password: passwdParam,
+ PrivateKey: keyParam,
+ }, nil
+ }
+}
+
+// GatherFacts get host info.
+type GatherFacts interface {
+ HostInfo(ctx context.Context) (map[string]any, error)
+}
+
+// isLocalIP checks whether ipAddr is assigned to one of the local network interfaces
+func isLocalIP(ipAddr string) bool {
+ addrs, err := net.InterfaceAddrs()
+ if err != nil {
+ klog.ErrorS(err, "get network address error")
+
+ return false
+ }
+
+ for _, addr := range addrs {
+ var ip net.IP
+ switch v := addr.(type) {
+ case *net.IPNet:
+ ip = v.IP
+ case *net.IPAddr:
+ ip = v.IP
+ default:
+ klog.V(4).InfoS("unknown address type", "address", addr.String())
+
+ continue
+ }
+
+ if ip.String() == ipAddr {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/feature/pkg/connector/helper.go b/feature/pkg/connector/helper.go
new file mode 100644
index 000000000..60f4f3832
--- /dev/null
+++ b/feature/pkg/connector/helper.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package connector
+
+import (
+ "bufio"
+ "bytes"
+ "strings"
+)
+
+// convertBytesToMap with split string, only convert line which contain split
+func convertBytesToMap(bs []byte, split string) map[string]string {
+ config := make(map[string]string)
+ scanner := bufio.NewScanner(bytes.NewBuffer(bs))
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.SplitN(line, split, 2)
+ if len(parts) == 2 {
+ key := strings.TrimSpace(parts[0])
+ value := strings.TrimSpace(parts[1])
+ config[key] = value
+ }
+ }
+
+ return config
+}
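+
+// For example (illustrative): convertBytesToMap([]byte("ID=ubuntu\nNAME=\"Ubuntu\""), "=")
+// returns {"ID": "ubuntu", "NAME": "\"Ubuntu\""}; lines without the split string are dropped.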
+
+// convertBytesToSlice with split string. only convert line which contain split.
+// group by empty line
+func convertBytesToSlice(bs []byte, split string) []map[string]string {
+ var config []map[string]string
+ currentMap := make(map[string]string)
+
+ scanner := bufio.NewScanner(bytes.NewBuffer(bs))
+ for scanner.Scan() {
+ line := scanner.Text()
+ line = strings.TrimSpace(line)
+
+ if line != "" {
+ parts := strings.SplitN(line, split, 2)
+ if len(parts) == 2 {
+ key := strings.TrimSpace(parts[0])
+ value := strings.TrimSpace(parts[1])
+ currentMap[key] = value
+ }
+ } else if len(currentMap) > 0 {
+ // If encountering an empty line, add the current map to config and create a new map
+ config = append(config, currentMap)
+ currentMap = make(map[string]string)
+ }
+ }
+
+ // Add the last map if not already added
+ if len(currentMap) > 0 {
+ config = append(config, currentMap)
+ }
+
+ return config
+}
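+
+// For example (illustrative), /proc/cpuinfo-style input
+//
+//	processor : 0
+//	cpu family : 6
+//
+//	processor : 1
+//
+// split on ":" yields two maps: {"processor": "0", "cpu family": "6"} and
+// {"processor": "1"}.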
diff --git a/feature/pkg/connector/helper_test.go b/feature/pkg/connector/helper_test.go
new file mode 100644
index 000000000..433cfcd02
--- /dev/null
+++ b/feature/pkg/connector/helper_test.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package connector
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestConvertBytesToMap(t *testing.T) {
+ testcases := []struct {
+ name string
+ data []byte
+ expected map[string]string
+ }{
+ {
+ name: "succeed",
+ data: []byte(`PRETTY_NAME="Ubuntu 22.04.1 LTS"
+NAME="Ubuntu"
+VERSION_ID="22.04"
+VERSION="22.04.1 LTS (Jammy Jellyfish)"
+VERSION_CODENAME=jammy
+ID=ubuntu
+ID_LIKE=debian
+HOME_URL="https://www.ubuntu.com/"
+SUPPORT_URL="https://help.ubuntu.com/"
+BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
+PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
+UBUNTU_CODENAME=jammy
+`),
+ expected: map[string]string{
+ "PRETTY_NAME": "\"Ubuntu 22.04.1 LTS\"",
+ "NAME": "\"Ubuntu\"",
+ "VERSION_ID": "\"22.04\"",
+ "VERSION": "\"22.04.1 LTS (Jammy Jellyfish)\"",
+ "VERSION_CODENAME": "jammy",
+ "ID": "ubuntu",
+ "ID_LIKE": "debian",
+ "HOME_URL": "\"https://www.ubuntu.com/\"",
+ "SUPPORT_URL": "\"https://help.ubuntu.com/\"",
+ "BUG_REPORT_URL": "\"https://bugs.launchpad.net/ubuntu/\"",
+ "PRIVACY_POLICY_URL": "\"https://www.ubuntu.com/legal/terms-and-policies/privacy-policy\"",
+ "UBUNTU_CODENAME": "jammy",
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Equal(t, tc.expected, convertBytesToMap(tc.data, "="))
+ })
+ }
+}
+
+func TestConvertBytesToSlice(t *testing.T) {
+ testcases := []struct {
+ name string
+ data []byte
+ expected []map[string]string
+ }{
+ {
+ name: "succeed",
+ data: []byte(`processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 60
+model name : Intel Core Processor (Haswell, no TSX, IBRS)
+
+processor : 1
+vendor_id : GenuineIntel
+cpu family : 6
+`),
+ expected: []map[string]string{
+ {
+ "processor": "0",
+ "vendor_id": "GenuineIntel",
+ "cpu family": "6",
+ "model": "60",
+ "model name": "Intel Core Processor (Haswell, no TSX, IBRS)",
+ },
+ {
+ "processor": "1",
+ "vendor_id": "GenuineIntel",
+ "cpu family": "6",
+ },
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Equal(t, tc.expected, convertBytesToSlice(tc.data, ":"))
+ })
+ }
+}
diff --git a/feature/pkg/connector/kubernetes_connector.go b/feature/pkg/connector/kubernetes_connector.go
new file mode 100644
index 000000000..7213a64bf
--- /dev/null
+++ b/feature/pkg/connector/kubernetes_connector.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package connector
+
+import (
+ "context"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "k8s.io/klog/v2"
+ "k8s.io/utils/exec"
+
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+const kubeconfigRelPath = ".kube/config"
+
+var _ Connector = &kubernetesConnector{}
+
+type kubernetesConnector struct {
+ clusterName string
+ kubeconfig string
+ homeDir string
+ Cmd exec.Interface
+}
+
+// Init the connector: create a local home dir for each kubernetes cluster and write its kubeconfig there.
+func (c *kubernetesConnector) Init(_ context.Context) error {
+ if c.clusterName == _const.VariableLocalHost && c.kubeconfig == "" {
+ klog.V(4).InfoS("kubeconfig is not set, using local kubeconfig")
+ // use default kubeconfig. skip
+ return nil
+ }
+ // set home dir for each kubernetes
+ c.homeDir = filepath.Join(_const.GetWorkDir(), _const.KubernetesDir, c.clusterName)
+ // if the home dir does not exist, create it.
+ if _, err := os.Stat(c.homeDir); err != nil && os.IsNotExist(err) {
+ if err := os.MkdirAll(c.homeDir, os.ModePerm); err != nil {
+ klog.V(4).ErrorS(err, "Failed to create local dir", "cluster", c.clusterName)
+
+ return err
+ }
+ }
+ // create kubeconfig path in home dir
+ kubeconfigPath := filepath.Join(c.homeDir, kubeconfigRelPath)
+ if _, err := os.Stat(kubeconfigPath); err != nil && os.IsNotExist(err) {
+ if err := os.MkdirAll(filepath.Dir(kubeconfigPath), os.ModePerm); err != nil {
+ klog.V(4).ErrorS(err, "Failed to create local dir", "cluster", c.clusterName)
+
+ return err
+ }
+ }
+ // write kubeconfig to home dir
+ if err := os.WriteFile(kubeconfigPath, []byte(c.kubeconfig), os.ModePerm); err != nil {
+ klog.V(4).ErrorS(err, "Failed to create kubeconfig file", "cluster", c.clusterName)
+
+ return err
+ }
+
+ return nil
+}
+
+// Close connector, do nothing
+func (c *kubernetesConnector) Close(_ context.Context) error {
+ return nil
+}
+
+// PutFile writes the src bytes to the file dst under the cluster's local home dir.
+// Typically, the configuration file for each cluster may be different,
+// and it may be necessary to keep them in separate directories locally.
+func (c *kubernetesConnector) PutFile(_ context.Context, src []byte, dst string, mode fs.FileMode) error {
+ dst = filepath.Join(c.homeDir, dst)
+ if _, err := os.Stat(filepath.Dir(dst)); err != nil && os.IsNotExist(err) {
+ if err := os.MkdirAll(filepath.Dir(dst), mode); err != nil {
+ klog.V(4).ErrorS(err, "Failed to create local dir", "dst_file", dst)
+
+ return err
+ }
+ }
+
+ return os.WriteFile(dst, src, mode)
+}
+
+// FetchFile runs src as a local command against the cluster and streams its stdout to the dst writer.
+func (c *kubernetesConnector) FetchFile(ctx context.Context, src string, dst io.Writer) error {
+ // add "--kubeconfig" to src command
+ klog.V(5).InfoS("exec local command", "cmd", src)
+ command := c.Cmd.CommandContext(ctx, "/bin/sh", "-c", src)
+ command.SetDir(c.homeDir)
+ command.SetEnv([]string{"KUBECONFIG=" + filepath.Join(c.homeDir, kubeconfigRelPath)})
+ command.SetStdout(dst)
+
+ return command.Run()
+}
+
+// ExecuteCommand in a kubernetes cluster
+func (c *kubernetesConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) {
+ // add "--kubeconfig" to src command
+ klog.V(5).InfoS("exec local command", "cmd", cmd)
+ command := c.Cmd.CommandContext(ctx, "/bin/sh", "-c", cmd)
+ command.SetDir(c.homeDir)
+ command.SetEnv([]string{"KUBECONFIG=" + filepath.Join(c.homeDir, kubeconfigRelPath)})
+
+ return command.CombinedOutput()
+}
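+
+// For example (illustrative): ExecuteCommand(ctx, "kubectl get nodes") runs
+// kubectl locally with KUBECONFIG pointing at the per-cluster config written
+// by Init, so the command targets that cluster.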
diff --git a/feature/pkg/connector/local_connector.go b/feature/pkg/connector/local_connector.go
new file mode 100644
index 000000000..ad62185fd
--- /dev/null
+++ b/feature/pkg/connector/local_connector.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package connector
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "k8s.io/klog/v2"
+ "k8s.io/utils/exec"
+
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+var _ Connector = &localConnector{}
+var _ GatherFacts = &localConnector{}
+
+type localConnector struct {
+ Cmd exec.Interface
+}
+
+// Init connector. do nothing
+func (c *localConnector) Init(context.Context) error {
+ return nil
+}
+
+// Close connector. do nothing
+func (c *localConnector) Close(context.Context) error {
+ return nil
+}
+
+// PutFile writes the src bytes to the local file dst.
+func (c *localConnector) PutFile(_ context.Context, src []byte, dst string, mode fs.FileMode) error {
+ if _, err := os.Stat(filepath.Dir(dst)); err != nil && os.IsNotExist(err) {
+ if err := os.MkdirAll(filepath.Dir(dst), mode); err != nil {
+ klog.V(4).ErrorS(err, "Failed to create local dir", "dst_file", dst)
+
+ return err
+ }
+ }
+
+ return os.WriteFile(dst, src, mode)
+}
+
+// FetchFile copies the local file src to the dst writer.
+func (c *localConnector) FetchFile(_ context.Context, src string, dst io.Writer) error {
+ file, err := os.Open(src)
+ if err != nil {
+ klog.V(4).ErrorS(err, "Failed to open local file", "src_file", src)
+
+ return err
+ }
+ defer file.Close()
+
+ if _, err := io.Copy(dst, file); err != nil {
+ klog.V(4).ErrorS(err, "Failed to copy local file", "src_file", src)
+
+ return err
+ }
+
+ return nil
+}
+
+// ExecuteCommand in local host
+func (c *localConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) {
+ klog.V(5).InfoS("exec local command", "cmd", cmd)
+
+ return c.Cmd.CommandContext(ctx, "/bin/sh", "-c", cmd).CombinedOutput()
+}
+
+// HostInfo for GatherFacts
+func (c *localConnector) HostInfo(ctx context.Context) (map[string]any, error) {
+ switch runtime.GOOS {
+ case "linux":
+ // os information
+ osVars := make(map[string]any)
+ var osRelease bytes.Buffer
+ if err := c.FetchFile(ctx, "/etc/os-release", &osRelease); err != nil {
+ return nil, fmt.Errorf("failed to fetch os-release: %w", err)
+ }
+ osVars[_const.VariableOSRelease] = convertBytesToMap(osRelease.Bytes(), "=")
+ kernel, err := c.ExecuteCommand(ctx, "uname -r")
+ if err != nil {
+ return nil, fmt.Errorf("get kernel version error: %w", err)
+ }
+ osVars[_const.VariableOSKernelVersion] = string(bytes.TrimSuffix(kernel, []byte("\n")))
+ hn, err := c.ExecuteCommand(ctx, "hostname")
+ if err != nil {
+ return nil, fmt.Errorf("get hostname error: %w", err)
+ }
+ osVars[_const.VariableOSHostName] = string(bytes.TrimSuffix(hn, []byte("\n")))
+ arch, err := c.ExecuteCommand(ctx, "arch")
+ if err != nil {
+ return nil, fmt.Errorf("get arch error: %w", err)
+ }
+ osVars[_const.VariableOSArchitecture] = string(bytes.TrimSuffix(arch, []byte("\n")))
+
+ // process information
+ procVars := make(map[string]any)
+ var cpu bytes.Buffer
+ if err := c.FetchFile(ctx, "/proc/cpuinfo", &cpu); err != nil {
+ return nil, fmt.Errorf("get cpuinfo error: %w", err)
+ }
+ procVars[_const.VariableProcessCPU] = convertBytesToSlice(cpu.Bytes(), ":")
+ var mem bytes.Buffer
+ if err := c.FetchFile(ctx, "/proc/meminfo", &mem); err != nil {
+ return nil, fmt.Errorf("get meminfo error: %w", err)
+ }
+ procVars[_const.VariableProcessMemory] = convertBytesToMap(mem.Bytes(), ":")
+
+ return map[string]any{
+ _const.VariableOS: osVars,
+ _const.VariableProcess: procVars,
+ }, nil
+ default:
+ klog.V(4).ErrorS(nil, "Unsupported platform", "platform", runtime.GOOS)
+
+ return make(map[string]any), nil
+ }
+}
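+
+// The returned facts have the following shape (illustrative; keys are the
+// Variable* constants from pkg/const):
+//
+//	os:
+//	  release: {...}          # parsed /etc/os-release
+//	  kernel_version: "..."   # uname -r
+//	  hostname: "..."
+//	  architecture: "..."
+//	process:
+//	  cpuInfo: [...]          # parsed /proc/cpuinfo
+//	  memInfo: {...}          # parsed /proc/meminfo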
diff --git a/feature/pkg/connector/local_connector_test.go b/feature/pkg/connector/local_connector_test.go
new file mode 100644
index 000000000..2e9883b1a
--- /dev/null
+++ b/feature/pkg/connector/local_connector_test.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package connector
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/utils/exec"
+ testingexec "k8s.io/utils/exec/testing"
+)
+
+func newFakeLocalConnector(runCmd string, output string) *localConnector {
+ return &localConnector{
+ Cmd: &testingexec.FakeExec{CommandScript: []testingexec.FakeCommandAction{
+ func(cmd string, args ...string) exec.Cmd {
+ if strings.TrimSpace(fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))) == "/bin/sh -c "+runCmd {
+ return &testingexec.FakeCmd{
+ CombinedOutputScript: []testingexec.FakeAction{func() ([]byte, []byte, error) {
+ return []byte(output), nil, nil
+ }},
+ }
+ }
+
+ return &testingexec.FakeCmd{
+ CombinedOutputScript: []testingexec.FakeAction{func() ([]byte, []byte, error) {
+ return nil, nil, errors.New("error command")
+ }},
+ }
+ },
+ }},
+ }
+}
+
+func TestLocalConnector_ExecuteCommand(t *testing.T) {
+ testcases := []struct {
+ name string
+ cmd string
+ expectedErr error
+ }{
+ {
+ name: "execute command succeed",
+ cmd: "echo 'hello'",
+ expectedErr: nil,
+ },
+ {
+ name: "execute command failed",
+ cmd: "echo 'hello1'",
+ expectedErr: errors.New("error command"),
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+ lc := newFakeLocalConnector("echo 'hello'", "hello")
+ _, err := lc.ExecuteCommand(ctx, tc.cmd)
+ assert.Equal(t, tc.expectedErr, err)
+ })
+ }
+}
diff --git a/feature/pkg/connector/ssh_connector.go b/feature/pkg/connector/ssh_connector.go
new file mode 100644
index 000000000..b41b265c3
--- /dev/null
+++ b/feature/pkg/connector/ssh_connector.go
@@ -0,0 +1,229 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package connector
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "os/user"
+ "path/filepath"
+ "time"
+
+ "github.com/pkg/sftp"
+ "golang.org/x/crypto/ssh"
+ "k8s.io/klog/v2"
+
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+const (
+ defaultSSHPort = 22
+ defaultSSHUser = "root"
+)
+
+var defaultSSHPrivateKey string
+
+func init() {
+ if currentUser, err := user.Current(); err == nil {
+ defaultSSHPrivateKey = filepath.Join(currentUser.HomeDir, ".ssh/id_rsa")
+ } else {
+ defaultSSHPrivateKey = filepath.Join("/root", ".ssh/id_rsa")
+ }
+}
+
+var _ Connector = &sshConnector{}
+var _ GatherFacts = &sshConnector{}
+
+type sshConnector struct {
+ Host string
+ Port int
+ User string
+ Password string
+ PrivateKey string
+ client *ssh.Client
+}
+
+// Init connector, get ssh.Client
+func (c *sshConnector) Init(context.Context) error {
+ if c.Host == "" {
+ return errors.New("host is not set")
+ }
+
+ var auth []ssh.AuthMethod
+ if c.Password != "" {
+ auth = append(auth, ssh.Password(c.Password))
+ }
+ if _, err := os.Stat(c.PrivateKey); err == nil {
+ key, err := os.ReadFile(c.PrivateKey)
+ if err != nil {
+ return fmt.Errorf("read private key error: %w", err)
+ }
+ privateKey, err := ssh.ParsePrivateKey(key)
+ if err != nil {
+ return fmt.Errorf("parse private key error: %w", err)
+ }
+ auth = append(auth, ssh.PublicKeys(privateKey))
+ }
+
+ sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%v", c.Host, c.Port), &ssh.ClientConfig{
+ User: c.User,
+ Auth: auth,
+ HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+ Timeout: 30 * time.Second,
+ })
+ if err != nil {
+ klog.V(4).ErrorS(err, "Dial ssh server failed", "host", c.Host, "port", c.Port)
+
+ return err
+ }
+ c.client = sshClient
+
+ return nil
+}
+
+// Close connector
+func (c *sshConnector) Close(context.Context) error {
+ return c.client.Close()
+}
+
+// PutFile to remote node. src is the file bytes. dst is the remote filename
+func (c *sshConnector) PutFile(_ context.Context, src []byte, dst string, mode fs.FileMode) error {
+ // create sftp client
+ sftpClient, err := sftp.NewClient(c.client)
+ if err != nil {
+ klog.V(4).ErrorS(err, "Failed to create sftp client")
+
+ return err
+ }
+ defer sftpClient.Close()
+ // create remote file
+ if _, err := sftpClient.Stat(filepath.Dir(dst)); err != nil && os.IsNotExist(err) {
+ if err := sftpClient.MkdirAll(filepath.Dir(dst)); err != nil {
+ klog.V(4).ErrorS(err, "Failed to create remote dir", "remote_file", dst)
+
+ return err
+ }
+ }
+
+ rf, err := sftpClient.Create(dst)
+ if err != nil {
+ klog.V(4).ErrorS(err, "Failed to create remote file", "remote_file", dst)
+
+ return err
+ }
+ defer rf.Close()
+
+ if _, err = rf.Write(src); err != nil {
+ klog.V(4).ErrorS(err, "Failed to write content to remote file", "remote_file", dst)
+
+ return err
+ }
+
+ return rf.Chmod(mode)
+}
+
+// FetchFile from remote node. src is the remote filename, dst is the local writer.
+func (c *sshConnector) FetchFile(_ context.Context, src string, dst io.Writer) error {
+ // create sftp client
+ sftpClient, err := sftp.NewClient(c.client)
+ if err != nil {
+ klog.V(4).ErrorS(err, "Failed to create sftp client", "remote_file", src)
+
+ return err
+ }
+ defer sftpClient.Close()
+
+ rf, err := sftpClient.Open(src)
+ if err != nil {
+ klog.V(4).ErrorS(err, "Failed to open file", "remote_file", src)
+
+ return err
+ }
+ defer rf.Close()
+
+ if _, err := io.Copy(dst, rf); err != nil {
+ klog.V(4).ErrorS(err, "Failed to copy file", "remote_file", src)
+
+ return err
+ }
+
+ return nil
+}
+
+// ExecuteCommand in remote host
+func (c *sshConnector) ExecuteCommand(_ context.Context, cmd string) ([]byte, error) {
+ klog.V(5).InfoS("exec ssh command", "cmd", cmd, "host", c.Host)
+ // create ssh session
+ session, err := c.client.NewSession()
+ if err != nil {
+ klog.V(4).ErrorS(err, "Failed to create ssh session")
+
+ return nil, err
+ }
+ defer session.Close()
+
+ return session.CombinedOutput(cmd)
+}
+
+// HostInfo for GatherFacts
+func (c *sshConnector) HostInfo(ctx context.Context) (map[string]any, error) {
+ // os information
+ osVars := make(map[string]any)
+ var osRelease bytes.Buffer
+ if err := c.FetchFile(ctx, "/etc/os-release", &osRelease); err != nil {
+ return nil, fmt.Errorf("failed to fetch os-release: %w", err)
+ }
+ osVars[_const.VariableOSRelease] = convertBytesToMap(osRelease.Bytes(), "=")
+ kernel, err := c.ExecuteCommand(ctx, "uname -r")
+ if err != nil {
+ return nil, fmt.Errorf("get kernel version error: %w", err)
+ }
+ osVars[_const.VariableOSKernelVersion] = string(bytes.TrimSuffix(kernel, []byte("\n")))
+ hn, err := c.ExecuteCommand(ctx, "hostname")
+ if err != nil {
+ return nil, fmt.Errorf("get hostname error: %w", err)
+ }
+ osVars[_const.VariableOSHostName] = string(bytes.TrimSuffix(hn, []byte("\n")))
+ arch, err := c.ExecuteCommand(ctx, "arch")
+ if err != nil {
+ return nil, fmt.Errorf("get arch error: %w", err)
+ }
+ osVars[_const.VariableOSArchitecture] = string(bytes.TrimSuffix(arch, []byte("\n")))
+
+ // process information
+ procVars := make(map[string]any)
+ var cpu bytes.Buffer
+ if err := c.FetchFile(ctx, "/proc/cpuinfo", &cpu); err != nil {
+ return nil, fmt.Errorf("get cpuinfo error: %w", err)
+ }
+ procVars[_const.VariableProcessCPU] = convertBytesToSlice(cpu.Bytes(), ":")
+ var mem bytes.Buffer
+ if err := c.FetchFile(ctx, "/proc/meminfo", &mem); err != nil {
+ return nil, fmt.Errorf("get meminfo error: %w", err)
+ }
+ procVars[_const.VariableProcessMemory] = convertBytesToMap(mem.Bytes(), ":")
+
+ return map[string]any{
+ _const.VariableOS: osVars,
+ _const.VariableProcess: procVars,
+ }, nil
+}
diff --git a/feature/pkg/const/common.go b/feature/pkg/const/common.go
new file mode 100644
index 000000000..3ea218d51
--- /dev/null
+++ b/feature/pkg/const/common.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package _const
+
+// variable specific key in system
+const ( // === From inventory ===
+ // VariableLocalHost is the default local host name in inventory.
+ VariableLocalHost = "localhost"
+ // VariableIPv4 is the ipv4 in inventory.
+ VariableIPv4 = "internal_ipv4"
+ // VariableIPv6 is the ipv6 in inventory.
+ VariableIPv6 = "internal_ipv6"
+ // VariableGroups the value is a host_name slice
+ VariableGroups = "groups"
+ // VariableConnector is connector parameter in inventory.
+ VariableConnector = "connector"
+ // VariableConnectorType is the connection type for VariableConnector.
+ VariableConnectorType = "type"
+ // VariableConnectorHost is the connection address for VariableConnector.
+ VariableConnectorHost = "host"
+ // VariableConnectorPort is the connection port for VariableConnector.
+ VariableConnectorPort = "port"
+ // VariableConnectorUser is the connection user for VariableConnector.
+ VariableConnectorUser = "user"
+ // VariableConnectorPassword is the connection password for VariableConnector.
+ VariableConnectorPassword = "password"
+ // VariableConnectorPrivateKey is the private key used to authenticate for VariableConnector.
+ VariableConnectorPrivateKey = "private_key"
+ // VariableConnectorKubeconfig is the kubeconfig used to connect to a cluster for VariableConnector.
+ VariableConnectorKubeconfig = "kubeconfig"
+)
+
+const ( // === From system generate ===
+ // VariableInventoryName the value which defined in inventory.spec.host.
+ VariableInventoryName = "inventory_name"
+ // VariableHostName the value is node hostname, default VariableInventoryName.
+ // If VariableInventoryName is "localhost", try to set the actual hostname.
+ VariableHostName = "hostname"
+ // VariableGlobalHosts the value is host_var which defined in inventory.
+ VariableGlobalHosts = "inventory_hosts"
+ // VariableGroupsAll the value is a slice of all host names across VariableGroups.
+ VariableGroupsAll = "all"
+)
+
+const ( // === From GatherFact ===
+ // VariableOS the value is os information.
+ VariableOS = "os"
+ // VariableOSRelease the value is os-release of VariableOS.
+ VariableOSRelease = "release"
+ // VariableOSKernelVersion the value is kernel version of VariableOS.
+ VariableOSKernelVersion = "kernel_version"
+ // VariableOSHostName the value is hostname of VariableOS.
+ VariableOSHostName = "hostname"
+ // VariableOSArchitecture the value is architecture of VariableOS.
+ VariableOSArchitecture = "architecture"
+ // VariableProcess the value is process information.
+ VariableProcess = "process"
+ // VariableProcessCPU the value is cpu info of VariableProcess.
+ VariableProcessCPU = "cpuInfo"
+ // VariableProcessMemory the value is memory info of VariableProcess.
+ VariableProcessMemory = "memInfo"
+)
+
+const ( // === From runtime ===
+ // VariableItem for "loop" argument when run a task.
+ VariableItem = "item"
+)
diff --git a/feature/pkg/const/helper.go b/feature/pkg/const/helper.go
new file mode 100644
index 000000000..2eab99507
--- /dev/null
+++ b/feature/pkg/const/helper.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package _const
+
+import (
+ "path/filepath"
+ "sync"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+var workDirOnce = &sync.Once{}
+
+// SetWorkDir sets the workdir once.
+func SetWorkDir(wd string) {
+ workDirOnce.Do(func() {
+ workDir = wd
+ })
+}
+
+// GetWorkDir returns the workdir.
+func GetWorkDir() string {
+ return workDir
+}
+
+// GetRuntimeDir returns the absolute path of the runtime directory.
+func GetRuntimeDir() string {
+ return filepath.Join(workDir, RuntimeDir)
+}
+
+// RuntimeDirFromPipeline returns the absolute path of the runtime directory for a specific Pipeline.
+func RuntimeDirFromPipeline(obj kkcorev1.Pipeline) string {
+ return filepath.Join(GetRuntimeDir(), kkcorev1.SchemeGroupVersion.String(),
+ RuntimePipelineDir, obj.Namespace, obj.Name)
+}
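+
+// For a Pipeline "demo" in namespace "default" this resolves to (illustrative):
+// <workdir>/runtime/<group/version>/pipelines/default/demo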
diff --git a/feature/pkg/const/helper_test.go b/feature/pkg/const/helper_test.go
new file mode 100644
index 000000000..42ac33fff
--- /dev/null
+++ b/feature/pkg/const/helper_test.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package _const
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestWorkDir(t *testing.T) {
+ // should not get workdir before set
+ assert.Empty(t, GetWorkDir())
+ // set workdir
+ SetWorkDir("/tmp")
+ assert.Equal(t, "/tmp", GetWorkDir())
+ // should not set workdir again
+ SetWorkDir("/tmp2")
+ assert.Equal(t, "/tmp", GetWorkDir())
+}
diff --git a/feature/pkg/const/scheme.go b/feature/pkg/const/scheme.go
new file mode 100644
index 000000000..19e7f5e50
--- /dev/null
+++ b/feature/pkg/const/scheme.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package _const
+
+import (
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
+)
+
+var (
+ // Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
+ // NOTE: If you are copying this file to start a new api group, STOP! Copy the
+ // extensions group instead. This Scheme is special and should appear ONLY in
+ // the api group, unless you really know what you're doing.
+ Scheme = newScheme()
+
+ // Codecs provides access to encoding and decoding for the scheme
+ Codecs = serializer.NewCodecFactory(Scheme)
+
+ // ParameterCodec handles versioning of objects that are converted to query parameters.
+ ParameterCodec = runtime.NewParameterCodec(Scheme)
+)
+
+func newScheme() *runtime.Scheme {
+ s := runtime.NewScheme()
+ utilruntime.Must(batchv1.AddToScheme(s))
+ utilruntime.Must(corev1.AddToScheme(s))
+ utilruntime.Must(rbacv1.AddToScheme(s))
+ utilruntime.Must(kkcorev1.AddToScheme(s))
+ utilruntime.Must(kkcorev1alpha1.AddToScheme(s))
+ utilruntime.Must(kkcorev1alpha1.AddConversionFuncs(s))
+
+ return s
+}
diff --git a/feature/pkg/const/workdir.go b/feature/pkg/const/workdir.go
new file mode 100644
index 000000000..6514d9104
--- /dev/null
+++ b/feature/pkg/const/workdir.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package _const
+
+/** A kubekey workdir is laid out like this:
+workdir/
+|-- projects/
+| |-- ansible-project1/
+| | |-- playbooks/
+| | |-- roles/
+| | | |-- roleName/
+| | | | |-- tasks/
+| | | | | |-- main.yml
+| | | | |-- defaults/
+| | | | | |-- main.yml
+| | | | |-- templates/
+| | | | |-- files/
+| |
+| |-- ansible-project2/
+| |-- ...
+|
+|-- runtime/
+| |-- group/version/
+| | |-- pipelines/
+| | | |-- namespace/
+| | | | |-- pipeline.yaml
+| | | | |-- pipelineName/variable/
+| | | | | |-- location.json
+| | | | | |-- hostname.json
+| | |-- tasks/
+| | | |-- namespace/
+| | | | |-- task.yaml
+| | |-- configs/
+| | | |-- namespace/
+| | | | |-- config.yaml
+| | |-- inventories/
+| | | |-- namespace/
+| | | | |-- inventory.yaml
+|
+|-- kubekey/
+| |-- artifact-path...
+| |-- images/
+|
+|-- kubernetes/
+*/
+
+// workDir is the user-specified working directory. By default, it is the same as the directory where the kubekey command is executed.
+var workDir string
+
+// ProjectDir is a fixed directory name under workdir, used to store the Ansible project.
+const ProjectDir = "projects"
+
+// ansible-project is the name of different Ansible projects
+
+// ProjectPlaybooksDir is a fixed directory name under ansible-project. used to store executable playbook files.
+const ProjectPlaybooksDir = "playbooks"
+
+// ProjectRolesDir is a fixed directory name under ansible-project. used to store roles which playbook need.
+const ProjectRolesDir = "roles"
+
+// roleName is the name of different roles
+
+// ProjectRolesTasksDir is a fixed directory name under roleName. used to store task which role need.
+const ProjectRolesTasksDir = "tasks"
+
+// ProjectRolesTasksMainFile is a fixed file under tasks. it always runs when the role runs. supports *.yaml or *.yml.
+const ProjectRolesTasksMainFile = "main"
+
+// ProjectRolesDefaultsDir is a fixed directory name under roleName. it sets the role's default variables.
+const ProjectRolesDefaultsDir = "defaults"
+
+// ProjectRolesDefaultsMainFile is a fixed file under defaults. supports *.yaml or *.yml.
+const ProjectRolesDefaultsMainFile = "main"
+
+// ProjectRolesTemplateDir is a fixed directory name under roleName. used to store template which task need.
+const ProjectRolesTemplateDir = "templates"
+
+// ProjectRolesFilesDir is a fixed directory name under roleName. used to store files which task need.
+const ProjectRolesFilesDir = "files"
+
+// RuntimeDir is a fixed directory name under workdir, used to store the runtime data of the current task execution.
+const RuntimeDir = "runtime"
+
+// the resources dir store as etcd key.
+// like: /prefix/group/version/resource/namespace/name
+
+// RuntimePipelineDir store Pipeline resources
+const RuntimePipelineDir = "pipelines"
+
+// pipeline.yaml is the data of Pipeline resource
+
+// RuntimePipelineVariableDir is a fixed directory name under runtime, used to store the task execution parameters.
+const RuntimePipelineVariableDir = "variable"
+
+// RuntimePipelineTaskDir is a fixed directory name under runtime, used to store the task execution status.
+
+// task.yaml is the data of Task resource
+
+// RuntimeConfigDir store Config resources
+
+// config.yaml is the data of Config resource
+
+// RuntimeInventoryDir store Inventory resources
+
+// inventory.yaml is the data of Inventory resource
+
+// ArtifactDir is the default directory name under the working directory. It is used to store
+// files required when executing the kubekey command (such as: docker, etcd, image packages, etc.).
+// These files will be downloaded locally and distributed to remote nodes.
+const ArtifactDir = "kubekey"
+
+// artifact-path store artifact package.
+
+// ArtifactImagesDir store images files. contains blobs and manifests.
+const ArtifactImagesDir = "images"
+
+// KubernetesDir represents the remote host directory for each kubernetes connection
+const KubernetesDir = "kubernetes"
diff --git a/feature/pkg/controllers/pipeline_controller.go b/feature/pkg/controllers/pipeline_controller.go
new file mode 100644
index 000000000..addee6ce3
--- /dev/null
+++ b/feature/pkg/controllers/pipeline_controller.go
@@ -0,0 +1,312 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+ "context"
+ "os"
+
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
+ ctrl "sigs.k8s.io/controller-runtime"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+ ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+ ctrlfinalizer "sigs.k8s.io/controller-runtime/pkg/finalizer"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+const (
+ // jobLabel is set on the job or cronJob; its value is the owning pipeline's name.
+ jobLabel = "kubekey.kubesphere.io/pipeline"
+ defaultExecutorImage = "hub.kubesphere.com.cn/kubekey/executor:latest"
+ defaultPullPolicy = "IfNotPresent"
+ defaultServiceAccount = "kk-executor"
+)
+
+// PipelineReconciler reconcile pipeline
+type PipelineReconciler struct {
+ *runtime.Scheme
+ ctrlclient.Client
+ record.EventRecorder
+
+ ctrlfinalizer.Finalizers
+ MaxConcurrentReconciles int
+}
+
+func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ // get pipeline
+ pipeline := &kkcorev1.Pipeline{}
+ err := r.Client.Get(ctx, req.NamespacedName, pipeline)
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ klog.V(5).InfoS("pipeline not found", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
+
+ return ctrl.Result{}, nil
+ }
+
+ return ctrl.Result{}, err
+ }
+
+ if pipeline.DeletionTimestamp != nil {
+ klog.V(5).InfoS("pipeline is deleting", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
+
+ return ctrl.Result{}, nil
+ }
+
+ switch pipeline.Status.Phase {
+ case "":
+ excepted := pipeline.DeepCopy()
+ pipeline.Status.Phase = kkcorev1.PipelinePhasePending
+ if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(excepted)); err != nil {
+ klog.V(5).ErrorS(err, "update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
+
+ return ctrl.Result{}, err
+ }
+ case kkcorev1.PipelinePhasePending:
+ excepted := pipeline.DeepCopy()
+ pipeline.Status.Phase = kkcorev1.PipelinePhaseRunning
+ if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(excepted)); err != nil {
+ klog.V(5).ErrorS(err, "update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
+
+ return ctrl.Result{}, err
+ }
+ case kkcorev1.PipelinePhaseRunning:
+
+ return r.dealRunningPipeline(ctx, pipeline)
+ case kkcorev1.PipelinePhaseFailed:
+ // do nothing
+ case kkcorev1.PipelinePhaseSucceed:
+ // do nothing
+ }
+
+ return ctrl.Result{}, nil
+}
+
+func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *kkcorev1.Pipeline) (ctrl.Result, error) {
+ if err := r.checkServiceAccount(ctx, *pipeline); err != nil {
+ return ctrl.Result{}, err
+ }
+
+ // check if job is exist
+ switch pipeline.Spec.JobSpec.Schedule {
+ case "": // pipeline will create job
+ jobs := &batchv1.JobList{}
+ if err := r.Client.List(ctx, jobs, ctrlclient.InNamespace(pipeline.Namespace), ctrlclient.MatchingLabels{
+ jobLabel: pipeline.Name,
+ }); err != nil && !apierrors.IsNotFound(err) {
+ return ctrl.Result{}, err
+ } else if len(jobs.Items) != 0 {
+ // a job for this pipeline already exists; nothing to do
+ return ctrl.Result{}, nil
+ }
+
+ // create job
+ job := &batchv1.Job{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: pipeline.Name + "-",
+ Namespace: pipeline.Namespace,
+ Labels: map[string]string{
+ jobLabel: pipeline.Name,
+ },
+ },
+ Spec: r.GenerateJobSpec(*pipeline),
+ }
+ if err := controllerutil.SetControllerReference(pipeline, job, r.Scheme); err != nil {
+ return ctrl.Result{}, err
+ }
+
+ if err := r.Client.Create(ctx, job); err != nil {
+ return ctrl.Result{}, err
+ }
+ default: // pipeline will create cronJob
+ jobs := &batchv1.CronJobList{}
+ if err := r.Client.List(ctx, jobs, ctrlclient.InNamespace(pipeline.Namespace), ctrlclient.MatchingLabels{
+ jobLabel: pipeline.Name,
+ }); err != nil && !apierrors.IsNotFound(err) {
+ return ctrl.Result{}, err
+ } else if len(jobs.Items) != 0 {
+ // a cronJob for this pipeline already exists; sync its suspend flag
+ for _, job := range jobs.Items {
+ // update cronJob from pipeline, the pipeline status should always be running.
+ if pipeline.Spec.JobSpec.Suspend != job.Spec.Suspend {
+ cp := job.DeepCopy()
+ job.Spec.Suspend = pipeline.Spec.JobSpec.Suspend
+ // patch the cronJob with the new suspend flag
+ if err := r.Client.Patch(ctx, &job, ctrlclient.MergeFrom(cp)); err != nil {
+ klog.V(5).ErrorS(err, "update cron job error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline),
+ "cronJob", ctrlclient.ObjectKeyFromObject(&job))
+ }
+ }
+ }
+
+ return ctrl.Result{}, nil
+ }
+
+ // create cronJob
+ cronJob := &batchv1.CronJob{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: pipeline.Name + "-",
+ Namespace: pipeline.Namespace,
+ Labels: map[string]string{
+ jobLabel: pipeline.Name,
+ },
+ },
+ Spec: batchv1.CronJobSpec{
+ Schedule: pipeline.Spec.JobSpec.Schedule,
+ JobTemplate: batchv1.JobTemplateSpec{
+ Spec: r.GenerateJobSpec(*pipeline),
+ },
+ Suspend: pipeline.Spec.JobSpec.Suspend,
+ SuccessfulJobsHistoryLimit: pipeline.Spec.JobSpec.SuccessfulJobsHistoryLimit,
+ FailedJobsHistoryLimit: pipeline.Spec.JobSpec.FailedJobsHistoryLimit,
+ },
+ }
+ if err := controllerutil.SetControllerReference(pipeline, cronJob, r.Scheme); err != nil {
+ return ctrl.Result{}, err
+ }
+
+ if err := r.Client.Create(ctx, cronJob); err != nil {
+ return ctrl.Result{}, err
+ }
+ }
+
+ return ctrl.Result{}, nil
+}
+
+// checkServiceAccount when ServiceAccount is not exist, create it.
+func (r *PipelineReconciler) checkServiceAccount(ctx context.Context, pipeline kkcorev1.Pipeline) error {
+ // get ServiceAccount name for executor pod
+ saName, ok := os.LookupEnv("EXECUTOR_SERVICEACCOUNT")
+ if !ok {
+ saName = defaultServiceAccount
+ }
+
+ var sa = &corev1.ServiceAccount{}
+ if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: pipeline.Namespace, Name: saName}, sa); err != nil {
+ if !apierrors.IsNotFound(err) {
+ klog.ErrorS(err, "get service account", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
+
+ return err
+ }
+ // create sa
+ if err := r.Client.Create(ctx, &corev1.ServiceAccount{
+ ObjectMeta: metav1.ObjectMeta{Name: saName, Namespace: pipeline.Namespace},
+ }); err != nil {
+ klog.ErrorS(err, "create service account error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
+
+ return err
+ }
+ }
+
+ var rb = &rbacv1.ClusterRoleBinding{}
+ if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: pipeline.Namespace, Name: saName}, rb); err != nil {
+ if !apierrors.IsNotFound(err) {
+ klog.ErrorS(err, "create role binding error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
+
+ return err
+ }
+ // create the cluster role binding
+ if err := r.Client.Create(ctx, &rbacv1.ClusterRoleBinding{
+ ObjectMeta: metav1.ObjectMeta{Namespace: pipeline.Namespace, Name: saName},
+ RoleRef: rbacv1.RoleRef{
+ APIGroup: rbacv1.GroupName,
+ Kind: "ClusterRole",
+ Name: saName,
+ },
+ Subjects: []rbacv1.Subject{
+ {
+ APIGroup: corev1.GroupName,
+ Kind: "ServiceAccount",
+ Name: saName,
+ Namespace: pipeline.Namespace,
+ },
+ },
+ }); err != nil {
+ klog.ErrorS(err, "create role binding error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GenerateJobSpec for pipeline
+func (r *PipelineReconciler) GenerateJobSpec(pipeline kkcorev1.Pipeline) batchv1.JobSpec {
+ // get ServiceAccount name for executor pod
+ saName, ok := os.LookupEnv("EXECUTOR_SERVICEACCOUNT")
+ if !ok {
+ saName = defaultServiceAccount
+ }
+ // get image from env
+ image, ok := os.LookupEnv("EXECUTOR_IMAGE")
+ if !ok {
+ image = defaultExecutorImage
+ }
+ // get image pull policy from env
+ imagePullPolicy, ok := os.LookupEnv("EXECUTOR_IMAGE_PULLPOLICY")
+ if !ok {
+ imagePullPolicy = defaultPullPolicy
+ }
+
+ // create a job spec
+ jobSpec := batchv1.JobSpec{
+ Parallelism: ptr.To[int32](1),
+ Completions: ptr.To[int32](1),
+ BackoffLimit: ptr.To[int32](0),
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ ServiceAccountName: saName,
+ RestartPolicy: "Never",
+ Volumes: pipeline.Spec.JobSpec.Volumes,
+ Containers: []corev1.Container{
+ {
+ Name: "executor",
+ Image: image,
+ ImagePullPolicy: corev1.PullPolicy(imagePullPolicy),
+ Command: []string{"kk"},
+ Args: []string{"pipeline",
+ "--name", pipeline.Name,
+ "--namespace", pipeline.Namespace},
+ VolumeMounts: pipeline.Spec.JobSpec.VolumeMounts,
+ },
+ },
+ },
+ },
+ }
+
+ return jobSpec
+}
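+
+// The service account, image and pull policy above can be overridden through
+// the EXECUTOR_SERVICEACCOUNT, EXECUTOR_IMAGE and EXECUTOR_IMAGE_PULLPOLICY
+// environment variables of the controller process.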
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *PipelineReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewControllerManagedBy(mgr).
+ WithOptions(ctrlcontroller.Options{
+ MaxConcurrentReconciles: r.MaxConcurrentReconciles,
+ }).
+ For(&kkcorev1.Pipeline{}).
+ Complete(r)
+}
diff --git a/feature/pkg/converter/converter.go b/feature/pkg/converter/converter.go
new file mode 100644
index 000000000..6da378f15
--- /dev/null
+++ b/feature/pkg/converter/converter.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package converter
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/klog/v2"
+
+ kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
+ kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
+)
+
+// MarshalBlock marshals a block into a Task.
+func MarshalBlock(role string, hosts []string, when []string, block kkprojectv1.Block) *kkcorev1alpha1.Task {
+ task := &kkcorev1alpha1.Task{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Task",
+ APIVersion: kkcorev1alpha1.SchemeGroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ CreationTimestamp: metav1.Now(),
+ Annotations: map[string]string{
+ kkcorev1alpha1.TaskAnnotationRole: role,
+ },
+ },
+ Spec: kkcorev1alpha1.TaskSpec{
+ Name: block.Name,
+ Hosts: hosts,
+ IgnoreError: block.IgnoreErrors,
+ Retries: block.Retries,
+ When: when,
+ FailedWhen: block.FailedWhen.Data,
+ Register: block.Register,
+ },
+ }
+
+ if block.Loop != nil {
+ data, err := json.Marshal(block.Loop)
+ if err != nil {
+ klog.V(4).ErrorS(err, "Marshal loop failed", "task", task.Name, "block", block.Name)
+ }
+ task.Spec.Loop = runtime.RawExtension{Raw: data}
+ }
+
+ return task
+}
+
+// GroupHostBySerial groups hosts into batches according to serial.
+func GroupHostBySerial(hosts []string, serial []any) ([][]string, error) {
+ if len(serial) == 0 {
+ return [][]string{hosts}, nil
+ }
+
+ // convert serial entries to []int
+ var sis = make([]int, len(serial))
+ // total number of hosts covered by sis
+ var count int
+ for i, a := range serial {
+ switch val := a.(type) {
+ case int:
+ sis[i] = val
+ case string:
+ if strings.HasSuffix(val, "%") {
+ b, err := strconv.ParseFloat(val[:len(val)-1], 64)
+ if err != nil {
+ return nil, fmt.Errorf("convert serial %v to float error: %w", a, err)
+ }
+ sis[i] = int(math.Ceil(float64(len(hosts)) * b / 100.0))
+ } else {
+ b, err := strconv.Atoi(val)
+ if err != nil {
+ return nil, fmt.Errorf("convert serial %v to int error: %w", a, err)
+ }
+ sis[i] = b
+ }
+ default:
+ return nil, errors.New("unknown serial type. only support int or percent")
+ }
+ if sis[i] == 0 {
+ return nil, fmt.Errorf("serial %v should not be zero", a)
+ }
+ count += sis[i]
+ }
+
+ if len(hosts) > count {
+ for i := 0.0; i < float64(len(hosts)-count)/float64(sis[len(sis)-1]); i++ {
+ sis = append(sis, sis[len(sis)-1])
+ }
+ }
+
+ // total result
+ result := make([][]string, len(sis))
+ var begin, end int
+ for i, si := range sis {
+ end += si
+ if end > len(hosts) {
+ end = len(hosts)
+ }
+ result[i] = hosts[begin:end]
+ begin += si
+ }
+
+ return result, nil
+}
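+
+// A worked example of the grouping above (values illustrative): with
+// hosts = [h1 ... h7] and serial = [1, "40%"], sis becomes [1, 3]; the last
+// batch size repeats to cover the remaining hosts, so the result is
+// [[h1], [h2 h3 h4], [h5 h6 h7]].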
diff --git a/feature/pkg/converter/converter_test.go b/feature/pkg/converter/converter_test.go
new file mode 100644
index 000000000..942368ac4
--- /dev/null
+++ b/feature/pkg/converter/converter_test.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package converter
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGroupHostBySerial(t *testing.T) {
+ hosts := []string{"h1", "h2", "h3", "h4", "h5", "h6", "h7"}
+ testcases := []struct {
+ name string
+ serial []any
+ exceptResult [][]string
+ exceptErr bool
+ }{
+ {
+ name: "group host by 1",
+ serial: []any{1},
+ exceptResult: [][]string{
+ {"h1"},
+ {"h2"},
+ {"h3"},
+ {"h4"},
+ {"h5"},
+ {"h6"},
+ {"h7"},
+ },
+ exceptErr: false,
+ },
+ {
+ name: "group host by serial 2",
+ serial: []any{2},
+ exceptResult: [][]string{
+ {"h1", "h2"},
+ {"h3", "h4"},
+ {"h5", "h6"},
+ {"h7"},
+ },
+ exceptErr: false,
+ },
+ {
+ name: "group host by serial 1 and 2",
+ serial: []any{1, 2},
+ exceptResult: [][]string{
+ {"h1"},
+ {"h2", "h3"},
+ {"h4", "h5"},
+ {"h6", "h7"},
+ },
+ exceptErr: false,
+ },
+ {
+ name: "group host by serial 1 and 40%",
+ serial: []any{"1", "40%"},
+ exceptResult: [][]string{
+ {"h1"},
+ {"h2", "h3", "h4"},
+ {"h5", "h6", "h7"},
+ },
+ exceptErr: false,
+ },
+ {
+ name: "group host by unSupport serial type",
+ serial: []any{1.1},
+ exceptResult: nil,
+ exceptErr: true,
+ },
+ {
+ name: "group host by unSupport serial value",
+ serial: []any{"%1.1%"},
+ exceptResult: nil,
+ exceptErr: true,
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ result, err := GroupHostBySerial(hosts, tc.serial)
+ if err != nil {
+ if tc.exceptErr {
+ assert.Error(t, err)
+
+ return
+ }
+ t.Fatal(err)
+ }
+ assert.Equal(t, tc.exceptResult, result)
+ })
+ }
+}
diff --git a/feature/pkg/converter/internal/functions.go b/feature/pkg/converter/internal/functions.go
new file mode 100644
index 000000000..499b87d3b
--- /dev/null
+++ b/feature/pkg/converter/internal/functions.go
@@ -0,0 +1,60 @@
+package internal
+
+import (
+ "math"
+ "strings"
+ "text/template"
+
+ "github.com/Masterminds/sprig/v3"
+ "gopkg.in/yaml.v3"
+)
+
+// Template parses files or vars defined in a project.
+var Template = template.New("kubekey").Funcs(funcMap())
+
+func funcMap() template.FuncMap {
+ var f = sprig.TxtFuncMap()
+ delete(f, "env")
+ delete(f, "expandenv")
+ // add custom function
+ f["toYaml"] = toYAML
+ f["ipInCIDR"] = ipInCIDR
+ f["pow"] = pow
+
+ return f
+}
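+
+// A minimal sketch of the custom functions in template syntax (values
+// hypothetical):
+//
+//	{{ .config | toYaml }}            renders a value as YAML
+//	{{ ipInCIDR 0 "10.0.0.0/24" }}    first usable IP, "10.0.0.1"
+//	{{ pow 2 10 }}                    1024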
+
+// toYAML takes an interface, marshals it to yaml, and returns a string. It will
+// always return a string, even on marshal error (empty string).
+//
+// This is designed to be called from a template.
+func toYAML(v any) string {
+ data, err := yaml.Marshal(v)
+ if err != nil {
+ // Swallow errors inside of a template.
+ return ""
+ }
+
+ return strings.TrimSuffix(string(data), "\n")
+}
+
+// ipInCIDR returns the IP at the given index within the CIDR range; a negative index counts from the end.
+func ipInCIDR(index int, cidr string) (string, error) {
+ var ips = make([]string, 0)
+ for _, s := range strings.Split(cidr, ",") {
+ ips = append(ips, parseIP(s)...)
+ }
+
+ if index < 0 {
+ index = max(len(ips)+index, 0)
+ }
+ index = max(index, 0)
+ index = min(index, len(ips)-1)
+
+ return ips[index], nil
+}
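+
+// For example: ipInCIDR(0, "10.0.0.0/28") returns "10.0.0.1" (the network
+// and broadcast addresses are excluded by parseIP), and ipInCIDR(-1,
+// "10.0.0.0/28") returns the last usable address, "10.0.0.14".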
+
+// pow Get the "pow" power of "base". (base ** pow)
+func pow(base, pow float64) (float64, error) {
+ return math.Pow(base, pow), nil
+}
diff --git a/feature/pkg/converter/internal/helper.go b/feature/pkg/converter/internal/helper.go
new file mode 100644
index 000000000..80c9485a7
--- /dev/null
+++ b/feature/pkg/converter/internal/helper.go
@@ -0,0 +1,133 @@
+package internal
+
+import (
+ "encoding/binary"
+ "net"
+ "strconv"
+ "strings"
+)
+
+// parseIP expands a CIDR, an IP range string (format "start-end"), or a single IP into the actual IP slice.
+func parseIP(ip string) []string {
+ var availableIPs []string
+ // if ip is "1.1.1.1/",trim /
+ ip = strings.TrimRight(ip, "/")
+ if strings.Contains(ip, "/") {
+ if strings.Contains(ip, "/32") {
+ aip := strings.Replace(ip, "/32", "", -1)
+ availableIPs = append(availableIPs, aip)
+ } else {
+ availableIPs = getAvailableIP(ip)
+ }
+ } else if strings.Contains(ip, "-") {
+ ipRange := strings.SplitN(ip, "-", 2)
+ availableIPs = getAvailableIPRange(ipRange[0], ipRange[1])
+ } else {
+ availableIPs = append(availableIPs, ip)
+ }
+
+ return availableIPs
+}
+
+func getAvailableIPRange(ipStart, ipEnd string) []string {
+ var availableIPs []string
+
+ firstIP := net.ParseIP(ipStart)
+ endIP := net.ParseIP(ipEnd)
+ if firstIP.To4() == nil || endIP.To4() == nil {
+ return availableIPs
+ }
+
+ firstIPNum := ipToInt(firstIP.To4())
+ endIPNum := ipToInt(endIP.To4())
+ pos := int32(1)
+ newNum := firstIPNum
+ for newNum <= endIPNum {
+ availableIPs = append(availableIPs, intToIP(newNum).String())
+ newNum += pos
+ }
+
+ return availableIPs
+}
+
+func getAvailableIP(ipAndMask string) []string {
+ var availableIPs = make([]string, 0)
+
+ ipAndMask = strings.TrimSpace(ipAndMask)
+ ipAndMask = iPAddressToCIDR(ipAndMask)
+ _, ipnet, _ := net.ParseCIDR(ipAndMask)
+
+ firstIP, _ := networkRange(ipnet)
+ ipNum := ipToInt(firstIP)
+ size := networkSize(ipnet.Mask)
+ pos := int32(1)
+ m := size - 2 // -1 for the network address, -1 for the broadcast address
+
+ var newNum int32
+ for range m {
+ newNum = ipNum + pos
+ pos = pos%m + 1
+ availableIPs = append(availableIPs, intToIP(newNum).String())
+ }
+
+ return availableIPs
+}
+
+func ipToInt(ip net.IP) int32 {
+ return int32(binary.BigEndian.Uint32(ip.To4()))
+}
+
+func intToIP(n int32) net.IP {
+ b := make([]byte, 4)
+ binary.BigEndian.PutUint32(b, uint32(n))
+
+ return b
+}
+
+func iPAddressToCIDR(ipAddress string) string {
+ if strings.Contains(ipAddress, "/") {
+ ipAndMask := strings.Split(ipAddress, "/")
+ ip := ipAndMask[0]
+ mask := ipAndMask[1]
+ if strings.Contains(mask, ".") {
+ mask = iPMaskStringToCIDR(mask)
+ }
+
+ return ip + "/" + mask
+ }
+
+ return ipAddress
+}
+
+func iPMaskStringToCIDR(netmask string) string {
+ netmaskList := strings.Split(netmask, ".")
+ var mint = make([]int, len(netmaskList))
+ for i, v := range netmaskList {
+ mint[i], _ = strconv.Atoi(v)
+ }
+
+ myIPMask := net.IPv4Mask(byte(mint[0]), byte(mint[1]), byte(mint[2]), byte(mint[3]))
+ ones, _ := myIPMask.Size()
+
+ return strconv.Itoa(ones)
+}
+
+func networkRange(network *net.IPNet) (net.IP, net.IP) {
+ netIP := network.IP.To4()
+ firstIP := netIP.Mask(network.Mask)
+ lastIP := net.IPv4(0, 0, 0, 0).To4()
+ for i := 0; i < len(lastIP); i++ {
+ lastIP[i] = netIP[i] | ^network.Mask[i]
+ }
+
+ return firstIP, lastIP
+}
+
+func networkSize(mask net.IPMask) int32 {
+ m := net.IPv4Mask(0, 0, 0, 0)
+ for i := range net.IPv4len {
+ m[i] = ^mask[i]
+ }
+
+ return int32(binary.BigEndian.Uint32(m)) + 1
+}
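+
+// For a /24 mask networkSize returns 256, and networkRange of 10.0.0.0/24
+// yields 10.0.0.0 (network) and 10.0.0.255 (broadcast). The int32
+// arithmetic in these helpers assumes IPv4 addresses.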
diff --git a/feature/pkg/converter/internal/helper_test.go b/feature/pkg/converter/internal/helper_test.go
new file mode 100644
index 000000000..d7446121a
--- /dev/null
+++ b/feature/pkg/converter/internal/helper_test.go
@@ -0,0 +1,53 @@
+package internal
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParseIp(t *testing.T) {
+ testcases := []struct {
+ name string
+ ipRange string
+ excepted func() []string
+ }{
+ {
+ name: "parse cidr",
+ ipRange: "192.168.0.0/18",
+ excepted: func() []string {
+ // 192.168.0.1 - 192.168.63.254
+ var ips []string
+ for i := range 64 {
+ for j := range 256 {
+ ips = append(ips, fmt.Sprintf("192.168.%d.%d", i, j))
+ }
+ }
+
+ return ips[1 : len(ips)-1]
+ },
+ },
+ {
+ name: "parse range",
+ ipRange: "192.168.0.1-192.168.63.254",
+ excepted: func() []string {
+ // 192.168.0.1 - 192.168.63.254
+ var ips []string
+ for i := range 64 {
+ for j := range 256 {
+ ips = append(ips, fmt.Sprintf("192.168.%d.%d", i, j))
+ }
+ }
+
+ return ips[1 : len(ips)-1]
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Equal(t, tc.excepted(), parseIP(tc.ipRange))
+ })
+ }
+}
diff --git a/feature/pkg/converter/tmpl/template.go b/feature/pkg/converter/tmpl/template.go
new file mode 100644
index 000000000..d50cc9af5
--- /dev/null
+++ b/feature/pkg/converter/tmpl/template.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tmpl
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+
+ "k8s.io/klog/v2"
+
+ "github.com/kubesphere/kubekey/v4/pkg/converter/internal"
+)
+
+// ParseBool renders each input template and returns true only if every result is exactly "true".
+func ParseBool(ctx map[string]any, inputs []string) (bool, error) {
+ for _, input := range inputs {
+ if !IsTmplSyntax(input) {
+ input = "{{ " + input + " }}"
+ }
+
+ tl, err := internal.Template.Parse(input)
+ if err != nil {
+ return false, fmt.Errorf("failed to parse template '%s': %w", input, err)
+ }
+
+ result := bytes.NewBuffer(nil)
+ if err := tl.Execute(result, ctx); err != nil {
+ return false, fmt.Errorf("failed to execute template '%s': %w", input, err)
+ }
+ klog.V(6).InfoS(" parse template succeed", "result", result.String())
+ if result.String() != "true" {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+// ParseString renders a template string to its actual string value.
+func ParseString(ctx map[string]any, input string) (string, error) {
+ if !IsTmplSyntax(input) {
+ return input, nil
+ }
+
+ tl, err := internal.Template.Parse(input)
+ if err != nil {
+ return "", fmt.Errorf("failed to parse template '%s': %w", input, err)
+ }
+
+ result := bytes.NewBuffer(nil)
+ if err := tl.Execute(result, ctx); err != nil {
+ return "", fmt.Errorf("failed to execute template '%s': %w", input, err)
+ }
+ klog.V(6).InfoS(" parse template succeed", "result", result.String())
+
+ return strings.TrimPrefix(strings.TrimSuffix(result.String(), "\n"), "\n"), nil
+}
+
+// IsTmplSyntax checks whether the string contains template syntax.
+func IsTmplSyntax(s string) bool {
+ return strings.Contains(s, "{{") && strings.Contains(s, "}}")
+}
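+
+// Usage sketch (values hypothetical):
+//
+//	ok, _ := ParseBool(map[string]any{"foo": "v1.23"}, []string{`.foo | semverCompare ">=v1.21"`})
+//	// ok == true: inputs without "{{ }}" are wrapped, and every input must render exactly "true".
+//
+//	s, _ := ParseString(map[string]any{"name": "kk"}, "hello {{ .name }}")
+//	// s == "hello kk"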
diff --git a/feature/pkg/converter/tmpl/template_test.go b/feature/pkg/converter/tmpl/template_test.go
new file mode 100644
index 000000000..ac3d8931f
--- /dev/null
+++ b/feature/pkg/converter/tmpl/template_test.go
@@ -0,0 +1,644 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tmpl
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParseBool(t *testing.T) {
+ testcases := []struct {
+ name string
+ condition []string
+ variable map[string]any
+ excepted bool
+ }{
+ // ======= semverCompare =======
+ {
+ name: "semverCompare true-1",
+ condition: []string{"{{ .foo | semverCompare \">=v1.21\" }}"},
+ variable: map[string]any{
+ "foo": "v1.23",
+ },
+ excepted: true,
+ },
+ {
+ name: "semverCompare true-2",
+ condition: []string{"{{ .foo | semverCompare \"v1.21\" }}"},
+ variable: map[string]any{
+ "foo": "v1.21",
+ },
+ excepted: true,
+ },
+ {
+ name: "semverCompare true-3",
+ condition: []string{"{{ semverCompare \">=v1.21\" .foo }}"},
+ variable: map[string]any{
+ "foo": "v1.23",
+ },
+ excepted: true,
+ },
+// dealBlock executes a block in order: block -> rescue -> always.
+// If rescue is defined, it runs when the block execution fails.
+// If always is defined, it always runs afterwards.
+func (e blockExecutor) dealBlock(ctx context.Context, hosts []string, ignoreErrors *bool, when []string, tags kkprojectv1.Taggable, block kkprojectv1.Block) error {
+ var errs error
+ // exec block
+ if err := (blockExecutor{
+ option: e.option,
+ hosts: hosts,
+ ignoreErrors: ignoreErrors,
+ role: e.role,
+ blocks: block.Block,
+ when: when,
+ tags: tags,
+ }.Exec(ctx)); err != nil {
+ klog.V(5).ErrorS(err, "execute tasks from block error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+ errs = errors.Join(errs, err)
+ }
+ // if block exec failed exec rescue
+ if e.pipeline.Status.Phase == kkcorev1.PipelinePhaseFailed && len(block.Rescue) != 0 {
+ if err := (blockExecutor{
+ option: e.option,
+ hosts: hosts,
+ ignoreErrors: ignoreErrors,
+ blocks: block.Rescue,
+ role: e.role,
+ when: when,
+ tags: tags,
+ }.Exec(ctx)); err != nil {
+ klog.V(5).ErrorS(err, "execute tasks from rescue error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+ errs = errors.Join(errs, err)
+ }
+ }
+ // exec always after block
+ if len(block.Always) != 0 {
+ if err := (blockExecutor{
+ option: e.option,
+ hosts: hosts,
+ ignoreErrors: ignoreErrors,
+ blocks: block.Always,
+ role: e.role,
+ when: when,
+ tags: tags,
+ }.Exec(ctx)); err != nil {
+ klog.V(5).ErrorS(err, "execute tasks from always error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+ errs = errors.Join(errs, err)
+ }
+ }
+ // return any errors accumulated during execution
+ return errs
+}
+
+// dealTask "block" argument is not defined in block.
+func (e blockExecutor) dealTask(ctx context.Context, hosts []string, when []string, block kkprojectv1.Block) error {
+ task := converter.MarshalBlock(e.role, hosts, when, block)
+ // complete by pipeline
+ task.GenerateName = e.pipeline.Name + "-"
+ task.Namespace = e.pipeline.Namespace
+ if err := controllerutil.SetControllerReference(e.pipeline, task, e.client.Scheme()); err != nil {
+ klog.V(5).ErrorS(err, "Set controller reference error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+
+ return err
+ }
+ // complete module by unknown field
+ for n, a := range block.UnknownField {
+ data, err := json.Marshal(a)
+ if err != nil {
+ klog.V(5).ErrorS(err, "Marshal unknown field error", "field", n, "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+
+ return err
+ }
+ if m := modules.FindModule(n); m != nil {
+ task.Spec.Module.Name = n
+ task.Spec.Module.Args = runtime.RawExtension{Raw: data}
+
+ break
+ }
+ }
+
+ if task.Spec.Module.Name == "" { // action is necessary for a task
+ klog.V(5).ErrorS(nil, "No module/action detected in task", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+
+ return fmt.Errorf("no module/action detected in task: %s", task.Name)
+ }
+
+ if err := (taskExecutor{option: e.option, task: task}.Exec(ctx)); err != nil {
+ klog.V(5).ErrorS(err, "exec task error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+
+ return err
+ }
+
+ return nil
+}
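+
+// For illustration (module name assumed to be registered): a task written as
+//
+//	- name: say hi
+//	  command: echo hi
+//
+// carries "command" as an unknown field; if FindModule("command") returns a
+// module, the task executes that module with args "echo hi".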
diff --git a/feature/pkg/executor/block_executor_test.go b/feature/pkg/executor/block_executor_test.go
new file mode 100644
index 000000000..995f21501
--- /dev/null
+++ b/feature/pkg/executor/block_executor_test.go
@@ -0,0 +1,133 @@
+package executor
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/utils/ptr"
+
+ kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
+)
+
+func TestBlockExecutor_DealRunOnce(t *testing.T) {
+ testcases := []struct {
+ name string
+ runOnce bool
+ except []string
+ }{
+ {
+ name: "runonce is false",
+ runOnce: false,
+ except: []string{"node1", "node2", "node3"},
+ },
+ {
+ name: "runonce is true",
+ runOnce: true,
+ except: []string{"node1"},
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.ElementsMatch(t, blockExecutor{
+ hosts: []string{"node1", "node2", "node3"},
+ }.dealRunOnce(tc.runOnce), tc.except)
+ })
+ }
+}
+
+func TestBlockExecutor_DealIgnoreErrors(t *testing.T) {
+ testcases := []struct {
+ name string
+ ignoreErrors *bool
+ except *bool
+ }{
+ {
+ name: "ignoreErrors is empty",
+ ignoreErrors: nil,
+ except: ptr.To(true),
+ },
+ {
+ name: "ignoreErrors is true",
+ ignoreErrors: ptr.To(true),
+ except: ptr.To(true),
+ },
+ {
+ name: "ignoreErrors is false",
+ ignoreErrors: ptr.To(false),
+ except: ptr.To(false),
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Equal(t, blockExecutor{
+ ignoreErrors: ptr.To(true),
+ }.dealIgnoreErrors(tc.ignoreErrors), tc.except)
+ })
+ }
+}
+
+func TestBlockExecutor_DealTags(t *testing.T) {
+ testcases := []struct {
+ name string
+ tags kkprojectv1.Taggable
+ except kkprojectv1.Taggable
+ }{
+ {
+ name: "single tags",
+ tags: kkprojectv1.Taggable{Tags: []string{"c"}},
+ except: kkprojectv1.Taggable{Tags: []string{"a", "b", "c"}},
+ },
+ {
+ name: "mutil tags",
+ tags: kkprojectv1.Taggable{Tags: []string{"c", "d"}},
+ except: kkprojectv1.Taggable{Tags: []string{"a", "b", "c", "d"}},
+ },
+ {
+ name: "repeat tags",
+ tags: kkprojectv1.Taggable{Tags: []string{"b", "c"}},
+ except: kkprojectv1.Taggable{Tags: []string{"a", "b", "c"}},
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.ElementsMatch(t, blockExecutor{
+ tags: kkprojectv1.Taggable{Tags: []string{"a", "b"}},
+ }.dealTags(tc.tags).Tags, tc.except.Tags)
+ })
+ }
+}
+
+func TestBlockExecutor_DealWhen(t *testing.T) {
+ testcases := []struct {
+ name string
+ when []string
+ except []string
+ }{
+ {
+ name: "single when",
+ when: []string{"c"},
+ except: []string{"a", "b", "c"},
+ },
+ {
+ name: "mutil when",
+ when: []string{"c", "d"},
+ except: []string{"a", "b", "c", "d"},
+ },
+ {
+ name: "repeat when",
+ when: []string{"b", "c"},
+ except: []string{"a", "b", "c"},
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.ElementsMatch(t, blockExecutor{
+ when: []string{"a", "b"},
+ }.dealWhen(kkprojectv1.When{Data: tc.when}), tc.except)
+ })
+ }
+}
diff --git a/feature/pkg/executor/executor.go b/feature/pkg/executor/executor.go
new file mode 100644
index 000000000..a3d8e12bd
--- /dev/null
+++ b/feature/pkg/executor/executor.go
@@ -0,0 +1,26 @@
+package executor
+
+import (
+ "context"
+ "io"
+
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// Executor executes all tasks in a pipeline.
+type Executor interface {
+ Exec(ctx context.Context) error
+}
+
+// option for pipelineExecutor, blockExecutor, taskExecutor
+type option struct {
+ client ctrlclient.Client
+
+ pipeline *kkcorev1.Pipeline
+ variable variable.Variable
+ // command-line log output; defaults to os.Stdout
+ logOutput io.Writer
+}
diff --git a/feature/pkg/executor/executor_test.go b/feature/pkg/executor/executor_test.go
new file mode 100644
index 000000000..d0fbddaa7
--- /dev/null
+++ b/feature/pkg/executor/executor_test.go
@@ -0,0 +1,73 @@
+package executor
+
+import (
+ "context"
+ "os"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+ "github.com/kubesphere/kubekey/v4/pkg/variable/source"
+)
+
+func newTestOption() (*option, error) {
+ var err error
+
+ o := &option{
+ client: fake.NewClientBuilder().WithScheme(_const.Scheme).WithStatusSubresource(&kkcorev1.Pipeline{}, &kkcorev1alpha1.Task{}).Build(),
+ pipeline: &kkcorev1.Pipeline{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: corev1.NamespaceDefault,
+ },
+ Spec: kkcorev1.PipelineSpec{
+ InventoryRef: &corev1.ObjectReference{
+ Name: "test",
+ Namespace: corev1.NamespaceDefault,
+ },
+ ConfigRef: &corev1.ObjectReference{
+ Name: "test",
+ Namespace: corev1.NamespaceDefault,
+ },
+ },
+ Status: kkcorev1.PipelineStatus{},
+ },
+ logOutput: os.Stdout,
+ }
+
+ if err := o.client.Create(context.TODO(), &kkcorev1.Inventory{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: corev1.NamespaceDefault,
+ },
+ Spec: kkcorev1.InventorySpec{},
+ }); err != nil {
+ return nil, err
+ }
+
+ if err := o.client.Create(context.TODO(), &kkcorev1.Config{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: corev1.NamespaceDefault,
+ },
+ Spec: runtime.RawExtension{},
+ }); err != nil {
+ return nil, err
+ }
+
+ o.variable, err = variable.New(context.TODO(), o.client, *o.pipeline, source.MemorySource)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
diff --git a/feature/pkg/executor/pipeline_executor.go b/feature/pkg/executor/pipeline_executor.go
new file mode 100644
index 000000000..7338c8c20
--- /dev/null
+++ b/feature/pkg/executor/pipeline_executor.go
@@ -0,0 +1,276 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package executor
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+
+ "k8s.io/klog/v2"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
+ "github.com/kubesphere/kubekey/v4/pkg/connector"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/converter"
+ "github.com/kubesphere/kubekey/v4/pkg/project"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+ "github.com/kubesphere/kubekey/v4/pkg/variable/source"
+)
+
+// NewPipelineExecutor returns a new pipelineExecutor.
+func NewPipelineExecutor(ctx context.Context, client ctrlclient.Client, pipeline *kkcorev1.Pipeline, logOutput io.Writer) Executor {
+ // get variable
+ v, err := variable.New(ctx, client, *pipeline, source.FileSource)
+ if err != nil {
+ klog.V(5).ErrorS(err, "get variable error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
+
+ return nil
+ }
+
+ return &pipelineExecutor{
+ option: &option{
+ client: client,
+ pipeline: pipeline,
+ variable: v,
+ logOutput: logOutput,
+ },
+ }
+}
+
+// executor for pipeline
+type pipelineExecutor struct {
+ *option
+}
+
+// Exec runs the pipeline: convert the playbook to blocks and execute them.
+func (e pipelineExecutor) Exec(ctx context.Context) error {
+ klog.V(5).InfoS("deal project", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+ pj, err := project.New(ctx, *e.pipeline, true)
+ if err != nil {
+ return fmt.Errorf("deal project error: %w", err)
+ }
+
+ // convert to transfer.Playbook struct
+ pb, err := pj.MarshalPlaybook()
+ if err != nil {
+ return fmt.Errorf("convert playbook error: %w", err)
+ }
+
+ for _, play := range pb.Play {
+ // check tags
+ if !play.Taggable.IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) {
+ // skip the play if its tags do not match
+ continue
+ }
+ // hosts should contain all host names and must not be empty.
+ var hosts []string
+ if err := e.dealHosts(play.PlayHost, &hosts); err != nil {
+ klog.V(4).ErrorS(err, "deal hosts error, skip this playbook", "hosts", play.PlayHost)
+
+ continue
+ }
+ // when gather_facts is set, fetch host information from the remote hosts.
+ if err := e.dealGatherFacts(ctx, play.GatherFacts, hosts); err != nil {
+ return fmt.Errorf("deal gather_facts argument error: %w", err)
+ }
+ // Batch execution, with each batch being a group of hosts run in serial.
+ var batchHosts [][]string
+ if err := e.dealSerial(play.Serial.Data, hosts, &batchHosts); err != nil {
+ return fmt.Errorf("deal serial argument error: %w", err)
+ }
+ e.dealRunOnce(play.RunOnce, hosts, &batchHosts)
+ // exec pipeline in each BatchHosts
+ if err := e.execBatchHosts(ctx, play, batchHosts); err != nil {
+ return fmt.Errorf("exec batch hosts error: %v", err)
+ }
+ }
+
+ return nil
+}
+
+// execBatchHosts executes blocks in play order: "pre_tasks" -> "roles" -> "tasks" -> "post_tasks".
+func (e pipelineExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.Play, batchHosts [][]string) error {
+ // generate and execute task.
+ for _, serials := range batchHosts {
+ // each batch hosts should not be empty.
+ if len(serials) == 0 {
+ klog.V(5).ErrorS(nil, "Host is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+
+ return errors.New("host is empty")
+ }
+
+ if err := e.variable.Merge(variable.MergeRuntimeVariable(play.Vars, serials...)); err != nil {
+ return fmt.Errorf("merge variable error: %w", err)
+ }
+ // generate task from pre tasks
+ if err := (blockExecutor{
+ option: e.option,
+ hosts: serials,
+ ignoreErrors: play.IgnoreErrors,
+ blocks: play.PreTasks,
+ tags: play.Taggable,
+ }.Exec(ctx)); err != nil {
+ return fmt.Errorf("execute pre-tasks from play error: %w", err)
+ }
+ // generate task from role
+ for _, role := range play.Roles {
+ if err := e.variable.Merge(variable.MergeRuntimeVariable(role.Vars, serials...)); err != nil {
+ return fmt.Errorf("merge variable error: %w", err)
+ }
+ // use the most specific (closest) configuration
+ ignoreErrors := role.IgnoreErrors
+ if ignoreErrors == nil {
+ ignoreErrors = play.IgnoreErrors
+ }
+ // role is block.
+ if err := (blockExecutor{
+ option: e.option,
+ hosts: serials,
+ ignoreErrors: ignoreErrors,
+ blocks: role.Block,
+ role: role.Role,
+ when: role.When.Data,
+ tags: kkprojectv1.JoinTag(role.Taggable, play.Taggable),
+ }.Exec(ctx)); err != nil {
+ return fmt.Errorf("execute role-tasks error: %w", err)
+ }
+ }
+ // generate task from tasks
+ if err := (blockExecutor{
+ option: e.option,
+ hosts: serials,
+ ignoreErrors: play.IgnoreErrors,
+ blocks: play.Tasks,
+ tags: play.Taggable,
+ }.Exec(ctx)); err != nil {
+ return fmt.Errorf("execute tasks error: %w", err)
+ }
+ // generate task from post tasks
+ if err := (blockExecutor{
+ option: e.option,
+ hosts: serials,
+ ignoreErrors: play.IgnoreErrors,
+ blocks: play.PostTasks,
+ tags: play.Taggable,
+ }.Exec(ctx)); err != nil {
+ return fmt.Errorf("execute post-tasks error: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// dealHosts "hosts" argument in playbook. get hostname from kkprojectv1.PlayHost
+func (e pipelineExecutor) dealHosts(host kkprojectv1.PlayHost, i *[]string) error {
+ ahn, err := e.variable.Get(variable.GetHostnames(host.Hosts))
+ if err != nil {
+ return fmt.Errorf("getHostnames error: %w", err)
+ }
+
+ if h, ok := ahn.([]string); ok {
+ *i = h
+ }
+ if len(*i) == 0 { // if hosts is empty skip this playbook
+ return errors.New("hosts is empty")
+ }
+
+ return nil
+}
+
+// dealGatherFacts "gather_facts" argument in playbook. get host remote info and merge to variable
+func (e pipelineExecutor) dealGatherFacts(ctx context.Context, gatherFacts bool, hosts []string) error {
+ if !gatherFacts {
+ // skip
+ return nil
+ }
+
+ dealGatherFactsInHost := func(hostname string) error {
+ v, err := e.variable.Get(variable.GetParamVariable(hostname))
+ if err != nil {
+ klog.V(5).ErrorS(err, "get host variable error", "hostname", hostname)
+
+ return err
+ }
+
+ connectorVars := make(map[string]any)
+ if c1, ok := v.(map[string]any)[_const.VariableConnector]; ok {
+ if c2, ok := c1.(map[string]any); ok {
+ connectorVars = c2
+ }
+ }
+ // get host connector
+ conn, err := connector.NewConnector(hostname, connectorVars)
+ if err != nil {
+ klog.V(5).ErrorS(err, "new connector error", "hostname", hostname)
+
+ return err
+ }
+ if err := conn.Init(ctx); err != nil {
+ klog.V(5).ErrorS(err, "init connection error", "hostname", hostname)
+
+ return err
+ }
+ defer conn.Close(ctx)
+
+ if gf, ok := conn.(connector.GatherFacts); ok {
+ remoteInfo, err := gf.HostInfo(ctx)
+ if err != nil {
+ klog.V(5).ErrorS(err, "gatherFacts from connector error", "hostname", hostname)
+
+ return err
+ }
+ if err := e.variable.Merge(variable.MergeRemoteVariable(remoteInfo, hostname)); err != nil {
+ klog.V(5).ErrorS(err, "merge gather fact error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "host", hostname)
+
+ return fmt.Errorf("merge gather fact error: %w", err)
+ }
+ }
+
+ return nil
+ }
+
+ for _, hostname := range hosts {
+ if err := dealGatherFactsInHost(hostname); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// dealSerial "serial" argument in playbook.
+func (e pipelineExecutor) dealSerial(serial []any, hosts []string, batchHosts *[][]string) error {
+ var err error
+ *batchHosts, err = converter.GroupHostBySerial(hosts, serial)
+ if err != nil {
+ return fmt.Errorf("group host by serial error: %w", err)
+ }
+
+ return nil
+}
+
+// dealRunOnce handles the "run_once" argument in a playbook: when true, run only on the first host.
+func (e pipelineExecutor) dealRunOnce(runOnce bool, hosts []string, batchHosts *[][]string) {
+ if runOnce {
+ // runOnce only run in first node
+ *batchHosts = [][]string{{hosts[0]}}
+ }
+}
diff --git a/feature/pkg/executor/pipeline_executor_test.go b/feature/pkg/executor/pipeline_executor_test.go
new file mode 100644
index 000000000..a6c767212
--- /dev/null
+++ b/feature/pkg/executor/pipeline_executor_test.go
@@ -0,0 +1,38 @@
+package executor
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPipelineExecutor_DealRunOnce(t *testing.T) {
+ testcases := []struct {
+ name string
+ runOnce bool
+ hosts []string
+ batchHosts [][]string
+ except [][]string
+ }{
+ {
+ name: "runonce is false",
+ runOnce: false,
+ batchHosts: [][]string{{"node1", "node2"}},
+ except: [][]string{{"node1", "node2"}},
+ },
+ {
+ name: "runonce is true",
+ runOnce: true,
+ hosts: []string{"node1"},
+ batchHosts: [][]string{{"node1", "node2"}},
+ except: [][]string{{"node1"}},
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ pipelineExecutor{}.dealRunOnce(tc.runOnce, tc.hosts, &tc.batchHosts)
+ assert.Equal(t, tc.batchHosts, tc.except)
+ })
+ }
+}
diff --git a/feature/pkg/executor/task_executor.go b/feature/pkg/executor/task_executor.go
new file mode 100644
index 000000000..44a8382ec
--- /dev/null
+++ b/feature/pkg/executor/task_executor.go
@@ -0,0 +1,342 @@
+package executor
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/schollz/progressbar/v3"
+ "k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/klog/v2"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+ "github.com/kubesphere/kubekey/v4/pkg/modules"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+type taskExecutor struct {
+ *option
+ task *kkcorev1alpha1.Task
+}
+
+// Exec executes the task and stores its result.
+func (e taskExecutor) Exec(ctx context.Context) error {
+ // create task
+ if err := e.client.Create(ctx, e.task); err != nil {
+ klog.V(5).ErrorS(err, "create task error", "task", ctrlclient.ObjectKeyFromObject(e.task), "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+
+ return err
+ }
+ defer func() {
+ e.pipeline.Status.TaskResult.Total++
+ switch e.task.Status.Phase {
+ case kkcorev1alpha1.TaskPhaseSuccess:
+ e.pipeline.Status.TaskResult.Success++
+ case kkcorev1alpha1.TaskPhaseIgnored:
+ e.pipeline.Status.TaskResult.Ignored++
+ case kkcorev1alpha1.TaskPhaseFailed:
+ e.pipeline.Status.TaskResult.Failed++
+ }
+ }()
+
+ for !e.task.IsComplete() {
+ var roleLog string
+ if e.task.Annotations[kkcorev1alpha1.TaskAnnotationRole] != "" {
+ roleLog = "[" + e.task.Annotations[kkcorev1alpha1.TaskAnnotationRole] + "] "
+ }
+ klog.V(5).InfoS("begin run task", "task", ctrlclient.ObjectKeyFromObject(e.task))
+ fmt.Fprintf(e.logOutput, "%s %s%s\n", time.Now().Format(time.TimeOnly+" MST"), roleLog, e.task.Spec.Name)
+ // exec task
+ e.task.Status.Phase = kkcorev1alpha1.TaskPhaseRunning
+ if err := e.client.Status().Update(ctx, e.task); err != nil {
+ klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(e.task), "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+ }
+ e.execTask(ctx)
+ if err := e.client.Status().Update(ctx, e.task); err != nil {
+ klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(e.task), "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+
+ return err
+ }
+ }
+ // exit when task run failed
+ if e.task.IsFailed() {
+ var hostReason []kkcorev1.PipelineFailedDetailHost
+ for _, tr := range e.task.Status.HostResults {
+ hostReason = append(hostReason, kkcorev1.PipelineFailedDetailHost{
+ Host: tr.Host,
+ Stdout: tr.Stdout,
+ StdErr: tr.StdErr,
+ })
+ }
+ e.pipeline.Status.FailedDetail = append(e.pipeline.Status.FailedDetail, kkcorev1.PipelineFailedDetail{
+ Task: e.task.Spec.Name,
+ Hosts: hostReason,
+ })
+ e.pipeline.Status.Phase = kkcorev1.PipelinePhaseFailed
+
+ return fmt.Errorf("task %s run failed", e.task.Spec.Name)
+ }
+
+ return nil
+}
+
+// execTask runs the task on all hosts in parallel and sets the aggregate phase.
+func (e taskExecutor) execTask(ctx context.Context) {
+ // check task host results
+ wg := &wait.Group{}
+ e.task.Status.HostResults = make([]kkcorev1alpha1.TaskHostResult, len(e.task.Spec.Hosts))
+ for i, h := range e.task.Spec.Hosts {
+ wg.StartWithContext(ctx, e.execTaskHost(i, h))
+ }
+ wg.Wait()
+ // host result for task
+ e.task.Status.Phase = kkcorev1alpha1.TaskPhaseSuccess
+ for _, data := range e.task.Status.HostResults {
+ if data.StdErr != "" {
+ if e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError {
+ e.task.Status.Phase = kkcorev1alpha1.TaskPhaseIgnored
+ } else {
+ e.task.Status.Phase = kkcorev1alpha1.TaskPhaseFailed
+ }
+
+ break
+ }
+ }
+}
+
+// execTaskHost runs the task's module on a single host; hosts run in parallel.
+func (e taskExecutor) execTaskHost(i int, h string) func(ctx context.Context) {
+ return func(ctx context.Context) {
+ // task result
+ var stdout, stderr string
+ defer func() {
+ if err := e.dealRegister(stdout, stderr, h); err != nil {
+ stderr = err.Error()
+ }
+ if stderr != "" && e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError {
+ klog.V(5).ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(e.task))
+ } else if stderr != "" {
+ klog.ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(e.task))
+ }
+ // fill result
+ e.task.Status.HostResults[i] = kkcorev1alpha1.TaskHostResult{
+ Host: h,
+ Stdout: stdout,
+ StdErr: stderr,
+ }
+ }()
+ // task log
+ deferFunc := e.execTaskHostLogs(ctx, h, &stdout, &stderr)
+ defer deferFunc()
+ // task execute
+ ha, err := e.variable.Get(variable.GetAllVariable(h))
+ if err != nil {
+ stderr = fmt.Sprintf("get variable error: %v", err)
+
+ return
+ }
+ // convert hostVariable to map
+ had, ok := ha.(map[string]any)
+ if !ok {
+ stderr = fmt.Sprintf("variable is not map error: %v", err)
+ }
+ // check when condition
+ if skip := e.dealWhen(had, &stdout, &stderr); skip {
+ return
+ }
+ // execute module in loop with loop item.
+ // if loop is empty. execute once, and the item is null
+ for _, item := range e.dealLoop(had) {
+ // set item to runtime variable
+ if err := e.variable.Merge(variable.MergeRuntimeVariable(map[string]any{
+ _const.VariableItem: item,
+ }, h)); err != nil {
+ stderr = fmt.Sprintf("set loop item to variable error: %v", err)
+
+ return
+ }
+ e.executeModule(ctx, e.task, h, &stdout, &stderr)
+ // delete item
+ if err := e.variable.Merge(variable.MergeRuntimeVariable(map[string]any{
+ _const.VariableItem: nil,
+ }, h)); err != nil {
+ stderr = fmt.Sprintf("clean loop item to variable error: %v", err)
+
+ return
+ }
+ }
+ }
+}
+
+// execTaskHostLogs renders the per-host progress bar and returns a finisher func.
+func (e taskExecutor) execTaskHostLogs(ctx context.Context, h string, stdout, stderr *string) func() {
+ // placeholder format task log
+ var placeholder string
+ if hostNameMaxLen, err := e.variable.Get(variable.GetHostMaxLength()); err == nil {
+ if hl, ok := hostNameMaxLen.(int); ok {
+ placeholder = strings.Repeat(" ", hl-len(h))
+ }
+ }
+ // progress bar for task
+ var bar = progressbar.NewOptions(-1,
+ progressbar.OptionSetWriter(e.logOutput),
+ progressbar.OptionSpinnerCustom([]string{" "}),
+ progressbar.OptionEnableColorCodes(true),
+ progressbar.OptionSetDescription(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[36mrunning\033[0m", h, placeholder)),
+ progressbar.OptionOnCompletion(func() {
+ if _, err := os.Stdout.WriteString("\n"); err != nil {
+ klog.ErrorS(err, "failed to write output", "host", h)
+ }
+ }),
+ )
+ // run progress
+ go func() {
+ err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(context.Context) (bool, error) {
+ if bar.IsFinished() {
+ return true, nil
+ }
+ if err := bar.Add(1); err != nil {
+ return false, err
+ }
+
+ return false, nil
+ })
+ if err != nil {
+ klog.ErrorS(err, "failed to wait for task run to finish", "host", h)
+ }
+ }()
+
+ return func() {
+ switch {
+ case *stderr != "":
+ if e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError { // ignore
+ bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34mignore \033[0m", h, placeholder))
+ } else { // failed
+ bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[31mfailed \033[0m", h, placeholder))
+ }
+ case *stdout == modules.StdoutSkip: // skip
+ bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34mskip \033[0m", h, placeholder))
+ default: //success
+ bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34msuccess\033[0m", h, placeholder))
+ }
+ if err := bar.Finish(); err != nil {
+ klog.ErrorS(err, "finish bar error")
+ }
+ }
+}
+
+// dealLoop parses the task's loop field into an item slice. If loop is not
+// set, a single nil item is returned so the module runs once; otherwise the
+// raw extension is converted to a slice (rendering templates as needed).
+func (e taskExecutor) dealLoop(ha map[string]any) []any {
+ var items []any
+ switch {
+ case e.task.Spec.Loop.Raw == nil:
+ // loop is not set. add one element to execute once module.
+ items = []any{nil}
+ default:
+ items = variable.Extension2Slice(ha, e.task.Spec.Loop)
+ }
+
+ return items
+}
+
+// executeModule finds the registered module and executes it on a single host.
+func (e taskExecutor) executeModule(ctx context.Context, task *kkcorev1alpha1.Task, host string, stdout, stderr *string) {
+ // get all variable. which contains item.
+ ha, err := e.variable.Get(variable.GetAllVariable(host))
+ if err != nil {
+ *stderr = fmt.Sprintf("failed to get host %s variable: %v", host, err)
+
+ return
+ }
+ // convert hostVariable to map
+ had, ok := ha.(map[string]any)
+ if !ok {
+ *stderr = fmt.Sprintf("host: %s variable is not a map", host)
+
+ return
+ }
+ // check failed when condition
+ if skip := e.dealFailedWhen(had, stdout, stderr); skip {
+ return
+ }
+ *stdout, *stderr = modules.FindModule(task.Spec.Module.Name)(ctx, modules.ExecOptions{
+ Args: e.task.Spec.Module.Args,
+ Host: host,
+ Variable: e.variable,
+ Task: *e.task,
+ Pipeline: *e.pipeline,
+ })
+}
+
+// dealWhen "when" argument in task.
+func (e taskExecutor) dealWhen(had map[string]any, stdout, stderr *string) bool {
+ if len(e.task.Spec.When) > 0 {
+ ok, err := tmpl.ParseBool(had, e.task.Spec.When)
+ if err != nil {
+ klog.V(5).ErrorS(err, "validate when condition error", "task", ctrlclient.ObjectKeyFromObject(e.task))
+ *stderr = fmt.Sprintf("parse when condition error: %v", err)
+
+ return true
+ }
+ if !ok {
+ *stdout = modules.StdoutSkip
+
+ return true
+ }
+ }
+
+ return false
+}
+
+// dealFailedWhen "failed_when" argument in task.
+func (e taskExecutor) dealFailedWhen(had map[string]any, stdout, stderr *string) bool {
+ if len(e.task.Spec.FailedWhen) > 0 {
+ ok, err := tmpl.ParseBool(had, e.task.Spec.FailedWhen)
+ if err != nil {
+ klog.V(5).ErrorS(err, "validate failed_when condition error", "task", ctrlclient.ObjectKeyFromObject(e.task))
+ *stderr = fmt.Sprintf("parse failed_when condition error: %v", err)
+
+ return true
+ }
+ if ok {
+ *stdout = modules.StdoutSkip
+ *stderr = "reach failed_when, failed"
+
+ return true
+ }
+ }
+
+ return false
+}
+
+// dealRegister "register" argument in task.
+func (e taskExecutor) dealRegister(stdout, stderr, host string) error {
+ if e.task.Spec.Register != "" {
+ var stdoutResult any = stdout
+ var stderrResult any = stderr
+ // try to convert by json
+ _ = json.Unmarshal([]byte(stdout), &stdoutResult)
+ // try to convert by json
+ _ = json.Unmarshal([]byte(stderr), &stderrResult)
+ // set variable to parent location
+ if err := e.variable.Merge(variable.MergeRuntimeVariable(map[string]any{
+ e.task.Spec.Register: map[string]any{
+ "stdout": stdoutResult,
+ "stderr": stderrResult,
+ },
+ }, host)); err != nil {
+ return fmt.Errorf("register task result to variable error: %w", err)
+ }
+ }
+
+ return nil
+}
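+
+// Sketch of the resulting variable layout (names hypothetical): a task with
+// register: "foo" whose module wrote `{"rc":0}` to stdout merges
+//
+//	foo:
+//	  stdout:
+//	    rc: 0
+//	  stderr: ""
+//
+// into the host's runtime variables, so later templates can reference
+// {{ .foo.stdout.rc }}.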
diff --git a/feature/pkg/executor/task_executor_test.go b/feature/pkg/executor/task_executor_test.go
new file mode 100644
index 000000000..fdf0bede4
--- /dev/null
+++ b/feature/pkg/executor/task_executor_test.go
@@ -0,0 +1,71 @@
+package executor
+
+import (
+ "context"
+ "testing"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
+)
+
+func TestTaskExecutor(t *testing.T) {
+ testcases := []struct {
+ name string
+ task *kkcorev1alpha1.Task
+ }{
+ {
+ name: "debug module in single host",
+ task: &kkcorev1alpha1.Task{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: corev1.NamespaceDefault,
+ },
+ Spec: kkcorev1alpha1.TaskSpec{
+ Hosts: []string{"node1"},
+ Module: kkcorev1alpha1.Module{
+ Name: "debug",
+ Args: runtime.RawExtension{Raw: []byte(`{"msg":"hello"}`)},
+ },
+ },
+ Status: kkcorev1alpha1.TaskStatus{},
+ },
+ },
+ {
+ name: "debug module in multiple hosts",
+ task: &kkcorev1alpha1.Task{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: corev1.NamespaceDefault,
+ },
+ Spec: kkcorev1alpha1.TaskSpec{
+ Hosts: []string{"node1", "n2"},
+ Module: kkcorev1alpha1.Module{
+ Name: "debug",
+ Args: runtime.RawExtension{Raw: []byte(`{"msg":"hello"}`)},
+ },
+ },
+ Status: kkcorev1alpha1.TaskStatus{},
+ },
+ },
+ }
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ o, err := newTestOption()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := (&taskExecutor{
+ option: o,
+ task: tc.task,
+ }).Exec(context.TODO()); err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
diff --git a/feature/pkg/manager/command_manager.go b/feature/pkg/manager/command_manager.go
new file mode 100644
index 000000000..5c4d565ba
--- /dev/null
+++ b/feature/pkg/manager/command_manager.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package manager
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "k8s.io/klog/v2"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/executor"
+)
+
+type commandManager struct {
+ *kkcorev1.Pipeline
+ *kkcorev1.Config
+ *kkcorev1.Inventory
+
+ ctrlclient.Client
+
+ logOutput io.Writer
+}
+
+// Run the command manager: print the banner and run the pipeline executor.
+func (m *commandManager) Run(ctx context.Context) error {
+ fmt.Fprint(m.logOutput, `
+
+ _ __ _ _ __
+| | / / | | | | / /
+| |/ / _ _| |__ ___| |/ / ___ _ _
+| \| | | | '_ \ / _ \ \ / _ \ | | |
+| |\ \ |_| | |_) | __/ |\ \ __/ |_| |
+\_| \_/\__,_|_.__/ \___\_| \_/\___|\__, |
+ __/ |
+ |___/
+
+`)
+ fmt.Fprintf(m.logOutput, "%s [Pipeline %s] start\n", time.Now().Format(time.TimeOnly+" MST"), ctrlclient.ObjectKeyFromObject(m.Pipeline))
+ cp := m.Pipeline.DeepCopy()
+ defer func() {
+ fmt.Fprintf(m.logOutput, "%s [Pipeline %s] finish. total: %v,success: %v,ignored: %v,failed: %v\n", time.Now().Format(time.TimeOnly+" MST"), ctrlclient.ObjectKeyFromObject(m.Pipeline),
+ m.Pipeline.Status.TaskResult.Total, m.Pipeline.Status.TaskResult.Success, m.Pipeline.Status.TaskResult.Ignored, m.Pipeline.Status.TaskResult.Failed)
+ go func() {
+ if !m.Pipeline.Spec.Debug && m.Pipeline.Status.Phase == kkcorev1.PipelinePhaseSucceed {
+ <-ctx.Done()
+ fmt.Fprintf(m.logOutput, "%s [Pipeline %s] clean runtime directory\n", time.Now().Format(time.TimeOnly+" MST"), ctrlclient.ObjectKeyFromObject(m.Pipeline))
+ // clean runtime directory
+ if err := os.RemoveAll(_const.GetRuntimeDir()); err != nil {
+ klog.ErrorS(err, "clean runtime directory error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline), "runtime_dir", _const.GetRuntimeDir())
+ }
+ }
+ }()
+ // if the pipeline is a cron job, it stays running.
+ if m.Pipeline.Spec.JobSpec.Schedule != "" {
+ m.Pipeline.Status.Phase = kkcorev1.PipelinePhaseRunning
+ }
+ // update pipeline status
+ if err := m.Client.Status().Patch(ctx, m.Pipeline, ctrlclient.MergeFrom(cp)); err != nil {
+ klog.ErrorS(err, "update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline))
+ }
+ }()
+
+ m.Pipeline.Status.Phase = kkcorev1.PipelinePhaseSucceed
+ if err := executor.NewPipelineExecutor(ctx, m.Client, m.Pipeline, m.logOutput).Exec(ctx); err != nil {
+ klog.ErrorS(err, "executor tasks error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline))
+ m.Pipeline.Status.Phase = kkcorev1.PipelinePhaseFailed
+ m.Pipeline.Status.Reason = err.Error()
+
+ return err
+ }
+
+ return nil
+}
diff --git a/feature/pkg/manager/controller_manager.go b/feature/pkg/manager/controller_manager.go
new file mode 100644
index 000000000..dd72a3cd6
--- /dev/null
+++ b/feature/pkg/manager/controller_manager.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package manager
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/client-go/rest"
+ "k8s.io/klog/v2"
+ ctrl "sigs.k8s.io/controller-runtime"
+
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/controllers"
+ "github.com/kubesphere/kubekey/v4/pkg/proxy"
+)
+
+type controllerManager struct {
+ MaxConcurrentReconciles int
+ LeaderElection bool
+}
+
+// Run starts the controller manager and runs the controllers in Kubernetes.
+func (c controllerManager) Run(ctx context.Context) error {
+ ctrl.SetLogger(klog.NewKlogr())
+ restconfig, err := ctrl.GetConfig()
+ if err != nil {
+ klog.Infof("kubeconfig in empty, store resources local")
+ restconfig = &rest.Config{}
+ }
+ restconfig, err = proxy.NewConfig(restconfig)
+ if err != nil {
+ return fmt.Errorf("could not get rest config: %w", err)
+ }
+
+ mgr, err := ctrl.NewManager(restconfig, ctrl.Options{
+ Scheme: _const.Scheme,
+ LeaderElection: c.LeaderElection,
+ LeaderElectionID: "controller-leader-election-kk",
+ })
+ if err != nil {
+ return fmt.Errorf("could not create controller manager: %w", err)
+ }
+
+ if err := (&controllers.PipelineReconciler{
+ Client: mgr.GetClient(),
+ EventRecorder: mgr.GetEventRecorderFor("pipeline"),
+ Scheme: mgr.GetScheme(),
+ MaxConcurrentReconciles: c.MaxConcurrentReconciles,
+ }).SetupWithManager(mgr); err != nil {
+ klog.ErrorS(err, "create pipeline controller error")
+
+ return err
+ }
+
+ return mgr.Start(ctx)
+}
diff --git a/feature/pkg/manager/manager.go b/feature/pkg/manager/manager.go
new file mode 100644
index 000000000..a8ecf0ead
--- /dev/null
+++ b/feature/pkg/manager/manager.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package manager
+
+import (
+ "context"
+ "os"
+
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+// Manager holds shared dependencies and provides them to Runnables.
+type Manager interface {
+ // Run the driver
+ Run(ctx context.Context) error
+}
+
+// CommandManagerOptions for NewCommandManager
+type CommandManagerOptions struct {
+ *kkcorev1.Pipeline
+ *kkcorev1.Config
+ *kkcorev1.Inventory
+
+ ctrlclient.Client
+}
+
+// NewCommandManager returns a new commandManager.
+func NewCommandManager(o CommandManagerOptions) Manager {
+ return &commandManager{
+ Pipeline: o.Pipeline,
+ Config: o.Config,
+ Inventory: o.Inventory,
+ Client: o.Client,
+ logOutput: os.Stdout,
+ }
+}
+
+// ControllerManagerOptions for NewControllerManager
+type ControllerManagerOptions struct {
+ MaxConcurrentReconciles int
+ LeaderElection bool
+}
+
+// NewControllerManager returns a new controllerManager.
+func NewControllerManager(o ControllerManagerOptions) Manager {
+ return &controllerManager{
+ MaxConcurrentReconciles: o.MaxConcurrentReconciles,
+ LeaderElection: o.LeaderElection,
+ }
+}
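+
+// Usage sketch (a minimal example; objects assumed to be constructed by the
+// caller):
+//
+//	m := NewCommandManager(CommandManagerOptions{
+//		Pipeline:  pipeline,
+//		Config:    config,
+//		Inventory: inventory,
+//		Client:    client,
+//	})
+//	err := m.Run(ctx)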
diff --git a/feature/pkg/modules/assert.go b/feature/pkg/modules/assert.go
new file mode 100644
index 000000000..a19b185d8
--- /dev/null
+++ b/feature/pkg/modules/assert.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/klog/v2"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+type assertArgs struct {
+ that []string
+ successMsg string
+ failMsg string // takes priority over msg
+ msg string
+}
+
+func newAssertArgs(_ context.Context, raw runtime.RawExtension, vars map[string]any) (*assertArgs, error) {
+ var err error
+ aa := &assertArgs{}
+ args := variable.Extension2Variables(raw)
+ if aa.that, err = variable.StringSliceVar(vars, args, "that"); err != nil {
+ return nil, errors.New("\"that\" should be []string or string")
+ }
+ aa.successMsg, _ = variable.StringVar(vars, args, "success_msg")
+ if aa.successMsg == "" {
+ aa.successMsg = StdoutTrue
+ }
+ aa.failMsg, _ = variable.StringVar(vars, args, "fail_msg")
+ aa.msg, _ = variable.StringVar(vars, args, "msg")
+ if aa.msg == "" {
+ aa.msg = StdoutFalse
+ }
+
+ return aa, nil
+}
+
+// ModuleAssert deal "assert" module
+func ModuleAssert(ctx context.Context, options ExecOptions) (string, string) {
+ // get host variable
+ ha, err := options.getAllVariables()
+ if err != nil {
+ return "", err.Error()
+ }
+
+ aa, err := newAssertArgs(ctx, options.Args, ha)
+ if err != nil {
+ klog.V(4).ErrorS(err, "get assert args error", "task", ctrlclient.ObjectKeyFromObject(&options.Task))
+
+ return "", err.Error()
+ }
+
+ ok, err := tmpl.ParseBool(ha, aa.that)
+ if err != nil {
+ return "", fmt.Sprintf("parse \"that\" error: %v", err)
+ }
+ // condition is true
+ if ok {
+ r, err := tmpl.ParseString(ha, aa.successMsg)
+ if err == nil {
+ return r, ""
+ }
+ klog.V(4).ErrorS(err, "parse \"success_msg\" error", "task", ctrlclient.ObjectKeyFromObject(&options.Task))
+
+ return StdoutTrue, ""
+ }
+ // condition is false and fail_msg is not empty
+ if aa.failMsg != "" {
+ r, err := tmpl.ParseString(ha, aa.failMsg)
+ if err == nil {
+ return StdoutFalse, r
+ }
+ klog.V(4).ErrorS(err, "parse \"fail_msg\" error", "task", ctrlclient.ObjectKeyFromObject(&options.Task))
+ }
+ // condition is false and msg is not empty
+ if aa.msg != "" {
+ r, err := tmpl.ParseString(ha, aa.msg)
+ if err == nil {
+ return StdoutFalse, r
+ }
+ klog.V(4).ErrorS(err, "parse \"msg\" error", "task", ctrlclient.ObjectKeyFromObject(&options.Task))
+ }
+
+ return StdoutFalse, "False"
+}
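+
+// Example (illustrative sketch): a playbook task invoking the "assert"
+// module. The args mirror the fields parsed by newAssertArgs ("that",
+// "success_msg", "fail_msg", "msg"); the task and variable names are
+// hypothetical:
+//
+//	- name: check that the node role is expected
+//	  assert:
+//	    that:
+//	      - eq .node_role "control-plane"
+//	    success_msg: "node role ok"
+//	    fail_msg: "unexpected role: {{ .node_role }}"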
diff --git a/feature/pkg/modules/assert_test.go b/feature/pkg/modules/assert_test.go
new file mode 100644
index 000000000..2a1afaa6a
--- /dev/null
+++ b/feature/pkg/modules/assert_test.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestAssert(t *testing.T) {
+ testcases := []struct {
+ name string
+ opt ExecOptions
+ exceptStdout string
+ exceptStderr string
+ }{
+ {
+ name: "non-that",
+ opt: ExecOptions{
+ Host: "local",
+ Variable: &testVariable{},
+ Args: runtime.RawExtension{},
+ },
+ exceptStderr: "\"that\" should be []string or string",
+ },
+ {
+ name: "success with non-msg",
+ opt: ExecOptions{
+ Host: "local",
+ Args: runtime.RawExtension{
+ Raw: []byte(`{"that": ["true", "eq .testvalue \"a\""]}`),
+ },
+ Variable: &testVariable{
+ value: map[string]any{
+ "testvalue": "a",
+ },
+ },
+ },
+ exceptStdout: StdoutTrue,
+ },
+ {
+ name: "success with success_msg",
+ opt: ExecOptions{
+ Host: "local",
+ Args: runtime.RawExtension{
+ Raw: []byte(`{"that": ["true", "eq .k1 \"v1\""], "success_msg": "success {{ .k2 }}"}`),
+ },
+ Variable: &testVariable{
+ value: map[string]any{
+ "k1": "v1",
+ "k2": "v2",
+ },
+ },
+ },
+ exceptStdout: "success v2",
+ },
+ {
+ name: "failed with non-msg",
+ opt: ExecOptions{
+ Host: "local",
+ Args: runtime.RawExtension{
+ Raw: []byte(`{"that": ["true", "eq .k1 \"v2\""]}`),
+ },
+ Variable: &testVariable{
+ value: map[string]any{
+ "k1": "v1",
+ "k2": "v2",
+ },
+ },
+ },
+ exceptStdout: StdoutFalse,
+ exceptStderr: "False",
+ },
+ {
+ name: "failed with failed_msg",
+ opt: ExecOptions{
+ Host: "local",
+ Args: runtime.RawExtension{
+ Raw: []byte(`{"that": ["true", "eq .k1 \"v2\""], "fail_msg": "failed {{ .k2 }}"}`),
+ },
+ Variable: &testVariable{
+ value: map[string]any{
+ "k1": "v1",
+ "k2": "v2",
+ },
+ },
+ },
+ exceptStdout: StdoutFalse,
+ exceptStderr: "failed v2",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+
+ acStdout, acStderr := ModuleAssert(ctx, tc.opt)
+ assert.Equal(t, tc.exceptStdout, acStdout)
+ assert.Equal(t, tc.exceptStderr, acStderr)
+ })
+ }
+}
diff --git a/feature/pkg/modules/command.go b/feature/pkg/modules/command.go
new file mode 100644
index 000000000..1161054e8
--- /dev/null
+++ b/feature/pkg/modules/command.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "strings"
+
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// ModuleCommand handles the "command" module.
+func ModuleCommand(ctx context.Context, options ExecOptions) (string, string) {
+ // get host variable
+ ha, err := options.getAllVariables()
+ if err != nil {
+ return "", err.Error()
+ }
+ // get connector
+ conn, err := getConnector(ctx, options.Host, ha)
+ if err != nil {
+ return "", err.Error()
+ }
+ defer conn.Close(ctx)
+ // command string
+ command, err := variable.Extension2String(ha, options.Args)
+ if err != nil {
+ return "", err.Error()
+ }
+ // execute command
+ var stdout, stderr string
+ data, err := conn.ExecuteCommand(ctx, command)
+ if err != nil {
+ stderr = err.Error()
+ }
+ if data != nil {
+ stdout = strings.TrimSuffix(string(data), "\n")
+ }
+
+ return stdout, stderr
+}
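+
+// Example (sketch, mirroring the unit tests): executing a command with a
+// connector injected through the context; conn and vars are assumed to be
+// built by the caller:
+//
+//	ctx := context.WithValue(context.Background(), ConnKey, conn)
+//	stdout, stderr := ModuleCommand(ctx, ExecOptions{
+//		Host:     "node1",
+//		Args:     runtime.RawExtension{Raw: []byte(`echo hello`)},
+//		Variable: vars,
+//	})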
diff --git a/feature/pkg/modules/command_test.go b/feature/pkg/modules/command_test.go
new file mode 100644
index 000000000..e442189f7
--- /dev/null
+++ b/feature/pkg/modules/command_test.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestCommand(t *testing.T) {
+ testcases := []struct {
+ name string
+ opt ExecOptions
+ ctxFunc func() context.Context
+ exceptStdout string
+ exceptStderr string
+ }{
+ {
+ name: "non-host variable",
+ opt: ExecOptions{
+ Variable: &testVariable{},
+ },
+ ctxFunc: context.Background,
+ exceptStderr: "host is not set",
+ },
+ {
+ name: "exec command success",
+ ctxFunc: func() context.Context {
+ return context.WithValue(context.Background(), ConnKey, successConnector)
+ },
+ opt: ExecOptions{
+ Host: "test",
+ Args: runtime.RawExtension{Raw: []byte("echo success")},
+ Variable: &testVariable{},
+ },
+ exceptStdout: "success",
+ },
+ {
+ name: "exec command failed",
+ ctxFunc: func() context.Context { return context.WithValue(context.Background(), ConnKey, failedConnector) },
+ opt: ExecOptions{
+ Host: "test",
+ Args: runtime.RawExtension{Raw: []byte("echo success")},
+ Variable: &testVariable{},
+ },
+ exceptStderr: "failed",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(tc.ctxFunc(), time.Second*5)
+ defer cancel()
+
+ acStdout, acStderr := ModuleCommand(ctx, tc.opt)
+ assert.Equal(t, tc.exceptStdout, acStdout)
+ assert.Equal(t, tc.exceptStderr, acStderr)
+ })
+ }
+}
diff --git a/feature/pkg/modules/copy.go b/feature/pkg/modules/copy.go
new file mode 100644
index 000000000..85b6bc47c
--- /dev/null
+++ b/feature/pkg/modules/copy.go
@@ -0,0 +1,289 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/klog/v2"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/kubesphere/kubekey/v4/pkg/connector"
+
+ kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
+ "github.com/kubesphere/kubekey/v4/pkg/project"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+type copyArgs struct {
+ src string
+ content string
+ dest string
+ mode *int
+}
+
+func newCopyArgs(_ context.Context, raw runtime.RawExtension, vars map[string]any) (*copyArgs, error) {
+ var err error
+ ca := &copyArgs{}
+ args := variable.Extension2Variables(raw)
+ ca.src, _ = variable.StringVar(vars, args, "src")
+ ca.content, _ = variable.StringVar(vars, args, "content")
+ ca.dest, err = variable.StringVar(vars, args, "dest")
+ if err != nil {
+ return nil, errors.New("\"dest\" in args should be string")
+ }
+ ca.mode, _ = variable.IntVar(vars, args, "mode")
+
+ return ca, nil
+}
+
+// ModuleCopy handles the "copy" module.
+func ModuleCopy(ctx context.Context, options ExecOptions) (string, string) {
+ // get host variable
+ ha, err := options.getAllVariables()
+ if err != nil {
+ return "", err.Error()
+ }
+
+ ca, err := newCopyArgs(ctx, options.Args, ha)
+ if err != nil {
+ klog.V(4).ErrorS(err, "get copy args error", "task", ctrlclient.ObjectKeyFromObject(&options.Task))
+
+ return "", err.Error()
+ }
+
+ // get connector
+ conn, err := getConnector(ctx, options.Host, ha)
+ if err != nil {
+ return "", fmt.Sprintf("get connector error: %v", err)
+ }
+ defer conn.Close(ctx)
+
+ switch {
+ case ca.src != "": // copy local file to remote
+ return ca.copySrc(ctx, options, conn)
+ case ca.content != "":
+ return ca.copyContent(ctx, os.ModePerm, conn)
+ default:
+ return "", "either \"src\" or \"content\" must be provided."
+ }
+}
+
+// copySrc copies the src file to dest.
+func (ca copyArgs) copySrc(ctx context.Context, options ExecOptions, conn connector.Connector) (string, string) {
+ if filepath.IsAbs(ca.src) { // if src is absolute path. find it in local path
+ fileInfo, err := os.Stat(ca.src)
+ if err != nil {
+ return "", fmt.Sprintf(" get src file %s in local path error: %v", ca.src, err)
+ }
+
+ if fileInfo.IsDir() { // src is dir
+ if err := ca.absDir(ctx, conn); err != nil {
+ return "", fmt.Sprintf("sync copy absolute dir error %s", err)
+ }
+ } else { // src is file
+ if err := ca.absFile(ctx, fileInfo.Mode(), conn); err != nil {
+ return "", fmt.Sprintf("sync copy absolute dir error %s", err)
+ }
+ }
+ } else { // if src is not absolute path. find file in project
+ pj, err := project.New(ctx, options.Pipeline, false)
+ if err != nil {
+ return "", fmt.Sprintf("get project error: %v", err)
+ }
+
+ fileInfo, err := pj.Stat(ca.src, project.GetFileOption{IsFile: true, Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]})
+ if err != nil {
+ return "", fmt.Sprintf("get file %s from project error %v", ca.src, err)
+ }
+
+ if fileInfo.IsDir() {
+ if err := ca.relDir(ctx, pj, options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole], conn); err != nil {
+ return "", fmt.Sprintf("sync copy relative dir error %s", err)
+ }
+ } else {
+ if err := ca.relFile(ctx, pj, options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole], fileInfo.Mode(), conn); err != nil {
+ return "", fmt.Sprintf("sync copy relative dir error %s", err)
+ }
+ }
+ }
+
+ return StdoutSuccess, ""
+}
+
+// copyContent writes the content param to the dest file.
+func (ca copyArgs) copyContent(ctx context.Context, mode fs.FileMode, conn connector.Connector) (string, string) {
+ if strings.HasSuffix(ca.dest, "/") {
+ return "", "\"content\" should copy to a file"
+ }
+
+ if ca.mode != nil {
+ mode = os.FileMode(*ca.mode)
+ }
+
+ if err := conn.PutFile(ctx, []byte(ca.content), ca.dest, mode); err != nil {
+ return "", fmt.Sprintf("copy file error: %v", err)
+ }
+
+ return StdoutSuccess, ""
+}
+
+// relFile when copy.src is a relative file path, read it from the project and copy it to the remote host.
+func (ca copyArgs) relFile(ctx context.Context, pj project.Project, role string, mode fs.FileMode, conn connector.Connector) error {
+ data, err := pj.ReadFile(ca.src, project.GetFileOption{IsFile: true, Role: role})
+ if err != nil {
+ return fmt.Errorf("read file error: %w", err)
+ }
+
+ dest := ca.dest
+ if strings.HasSuffix(ca.dest, "/") {
+ dest = filepath.Join(ca.dest, filepath.Base(ca.src))
+ }
+
+ if ca.mode != nil {
+ mode = os.FileMode(*ca.mode)
+ }
+
+ if err := conn.PutFile(ctx, data, dest, mode); err != nil {
+ return fmt.Errorf("copy file error: %w", err)
+ }
+
+ return nil
+}
+
+// relDir when copy.src is a relative dir, walk all files in the project dir and copy them to the remote host.
+func (ca copyArgs) relDir(ctx context.Context, pj project.Project, role string, conn connector.Connector) error {
+ if err := pj.WalkDir(ca.src, project.GetFileOption{IsFile: true, Role: role}, func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return fmt.Errorf("walk dir %s error: %w", ca.src, err)
+ }
+ if d.IsDir() { // only copy files
+ return nil
+ }
+
+ info, err := d.Info()
+ if err != nil {
+ return fmt.Errorf("get file info error: %w", err)
+ }
+
+ mode := info.Mode()
+ if ca.mode != nil {
+ mode = os.FileMode(*ca.mode)
+ }
+
+ data, err := pj.ReadFile(path, project.GetFileOption{Role: role})
+ if err != nil {
+ return fmt.Errorf("read file error: %w", err)
+ }
+
+ dest := ca.dest
+ if strings.HasSuffix(ca.dest, "/") {
+ rel, err := pj.Rel(ca.src, path, project.GetFileOption{Role: role})
+ if err != nil {
+ return fmt.Errorf("get relative file path error: %w", err)
+ }
+ dest = filepath.Join(ca.dest, rel)
+ }
+
+ if err := conn.PutFile(ctx, data, dest, mode); err != nil {
+ return fmt.Errorf("copy file error: %w", err)
+ }
+
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// absFile when copy.src is an absolute file path, read it from the local filesystem and copy it to the remote host.
+func (ca copyArgs) absFile(ctx context.Context, mode fs.FileMode, conn connector.Connector) error {
+ data, err := os.ReadFile(ca.src)
+ if err != nil {
+ return fmt.Errorf("read file error: %w", err)
+ }
+
+ dest := ca.dest
+ if strings.HasSuffix(ca.dest, "/") {
+ dest = filepath.Join(ca.dest, filepath.Base(ca.src))
+ }
+
+ if ca.mode != nil {
+ mode = os.FileMode(*ca.mode)
+ }
+
+ if err := conn.PutFile(ctx, data, dest, mode); err != nil {
+ return fmt.Errorf("copy file error: %w", err)
+ }
+
+ return nil
+}
+
+// absDir when copy.src is an absolute dir, walk all files under it on the local filesystem and copy them to the remote host.
+func (ca copyArgs) absDir(ctx context.Context, conn connector.Connector) error {
+ if err := filepath.WalkDir(ca.src, func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return fmt.Errorf("walk dir %s error: %w", ca.src, err)
+ }
+
+ if d.IsDir() { // only copy files
+ return nil
+ }
+ // get file old mode
+ info, err := d.Info()
+ if err != nil {
+ return fmt.Errorf("get file info error: %w", err)
+ }
+
+ mode := info.Mode()
+ if ca.mode != nil {
+ mode = os.FileMode(*ca.mode)
+ }
+ // read file
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("read file error: %w", err)
+ }
+ // copy file to remote
+ dest := ca.dest
+ if strings.HasSuffix(ca.dest, "/") {
+ rel, err := filepath.Rel(ca.src, path)
+ if err != nil {
+ return fmt.Errorf("get relative file path error: %w", err)
+ }
+ dest = filepath.Join(ca.dest, rel)
+ }
+
+ if err := conn.PutFile(ctx, data, dest, mode); err != nil {
+ return fmt.Errorf("copy file error: %w", err)
+ }
+
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
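+
+// Example (illustrative sketch): the two arg shapes ModuleCopy accepts, as
+// playbook YAML; all paths and the mode are placeholders:
+//
+//	- name: copy a project file to the remote host
+//	  copy:
+//	    src: files/config.toml
+//	    dest: /etc/app/
+//	    mode: 0644
+//
+//	- name: render inline content to a remote file
+//	  copy:
+//	    content: "hello world"
+//	    dest: /etc/app/hello.txt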
diff --git a/feature/pkg/modules/copy_test.go b/feature/pkg/modules/copy_test.go
new file mode 100644
index 000000000..3cde7e82b
--- /dev/null
+++ b/feature/pkg/modules/copy_test.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestCopy(t *testing.T) {
+ testcases := []struct {
+ name string
+ opt ExecOptions
+ ctxFunc func() context.Context
+ exceptStdout string
+ exceptStderr string
+ }{
+ {
+ name: "src and content is empty",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{"dest": "hello world"}`),
+ },
+ Host: "local",
+ Variable: &testVariable{},
+ },
+ ctxFunc: func() context.Context {
+ return context.WithValue(context.Background(), ConnKey, successConnector)
+ },
+ exceptStderr: "either \"src\" or \"content\" must be provided.",
+ },
+ {
+ name: "dest is empty",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{"content": "hello world"}`),
+ },
+ Host: "local",
+ Variable: &testVariable{},
+ },
+ ctxFunc: func() context.Context {
+ return context.WithValue(context.Background(), ConnKey, successConnector)
+ },
+ exceptStderr: "\"dest\" in args should be string",
+ },
+ {
+ name: "content not copy to file",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{"content": "hello world", "dest": "/etc/"}`),
+ },
+ Host: "local",
+ Variable: &testVariable{},
+ },
+ ctxFunc: func() context.Context {
+ return context.WithValue(context.Background(), ConnKey, successConnector)
+ },
+ exceptStderr: "\"content\" should copy to a file",
+ },
+ {
+ name: "copy success",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{"content": "hello world", "dest": "/etc/test.txt"}`),
+ },
+ Host: "local",
+ Variable: &testVariable{},
+ },
+ ctxFunc: func() context.Context {
+ return context.WithValue(context.Background(), ConnKey, successConnector)
+ },
+ exceptStdout: StdoutSuccess,
+ },
+ {
+ name: "copy failed",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{"content": "hello world", "dest": "/etc/test.txt"}`),
+ },
+ Host: "local",
+ Variable: &testVariable{},
+ },
+ ctxFunc: func() context.Context {
+ return context.WithValue(context.Background(), ConnKey, failedConnector)
+ },
+ exceptStderr: "copy file error: failed",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(tc.ctxFunc(), time.Second*5)
+ defer cancel()
+
+ acStdout, acStderr := ModuleCopy(ctx, tc.opt)
+ assert.Equal(t, tc.exceptStdout, acStdout)
+ assert.Equal(t, tc.exceptStderr, acStderr)
+ })
+ }
+}
diff --git a/feature/pkg/modules/debug.go b/feature/pkg/modules/debug.go
new file mode 100644
index 000000000..4a2606772
--- /dev/null
+++ b/feature/pkg/modules/debug.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// ModuleDebug handles the "debug" module.
+func ModuleDebug(_ context.Context, options ExecOptions) (string, string) {
+ // get host variable
+ ha, err := options.getAllVariables()
+ if err != nil {
+ return "", err.Error()
+ }
+
+ args := variable.Extension2Variables(options.Args)
+ // var is defined. return the value of var
+ if varParam, err := variable.StringVar(ha, args, "var"); err == nil {
+ result, err := tmpl.ParseString(ha, fmt.Sprintf("{{ %s }}", varParam))
+ if err != nil {
+ return "", fmt.Sprintf("failed to parse var: %v", err)
+ }
+
+ return result, ""
+ }
+ // msg is defined. return the actual msg
+ if msgParam, err := variable.StringVar(ha, args, "msg"); err == nil {
+ return msgParam, ""
+ }
+
+ return "", "unknown args for debug. only support var or msg"
+}
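+
+// Example (illustrative sketch): the two arg forms ModuleDebug accepts; the
+// variable name is hypothetical:
+//
+//	- name: print a single variable
+//	  debug:
+//	    var: .kube_version
+//
+//	- name: print a templated message
+//	  debug:
+//	    msg: "installing {{ .kube_version }}"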
diff --git a/feature/pkg/modules/debug_test.go b/feature/pkg/modules/debug_test.go
new file mode 100644
index 000000000..a5226e0b1
--- /dev/null
+++ b/feature/pkg/modules/debug_test.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestDebug(t *testing.T) {
+ testcases := []struct {
+ name string
+ opt ExecOptions
+ exceptStdout string
+ exceptStderr string
+ }{
+ {
+ name: "non-var and non-msg",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{},
+ Host: "local",
+ Variable: &testVariable{},
+ },
+ exceptStderr: "unknown args for debug. only support var or msg",
+ },
+ {
+ name: "var value",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{"var": ".k"}`),
+ },
+ Host: "local",
+ Variable: &testVariable{
+ value: map[string]any{
+ "k": "v",
+ },
+ },
+ },
+ exceptStdout: "v",
+ },
+ {
+ name: "msg value",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{"msg": "{{ .k }}"}`),
+ },
+ Host: "local",
+ Variable: &testVariable{
+ value: map[string]any{
+ "k": "v",
+ },
+ },
+ },
+ exceptStdout: "v",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+
+ acStdout, acStderr := ModuleDebug(ctx, tc.opt)
+ assert.Equal(t, tc.exceptStdout, acStdout)
+ assert.Equal(t, tc.exceptStderr, acStderr)
+ })
+ }
+}
diff --git a/feature/pkg/modules/fetch.go b/feature/pkg/modules/fetch.go
new file mode 100644
index 000000000..051455105
--- /dev/null
+++ b/feature/pkg/modules/fetch.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "k8s.io/klog/v2"
+
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// ModuleFetch handles the "fetch" module.
+func ModuleFetch(ctx context.Context, options ExecOptions) (string, string) {
+ // get host variable
+ ha, err := options.getAllVariables()
+ if err != nil {
+ return "", err.Error()
+ }
+ // check args
+ args := variable.Extension2Variables(options.Args)
+ srcParam, err := variable.StringVar(ha, args, "src")
+ if err != nil {
+ return "", "\"src\" in args should be string"
+ }
+ destParam, err := variable.StringVar(ha, args, "dest")
+ if err != nil {
+ return "", "\"dest\" in args should be string"
+ }
+
+ // get connector
+ conn, err := getConnector(ctx, options.Host, ha)
+ if err != nil {
+ return "", fmt.Sprintf("get connector error: %v", err)
+ }
+ defer conn.Close(ctx)
+
+ // fetch file
+ if _, err := os.Stat(filepath.Dir(destParam)); os.IsNotExist(err) {
+ if err := os.MkdirAll(filepath.Dir(destParam), os.ModePerm); err != nil {
+ return "", fmt.Sprintf("failed to create dest dir: %v", err)
+ }
+ }
+
+ destFile, err := os.Create(destParam)
+ if err != nil {
+ klog.V(4).ErrorS(err, "failed to create dest file")
+
+ return "", err.Error()
+ }
+ defer destFile.Close()
+
+ if err := conn.FetchFile(ctx, srcParam, destFile); err != nil {
+ return "", fmt.Sprintf("failed to fetch file: %v", err)
+ }
+
+ return StdoutSuccess, ""
+}
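+
+// Example (illustrative sketch): fetching a remote file into the local work
+// dir; both paths are placeholders:
+//
+//	- name: fetch kubelet logs
+//	  fetch:
+//	    src: /var/log/kubelet.log
+//	    dest: /tmp/kubekey/kubelet.log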
diff --git a/feature/pkg/modules/fetch_test.go b/feature/pkg/modules/fetch_test.go
new file mode 100644
index 000000000..537a6c213
--- /dev/null
+++ b/feature/pkg/modules/fetch_test.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestFetch(t *testing.T) {
+ testcases := []struct {
+ name string
+ opt ExecOptions
+ ctxFunc func() context.Context
+ exceptStdout string
+ exceptStderr string
+ }{
+ {
+ name: "src and content is empty",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{},
+ Host: "local",
+ Variable: &testVariable{},
+ },
+ ctxFunc: func() context.Context {
+ return context.WithValue(context.Background(), ConnKey, successConnector)
+ },
+ exceptStderr: "\"src\" in args should be string",
+ },
+ {
+ name: "dest is empty",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{"src": "/etc/test.txt"}`),
+ },
+ Host: "local",
+ Variable: &testVariable{},
+ },
+ ctxFunc: func() context.Context {
+ return context.WithValue(context.Background(), ConnKey, successConnector)
+ },
+ exceptStderr: "\"dest\" in args should be string",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(tc.ctxFunc(), time.Second*5)
+ defer cancel()
+
+ acStdout, acStderr := ModuleFetch(ctx, tc.opt)
+ assert.Equal(t, tc.exceptStdout, acStdout)
+ assert.Equal(t, tc.exceptStderr, acStderr)
+ })
+ }
+}
diff --git a/feature/pkg/modules/gen_cert.go b/feature/pkg/modules/gen_cert.go
new file mode 100644
index 000000000..3ea26ee13
--- /dev/null
+++ b/feature/pkg/modules/gen_cert.go
@@ -0,0 +1,467 @@
+package modules
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ cryptorand "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "net"
+ "os"
+ "time"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apimachinery/pkg/util/validation"
+ cgutilcert "k8s.io/client-go/util/cert"
+ "k8s.io/client-go/util/keyutil"
+ "k8s.io/klog/v2"
+ netutils "k8s.io/utils/net"
+
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+const (
+ // defaultSignCertAfter defines the default validity period for signed certificates (10 years).
+ defaultSignCertAfter = time.Hour * 24 * 365 * 10
+ // CertificateBlockType is a possible value for pem.Block.Type.
+ certificateBlockType = "CERTIFICATE"
+ rsaKeySize = 2048
+
+ // policies for generating files.
+ // policyAlways always generates a new cert, overwriting any existing cert.
+ policyAlways = "Always"
+ // policyIfNotPresent validates an existing cert if present; otherwise generates a new cert.
+ policyIfNotPresent = "IfNotPresent"
+)
+
+var defaultAltName = &cgutilcert.AltNames{
+ DNSNames: []string{"localhost"},
+ IPs: []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback},
+}
+
+type genCertArgs struct {
+ rootKey string
+ rootCert string
+ date time.Duration
+ policy string
+ sans []string
+ cn string
+ outKey string
+ outCert string
+}
+
+// signedCertificate generates a certificate signed by the root certificate.
+func (gca genCertArgs) signedCertificate(cfg *cgutilcert.Config) (string, string) {
+ parentKey, err := TryLoadKeyFromDisk(gca.rootKey)
+ if err != nil {
+ return "", fmt.Sprintf("failed to load root key: %v", err)
+ }
+ parentCert, _, err := TryLoadCertChainFromDisk(gca.rootCert)
+ if err != nil {
+ return "", fmt.Sprintf("failed to load root certificate: %v", err)
+ }
+
+ if gca.policy == policyIfNotPresent {
+ if _, err := TryLoadKeyFromDisk(gca.outKey); err != nil {
+ klog.V(4).InfoS("Failed to load out key, new it")
+
+ goto NEW
+ }
+
+ existCert, intermediates, err := TryLoadCertChainFromDisk(gca.outCert)
+ if err != nil {
+ klog.V(4).InfoS("Failed to load out cert, new it")
+
+ goto NEW
+ }
+ // check that the existing cert is valid now, chains to the root cert, and covers the configured SANs
+ if err := ValidateCertPeriod(existCert, 0); err != nil {
+ return "", fmt.Sprintf("failed to ValidateCertPeriod: %v", err)
+ }
+ if err := VerifyCertChain(existCert, intermediates, parentCert); err != nil {
+ return "", fmt.Sprintf("failed to VerifyCertChain: %v", err)
+ }
+ if err := validateCertificateWithConfig(existCert, gca.outCert, cfg); err != nil {
+ return "", fmt.Sprintf("failed to validateCertificateWithConfig: %v", err)
+ }
+
+ return StdoutSkip, ""
+ }
+NEW:
+ newKey, err := rsa.GenerateKey(cryptorand.Reader, rsaKeySize)
+ if err != nil {
+ return "", fmt.Sprintf("generate rsa key error: %v", err)
+ }
+ newCert, err := NewSignedCert(*cfg, gca.date, newKey, parentCert, parentKey, true)
+ if err != nil {
+ return "", fmt.Sprintf("failed to generate certificate: %v", err)
+ }
+
+ // write key and cert to file
+ if err := WriteKey(gca.outKey, newKey, gca.policy); err != nil {
+ return "", fmt.Sprintf("failed to write key: %v", err)
+ }
+ if err := WriteCert(gca.outCert, newCert, gca.policy); err != nil {
+ return "", fmt.Sprintf("failed to write certificate: %v", err)
+ }
+
+ return StdoutSuccess, ""
+}
+
+// selfSignedCertificate generates a self-signed certificate.
+func (gca genCertArgs) selfSignedCertificate(cfg *cgutilcert.Config) (string, string) {
+ newKey, err := rsa.GenerateKey(cryptorand.Reader, rsaKeySize)
+ if err != nil {
+ return "", fmt.Sprintf("generate rsa key error: %v", err)
+ }
+
+ newCert, err := NewSelfSignedCACert(*cfg, gca.date, newKey)
+ if err != nil {
+ return "", fmt.Sprintf("failed to generate self-signed certificate: %v", err)
+ }
+ // write key and cert to file
+ if err := WriteKey(gca.outKey, newKey, gca.policy); err != nil {
+ return "", fmt.Sprintf("failed to write key: %v", err)
+ }
+ if err := WriteCert(gca.outCert, newCert, gca.policy); err != nil {
+ return "", fmt.Sprintf("failed to write certificate: %v", err)
+ }
+
+ return StdoutSuccess, ""
+}
+
+func newGenCertArgs(_ context.Context, raw runtime.RawExtension, vars map[string]any) (*genCertArgs, error) {
+ gca := &genCertArgs{}
+ // args
+ args := variable.Extension2Variables(raw)
+ gca.rootKey, _ = variable.StringVar(vars, args, "root_key")
+ gca.rootCert, _ = variable.StringVar(vars, args, "root_cert")
+ gca.date, _ = variable.DurationVar(vars, args, "date")
+ gca.policy, _ = variable.StringVar(vars, args, "policy")
+ gca.sans, _ = variable.StringSliceVar(vars, args, "sans")
+ gca.cn, _ = variable.StringVar(vars, args, "cn")
+ gca.outKey, _ = variable.StringVar(vars, args, "out_key")
+ gca.outCert, _ = variable.StringVar(vars, args, "out_cert")
+ // check args
+ if gca.policy != policyAlways && gca.policy != policyIfNotPresent {
+ return nil, errors.New("\"policy\" should be one of [Always, IfNotPresent]")
+ }
+ if gca.outKey == "" || gca.outCert == "" {
+ return nil, errors.New("\"out_key\" or \"out_cert\" in args should be string")
+ }
+ if gca.cn == "" {
+ return nil, errors.New("\"cn\" in args should be string")
+ }
+
+ return gca, nil
+}
+
+// ModuleGenCert generates a certificate file.
+// If root_key or root_cert is empty, it generates a self-signed certificate.
+func ModuleGenCert(ctx context.Context, options ExecOptions) (string, string) {
+ // get host variable
+ ha, err := options.getAllVariables()
+ if err != nil {
+ return "", err.Error()
+ }
+
+ gca, err := newGenCertArgs(ctx, options.Args, ha)
+ if err != nil {
+ return "", err.Error()
+ }
+
+ cfg := &cgutilcert.Config{
+ CommonName: gca.cn,
+ Organization: []string{"kubekey"},
+ AltNames: appendSANsToAltNames(defaultAltName, gca.sans),
+ }
+
+ switch {
+ case gca.rootKey == "" || gca.rootCert == "":
+ return gca.selfSignedCertificate(cfg)
+ default:
+ return gca.signedCertificate(cfg)
+ }
+}
+
+// WriteKey stores the given key at the given location
+func WriteKey(outKey string, key crypto.Signer, policy string) error {
+ if _, err := os.Stat(outKey); err == nil && policy == policyIfNotPresent {
+ // skip
+ return nil
+ }
+ if key == nil {
+ return errors.New("private key cannot be nil when writing to file")
+ }
+
+ encoded, err := keyutil.MarshalPrivateKeyToPEM(key)
+ if err != nil {
+ return fmt.Errorf("unable to marshal private key to PEM, error: %w", err)
+ }
+ if err := keyutil.WriteKey(outKey, encoded); err != nil {
+ return fmt.Errorf("unable to write private key to file %s, error: %w", outKey, err)
+ }
+
+ return nil
+}
+
+// WriteCert stores the given certificate at the given location
+func WriteCert(outCert string, cert *x509.Certificate, policy string) error {
+ if _, err := os.Stat(outCert); err == nil && policy == policyIfNotPresent {
+ // skip
+ return nil
+ }
+ if cert == nil {
+ return errors.New("certificate cannot be nil when writing to file")
+ }
+
+ if err := cgutilcert.WriteCert(outCert, EncodeCertPEM(cert)); err != nil {
+ return fmt.Errorf("unable to write certificate to file %s, error: %w", outCert, err)
+ }
+
+ return nil
+}
+
+// EncodeCertPEM returns PEM-encoded certificate data
+func EncodeCertPEM(cert *x509.Certificate) []byte {
+ block := pem.Block{
+ Type: certificateBlockType,
+ Bytes: cert.Raw,
+ }
+
+ return pem.EncodeToMemory(&block)
+}
+
+// TryLoadKeyFromDisk tries to load the key from the disk and validates that it is valid
+func TryLoadKeyFromDisk(rootKey string) (crypto.Signer, error) {
+ // Parse the private key from a file
+ privKey, err := keyutil.PrivateKeyFromFile(rootKey)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't load the private key file %s, error: %w", rootKey, err)
+ }
+
+ // Allow RSA and ECDSA formats only
+ var key crypto.Signer
+ switch k := privKey.(type) {
+ case *rsa.PrivateKey:
+ key = k
+ case *ecdsa.PrivateKey:
+ key = k
+ default:
+ return nil, fmt.Errorf("the private key file %s is neither in RSA nor ECDSA format", rootKey)
+ }
+
+ return key, nil
+}
+
+// TryLoadCertChainFromDisk tries to load the cert chain from the disk
+func TryLoadCertChainFromDisk(rootCert string) (*x509.Certificate, []*x509.Certificate, error) {
+ certs, err := cgutilcert.CertsFromFile(rootCert)
+ if err != nil {
+ return nil, nil, fmt.Errorf("couldn't load the certificate file %s, error: %w", rootCert, err)
+ }
+
+ cert := certs[0]
+ intermediates := certs[1:]
+
+ return cert, intermediates, nil
+}
+
+// appendSANsToAltNames parses SANs from a list of strings and adds them to altNames for use on a specific cert.
+// altNames is passed in as a pointer, and the struct is modified in place:
+// valid IP address strings are parsed and added to altNames.IPs as net.IPs,
+// RFC-1123 compliant DNS strings are added to altNames.DNSNames as strings,
+// RFC-1123 compliant wildcard DNS strings are added to altNames.DNSNames as strings,
+// and anything else is dropped with a warning.
+func appendSANsToAltNames(altNames *cgutilcert.AltNames, sans []string) cgutilcert.AltNames {
+ for _, altname := range sans {
+ if ip := netutils.ParseIPSloppy(altname); ip != nil {
+ altNames.IPs = append(altNames.IPs, ip)
+ } else if len(validation.IsDNS1123Subdomain(altname)) == 0 {
+ altNames.DNSNames = append(altNames.DNSNames, altname)
+ } else if len(validation.IsWildcardDNS1123Subdomain(altname)) == 0 {
+ altNames.DNSNames = append(altNames.DNSNames, altname)
+ } else {
+ klog.V(4).Infof(
+ "[certificates] WARNING: Added to the '%s' SAN failed, because it is not a valid IP or RFC-1123 compliant DNS entry\n",
+ altname,
+ )
+ }
+ }
+
+ return *altNames
+}
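+
+// For example (illustrative): given sans = ["10.0.0.1", "kube.example.com",
+// "*.example.com", "bad_name!"], the first entry is appended to altNames.IPs,
+// the next two to altNames.DNSNames, and the last is dropped with a warning.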
+
+// NewSelfSignedCACert creates a CA certificate
+func NewSelfSignedCACert(cfg cgutilcert.Config, after time.Duration, key crypto.Signer) (*x509.Certificate, error) {
+ now := time.Now()
+ // draw a uniform random value in [0, max-1), then add 1 to make the serial uniform in [1, max).
+ serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1))
+ if err != nil {
+ return nil, err
+ }
+ serial = new(big.Int).Add(serial, big.NewInt(1))
+
+ notBefore := now.UTC()
+ if !cfg.NotBefore.IsZero() {
+ notBefore = cfg.NotBefore.UTC()
+ }
+ if after == 0 { // default 10 years
+ after = defaultSignCertAfter
+ }
+
+ tmpl := x509.Certificate{
+ SerialNumber: serial,
+ Subject: pkix.Name{
+ CommonName: cfg.CommonName,
+ Organization: cfg.Organization,
+ },
+ DNSNames: []string{cfg.CommonName},
+ NotBefore: notBefore,
+ NotAfter: now.Add(after).UTC(),
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ IsCA: true,
+ }
+
+ certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
+ if err != nil {
+ return nil, err
+ }
+
+ return x509.ParseCertificate(certDERBytes)
+}
+
+// NewSignedCert creates a signed certificate using the given CA certificate and key
+func NewSignedCert(cfg cgutilcert.Config, after time.Duration, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer, isCA bool) (*x509.Certificate, error) {
+ // draw a uniform random value in [0, max-1), then add 1 to make the serial uniform in [1, max).
+ serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1))
+ if err != nil {
+ return nil, err
+ }
+ serial = new(big.Int).Add(serial, big.NewInt(1))
+
+ if cfg.CommonName == "" {
+ return nil, errors.New("must specify a CommonName")
+ }
+
+ keyUsage := x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature
+ if isCA {
+ keyUsage |= x509.KeyUsageCertSign
+ }
+
+ RemoveDuplicateAltNames(&cfg.AltNames)
+
+ if after == 0 {
+ after = defaultSignCertAfter
+ }
+
+ certTmpl := x509.Certificate{
+ Subject: pkix.Name{
+ CommonName: cfg.CommonName,
+ Organization: cfg.Organization,
+ },
+ DNSNames: cfg.AltNames.DNSNames,
+ IPAddresses: cfg.AltNames.IPs,
+ SerialNumber: serial,
+ NotBefore: caCert.NotBefore,
+ NotAfter: time.Now().Add(after).UTC(),
+ KeyUsage: keyUsage,
+ ExtKeyUsage: cfg.Usages,
+ BasicConstraintsValid: true,
+ IsCA: isCA,
+ }
+
+ certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return x509.ParseCertificate(certDERBytes)
+}
+
+// RemoveDuplicateAltNames removes duplicate items in altNames.
+func RemoveDuplicateAltNames(altNames *cgutilcert.AltNames) {
+ if altNames == nil {
+ return
+ }
+
+ if altNames.DNSNames != nil {
+ altNames.DNSNames = sets.List(sets.New(altNames.DNSNames...))
+ }
+
+ ipsKeys := make(map[string]struct{})
+ var ips []net.IP
+ for _, one := range altNames.IPs {
+ if _, ok := ipsKeys[one.String()]; !ok {
+ ipsKeys[one.String()] = struct{}{}
+ ips = append(ips, one)
+ }
+ }
+ altNames.IPs = ips
+}
+
+// ValidateCertPeriod checks if the certificate is valid relative to the current time
+// (+/- offset)
+func ValidateCertPeriod(cert *x509.Certificate, offset time.Duration) error {
+ period := fmt.Sprintf("NotBefore: %v, NotAfter: %v", cert.NotBefore, cert.NotAfter)
+ now := time.Now().Add(offset)
+ if now.Before(cert.NotBefore) {
+ return fmt.Errorf("the certificate is not valid yet: %s", period)
+ }
+ if now.After(cert.NotAfter) {
+ return fmt.Errorf("the certificate has expired: %s", period)
+ }
+
+ return nil
+}
+
+// VerifyCertChain verifies that a certificate has a valid chain of
+// intermediate CAs back to the root CA
+func VerifyCertChain(cert *x509.Certificate, intermediates []*x509.Certificate, root *x509.Certificate) error {
+ rootPool := x509.NewCertPool()
+ rootPool.AddCert(root)
+
+ intermediatePool := x509.NewCertPool()
+ for _, c := range intermediates {
+ intermediatePool.AddCert(c)
+ }
+
+ verifyOptions := x509.VerifyOptions{
+ Roots: rootPool,
+ Intermediates: intermediatePool,
+ KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
+ }
+
+ if _, err := cert.Verify(verifyOptions); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// validateCertificateWithConfig makes sure that a given certificate is valid at
+// least for the SANs defined in the configuration.
+func validateCertificateWithConfig(cert *x509.Certificate, baseName string, cfg *cgutilcert.Config) error {
+ for _, dnsName := range cfg.AltNames.DNSNames {
+ if err := cert.VerifyHostname(dnsName); err != nil {
+ return fmt.Errorf("certificate %s is invalid, error: %w", baseName, err)
+ }
+ }
+
+ for _, ipAddress := range cfg.AltNames.IPs {
+ if err := cert.VerifyHostname(ipAddress.String()); err != nil {
+ return fmt.Errorf("certificate %s is invalid, error: %w", baseName, err)
+ }
+ }
+
+ return nil
+}
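+
+// Example (illustrative sketch): generating a self-signed root CA and then a
+// cert signed by it; every path and name below is a placeholder:
+//
+//	- name: generate root ca
+//	  gen_cert:
+//	    policy: IfNotPresent
+//	    cn: root-ca
+//	    out_key: /etc/ssl/root-ca.key
+//	    out_cert: /etc/ssl/root-ca.crt
+//
+//	- name: generate server cert signed by the root ca
+//	  gen_cert:
+//	    policy: IfNotPresent
+//	    cn: server
+//	    sans: ["localhost", "127.0.0.1"]
+//	    root_key: /etc/ssl/root-ca.key
+//	    root_cert: /etc/ssl/root-ca.crt
+//	    out_key: /etc/ssl/server.key
+//	    out_cert: /etc/ssl/server.crt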
diff --git a/feature/pkg/modules/gen_cert_test.go b/feature/pkg/modules/gen_cert_test.go
new file mode 100644
index 000000000..eb27859d7
--- /dev/null
+++ b/feature/pkg/modules/gen_cert_test.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestModuleGenCert(t *testing.T) {
+ testcases := []struct {
+ name string
+ opt ExecOptions
+ exceptStdout string
+ exceptStderr string
+ }{
+ {
+ name: "gen root cert",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{
+"policy": "{{- .policy -}}\n",
+"sans": "[\"localhost\"]",
+"cn": "test",
+"out_key": "./test_gen_cert/test-key.pem",
+"out_cert": "./test_gen_cert/test-crt.pem"
+}`),
+ },
+ Host: "local",
+ Variable: &testVariable{
+ value: map[string]any{
+ "policy": "IfNotPresent",
+ },
+ },
+ },
+ exceptStdout: "success",
+ },
+ }
+
+ if _, err := os.Stat("./test_gen_cert"); os.IsNotExist(err) {
+ if err := os.Mkdir("./test_gen_cert", os.ModePerm); err != nil {
+ t.Fatal(err)
+ }
+ }
+ defer os.RemoveAll("./test_gen_cert")
+
+ for _, testcase := range testcases {
+ t.Run(testcase.name, func(t *testing.T) {
+ stdout, stderr := ModuleGenCert(context.Background(), testcase.opt)
+ assert.Equal(t, testcase.exceptStdout, stdout)
+ assert.Equal(t, testcase.exceptStderr, stderr)
+ })
+ }
+}
diff --git a/feature/pkg/modules/image.go b/feature/pkg/modules/image.go
new file mode 100644
index 000000000..cae90dce9
--- /dev/null
+++ b/feature/pkg/modules/image.go
@@ -0,0 +1,498 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+
+ imagev1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
+ "oras.land/oras-go/v2"
+ "oras.land/oras-go/v2/registry"
+ "oras.land/oras-go/v2/registry/remote"
+ "oras.land/oras-go/v2/registry/remote/auth"
+
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+type imageArgs struct {
+ pull *imagePullArgs
+ push *imagePushArgs
+}
+
+type imagePullArgs struct {
+ manifests []string
+ skipTLSVerify *bool
+ username string
+ password string
+}
+
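+// pull copies each manifest from its remote registry into the local OCI
+// layout dir under the work dir, authenticating with the configured
+// credentials.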
+func (i imagePullArgs) pull(ctx context.Context) error {
+ for _, img := range i.manifests {
+ src, err := remote.NewRepository(img)
+ if err != nil {
+ return fmt.Errorf("failed to get remote image: %w", err)
+ }
+ src.Client = &auth.Client{
+ Client: &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: *i.skipTLSVerify,
+ },
+ },
+ },
+ Cache: auth.NewCache(),
+ Credential: auth.StaticCredential(src.Reference.Registry, auth.Credential{
+ Username: i.username,
+ Password: i.password,
+ }),
+ }
+
+ dst, err := newLocalRepository(filepath.Join(domain, src.Reference.Repository)+":"+src.Reference.Reference,
+ filepath.Join(_const.GetWorkDir(), _const.ArtifactDir, _const.ArtifactImagesDir))
+ if err != nil {
+ return fmt.Errorf("failed to get local image: %w", err)
+ }
+
+ if _, err = oras.Copy(ctx, src, src.Reference.Reference, dst, "", oras.DefaultCopyOptions); err != nil {
+ return fmt.Errorf("failed to copy image: %w", err)
+ }
+ }
+
+ return nil
+}
+
+type imagePushArgs struct {
+ imagesDir string
+ skipTLSVerify *bool
+ registry string
+ username string
+ password string
+ namespace string
+}
+
+// push pushes images from the local dir to the remote registry.
+func (i imagePushArgs) push(ctx context.Context) error {
+ manifests, err := findLocalImageManifests(i.imagesDir)
+ klog.V(5).Info("manifests found", "manifests", manifests)
+ if err != nil {
+ return fmt.Errorf("failed to find local image manifests: %w", err)
+ }
+
+ for _, img := range manifests {
+ src, err := newLocalRepository(filepath.Join(domain, img), i.imagesDir)
+ if err != nil {
+ return fmt.Errorf("failed to get local image: %w", err)
+ }
+ repo := src.Reference.Repository
+ if i.namespace != "" {
+ repo = filepath.Join(i.namespace, filepath.Base(repo))
+ }
+
+ dst, err := remote.NewRepository(filepath.Join(i.registry, repo) + ":" + src.Reference.Reference)
+ if err != nil {
+ return fmt.Errorf("failed to get remote repo: %w", err)
+ }
+ dst.Client = &auth.Client{
+ Client: &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: *i.skipTLSVerify,
+ },
+ },
+ },
+ Cache: auth.NewCache(),
+ Credential: auth.StaticCredential(i.registry, auth.Credential{
+ Username: i.username,
+ Password: i.password,
+ }),
+ }
+
+ if _, err = oras.Copy(ctx, src, src.Reference.Reference, dst, "", oras.DefaultCopyOptions); err != nil {
+ return fmt.Errorf("failed to copy image: %w", err)
+ }
+ }
+
+ return nil
+}
+
+func newImageArgs(_ context.Context, raw runtime.RawExtension, vars map[string]any) (*imageArgs, error) {
+ ia := &imageArgs{}
+ // check args
+ args := variable.Extension2Variables(raw)
+ if pullArgs, ok := args["pull"]; ok {
+ pull, ok := pullArgs.(map[string]any)
+ if !ok {
+ return nil, errors.New("\"pull\" should be map")
+ }
+ ipl := &imagePullArgs{}
+ ipl.manifests, _ = variable.StringSliceVar(vars, pull, "manifests")
+ ipl.username, _ = variable.StringVar(vars, pull, "username")
+ ipl.password, _ = variable.StringVar(vars, pull, "password")
+ ipl.skipTLSVerify, _ = variable.BoolVar(vars, pull, "skipTLSVerify")
+ if ipl.skipTLSVerify == nil {
+ ipl.skipTLSVerify = ptr.To(false)
+ }
+ // check args
+ if len(ipl.manifests) == 0 {
+ return nil, errors.New("\"pull.manifests\" is required")
+ }
+ ia.pull = ipl
+ }
+ // push to private registry. if namespace_override is not empty, it overrides
+ // the namespace of the image manifests (a namespace may contain multiple sub-paths).
+ if pushArgs, ok := args["push"]; ok {
+ push, ok := pushArgs.(map[string]any)
+ if !ok {
+ return nil, errors.New("\"push\" should be map")
+ }
+
+ ips := &imagePushArgs{}
+ ips.registry, _ = variable.StringVar(vars, push, "registry")
+ ips.username, _ = variable.StringVar(vars, push, "username")
+ ips.password, _ = variable.StringVar(vars, push, "password")
+ ips.namespace, _ = variable.StringVar(vars, push, "namespace_override")
+ ips.imagesDir, _ = variable.StringVar(vars, push, "images_dir")
+ ips.skipTLSVerify, _ = variable.BoolVar(vars, push, "skipTLSVerify")
+ if ips.skipTLSVerify == nil {
+ ips.skipTLSVerify = ptr.To(false)
+ }
+ // check args
+ if ips.registry == "" {
+ return nil, errors.New("\"push.registry\" is required")
+ }
+ if ips.imagesDir == "" {
+ return nil, errors.New("\"push.images_dir\" is required")
+ }
+ ia.push = ips
+ }
+
+ return ia, nil
+}
+
+// ModuleImage handles the "image" module.
+func ModuleImage(ctx context.Context, options ExecOptions) (string, string) {
+ // get host variable
+ ha, err := options.getAllVariables()
+ if err != nil {
+ return "", err.Error()
+ }
+
+ ia, err := newImageArgs(ctx, options.Args, ha)
+ if err != nil {
+ return "", err.Error()
+ }
+
+ // pull image manifests to local dir
+ if ia.pull != nil {
+ if err := ia.pull.pull(ctx); err != nil {
+ return "", fmt.Sprintf("failed to pull image: %v", err)
+ }
+ }
+ // push image to private registry
+ if ia.push != nil {
+ if err := ia.push.push(ctx); err != nil {
+ return "", fmt.Sprintf("failed to push image: %v", err)
+ }
+ }
+
+ return StdoutSuccess, ""
+}
+
+// findLocalImageManifests lists the manifests in the local image dir as full image names ("repository:tag").
+func findLocalImageManifests(localDir string) ([]string, error) {
+ if _, err := os.Stat(localDir); err != nil {
+ klog.V(4).ErrorS(err, "failed to stat local directory", "image_dir", localDir)
+ // images dir does not exist, skip
+ return make([]string, 0), nil
+ }
+
+ var manifests []string
+ if err := filepath.WalkDir(localDir, func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if path == filepath.Join(localDir, "blobs") {
+ return filepath.SkipDir
+ }
+
+ if d.IsDir() || filepath.Base(path) == "manifests" {
+ return nil
+ }
+
+ file, err := os.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ var data map[string]any
+ if err := json.Unmarshal(file, &data); err != nil {
+ // skip unexpected (e.g. empty) files
+ klog.V(4).ErrorS(err, "unmarshal manifests file error", "file", path)
+
+ return nil
+ }
+
+ mediaType, ok := data["mediaType"].(string)
+ if !ok {
+ return errors.New("invalid mediaType")
+ }
+ if mediaType == imagev1.MediaTypeImageIndex || mediaType == "application/vnd.docker.distribution.manifest.list.v2+json" {
+ subpath, err := filepath.Rel(localDir, path)
+ if err != nil {
+ return err
+ }
+ // subpath is <repository>/manifests/<tag>; drop the "manifests" dir and join repository and tag
+ manifests = append(manifests, filepath.Dir(filepath.Dir(subpath))+":"+filepath.Base(subpath))
+ }
+
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+
+ return manifests, nil
+}
+
+// newLocalRepository returns a repository backed by a local image dir.
+func newLocalRepository(reference, localDir string) (*remote.Repository, error) {
+ ref, err := registry.ParseReference(reference)
+ if err != nil {
+ return nil, err
+ }
+
+ return &remote.Repository{
+ Reference: ref,
+ Client: &http.Client{Transport: &imageTransport{baseDir: localDir}},
+ }, nil
+}
+
+var responseNotFound = &http.Response{Proto: "Local", StatusCode: http.StatusNotFound}
+var responseNotAllowed = &http.Response{Proto: "Local", StatusCode: http.StatusMethodNotAllowed}
+var responseServerError = &http.Response{Proto: "Local", StatusCode: http.StatusInternalServerError}
+var responseCreated = &http.Response{Proto: "Local", StatusCode: http.StatusCreated}
+var responseOK = &http.Response{Proto: "Local", StatusCode: http.StatusOK}
+
+const domain = "internal"
+const apiPrefix = "/v2/"
+
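+// imageTransport implements http.RoundTripper over a local OCI layout dir,
+// emulating just enough of the registry v2 API (HEAD/GET/POST/PUT on blobs
+// and manifests) for oras.Copy to read and write local images.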
+type imageTransport struct {
+ baseDir string
+}
+
+// RoundTrip serves registry HTTP requests from the local image dir.
+func (i imageTransport) RoundTrip(request *http.Request) (*http.Response, error) {
+ switch request.Method {
+ case http.MethodHead: // check if file exist
+ return i.head(request)
+ case http.MethodPost:
+ return i.post(request)
+ case http.MethodPut:
+ return i.put(request)
+ case http.MethodGet:
+ return i.get(request)
+ default:
+ return responseNotAllowed, nil
+ }
+}
+
+// head handles http.MethodHead: checks whether the file exists in the blobs or manifests dir.
+func (i imageTransport) head(request *http.Request) (*http.Response, error) {
+ if strings.HasSuffix(filepath.Dir(request.URL.Path), "blobs") { // blobs
+ filename := filepath.Join(i.baseDir, "blobs", filepath.Base(request.URL.Path))
+ if _, err := os.Stat(filename); err != nil {
+ klog.V(4).ErrorS(err, "failed to stat blobs", "filename", filename)
+
+ return responseNotFound, nil
+ }
+
+ return responseOK, nil
+ } else if strings.HasSuffix(filepath.Dir(request.URL.Path), "manifests") { // manifests
+ filename := filepath.Join(i.baseDir, strings.TrimPrefix(request.URL.Path, apiPrefix))
+ if _, err := os.Stat(filename); err != nil {
+ klog.V(4).ErrorS(err, "failed to stat blobs", "filename", filename)
+
+ return responseNotFound, nil
+ }
+
+ file, err := os.ReadFile(filename)
+ if err != nil {
+ klog.V(4).ErrorS(err, "failed to read file", "filename", filename)
+
+ return responseServerError, nil
+ }
+
+ var data map[string]any
+ if err := json.Unmarshal(file, &data); err != nil {
+ klog.V(4).ErrorS(err, "failed to unmarshal file", "filename", filename)
+
+ return responseServerError, nil
+ }
+
+ mediaType, ok := data["mediaType"].(string)
+ if !ok {
+ klog.V(4).ErrorS(nil, "unknown mediaType", "filename", filename)
+
+ return responseServerError, nil
+ }
+
+ return &http.Response{
+ Proto: "Local",
+ StatusCode: http.StatusOK,
+ Header: http.Header{
+ "Content-Type": []string{mediaType},
+ },
+ ContentLength: int64(len(file)),
+ }, nil
+ }
+
+ return responseNotAllowed, nil
+}
+
+// post handles http.MethodPost: accepts the upload request.
+func (i imageTransport) post(request *http.Request) (*http.Response, error) {
+ if strings.HasSuffix(request.URL.Path, "/uploads/") {
+ return &http.Response{
+ Proto: "Local",
+ StatusCode: http.StatusAccepted,
+ Header: http.Header{
+ "Location": []string{filepath.Dir(request.URL.Path)},
+ },
+ Request: request,
+ }, nil
+ }
+
+ return responseNotAllowed, nil
+}
+
+// put handles http.MethodPut: creates the file in the blobs or manifests dir.
+func (i imageTransport) put(request *http.Request) (*http.Response, error) {
+ if strings.HasSuffix(request.URL.Path, "/uploads") { // blobs
+ body, err := io.ReadAll(request.Body)
+ if err != nil {
+ return responseServerError, nil
+ }
+ defer request.Body.Close()
+
+ filename := filepath.Join(i.baseDir, "blobs", request.URL.Query().Get("digest"))
+ if err := os.MkdirAll(filepath.Dir(filename), os.ModePerm); err != nil {
+ return responseServerError, nil
+ }
+
+ if err := os.WriteFile(filename, body, os.ModePerm); err != nil {
+ return responseServerError, nil
+ }
+
+ return responseCreated, nil
+ } else if strings.HasSuffix(filepath.Dir(request.URL.Path), "/manifests") { // manifest
+ filename := filepath.Join(i.baseDir, strings.TrimPrefix(request.URL.Path, apiPrefix))
+ if err := os.MkdirAll(filepath.Dir(filename), os.ModePerm); err != nil {
+ return responseServerError, nil
+ }
+
+ body, err := io.ReadAll(request.Body)
+ if err != nil {
+ return responseServerError, nil
+ }
+ defer request.Body.Close()
+
+ if err := os.WriteFile(filename, body, os.ModePerm); err != nil {
+ return responseServerError, nil
+ }
+
+ return responseCreated, nil
+ }
+
+ return responseNotAllowed, nil
+}
+
+// get handles http.MethodGet: reads the file from the blobs or manifests dir.
+func (i imageTransport) get(request *http.Request) (*http.Response, error) {
+ if strings.HasSuffix(filepath.Dir(request.URL.Path), "blobs") { // blobs
+ filename := filepath.Join(i.baseDir, "blobs", filepath.Base(request.URL.Path))
+ if _, err := os.Stat(filename); err != nil {
+ klog.V(4).ErrorS(err, "failed to stat blobs", "filename", filename)
+
+ return responseNotFound, nil
+ }
+
+ file, err := os.ReadFile(filename)
+ if err != nil {
+ klog.V(4).ErrorS(err, "failed to read file", "filename", filename)
+
+ return responseServerError, nil
+ }
+
+ return &http.Response{
+ Proto: "Local",
+ StatusCode: http.StatusOK,
+ ContentLength: int64(len(file)),
+ Body: io.NopCloser(bytes.NewReader(file)),
+ }, nil
+ } else if strings.HasSuffix(filepath.Dir(request.URL.Path), "manifests") { // manifests
+ filename := filepath.Join(i.baseDir, strings.TrimPrefix(request.URL.Path, apiPrefix))
+ if _, err := os.Stat(filename); err != nil {
+ klog.V(4).ErrorS(err, "failed to stat blobs", "filename", filename)
+
+ return responseNotFound, nil
+ }
+
+ file, err := os.ReadFile(filename)
+ if err != nil {
+ klog.V(4).ErrorS(err, "failed to read file", "filename", filename)
+
+ return responseServerError, nil
+ }
+
+ var data map[string]any
+ if err := json.Unmarshal(file, &data); err != nil {
+ klog.V(4).ErrorS(err, "failed to unmarshal file", "filename", filename)
+
+ return responseServerError, nil
+ }
+
+ mediaType, ok := data["mediaType"].(string)
+ if !ok {
+ klog.V(4).ErrorS(nil, "unknown mediaType", "filename", filename)
+
+ return responseServerError, nil
+ }
+
+ return &http.Response{
+ Proto: "Local",
+ StatusCode: http.StatusOK,
+ Header: http.Header{
+ "Content-Type": []string{mediaType},
+ },
+ ContentLength: int64(len(file)),
+ Body: io.NopCloser(bytes.NewReader(file)),
+ }, nil
+ }
+
+ return responseNotAllowed, nil
+}
diff --git a/feature/pkg/modules/image_test.go b/feature/pkg/modules/image_test.go
new file mode 100644
index 000000000..680f503b8
--- /dev/null
+++ b/feature/pkg/modules/image_test.go
@@ -0,0 +1,75 @@
+package modules
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestModuleImage(t *testing.T) {
+ testcases := []struct {
+ name string
+ opt ExecOptions
+ exceptStdout string
+ exceptStderr string
+ }{
+ {
+ name: "pull is not map",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{
+"pull": ""
+}`),
+ },
+ Variable: &testVariable{},
+ },
+ exceptStderr: "\"pull\" should be map",
+ },
+ {
+ name: "pull.manifests is empty",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{
+"pull": {}
+}`),
+ },
+ Variable: &testVariable{},
+ },
+ exceptStderr: "\"pull.manifests\" is required",
+ },
+ {
+ name: "push is not map",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{
+"push": ""
+}`),
+ },
+ Variable: &testVariable{},
+ },
+ exceptStderr: "\"push\" should be map",
+ },
+ {
+ name: "push.registry is empty",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{
+"push": {}
+}`),
+ },
+ Variable: &testVariable{},
+ },
+ exceptStderr: "\"push.registry\" is required",
+ },
+ }
+
+ for _, testcase := range testcases {
+ t.Run(testcase.name, func(t *testing.T) {
+ stdout, stderr := ModuleImage(context.Background(), testcase.opt)
+ assert.Equal(t, testcase.exceptStdout, stdout)
+ assert.Equal(t, testcase.exceptStderr, stderr)
+ })
+ }
+}
diff --git a/feature/pkg/modules/module.go b/feature/pkg/modules/module.go
new file mode 100644
index 000000000..0e05a1855
--- /dev/null
+++ b/feature/pkg/modules/module.go
@@ -0,0 +1,141 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/klog/v2"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
+ "github.com/kubesphere/kubekey/v4/pkg/connector"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// messages returned on stdout
+const (
+ // StdoutSuccess message for common module
+ StdoutSuccess = "success"
+ // StdoutSkip message when a task is skipped
+ StdoutSkip = "skip"
+
+ // StdoutTrue for bool module
+ StdoutTrue = "True"
+ // StdoutFalse for bool module
+ StdoutFalse = "False"
+)
+
+// ModuleExecFunc executes a module and returns its stdout and stderr.
+type ModuleExecFunc func(ctx context.Context, options ExecOptions) (stdout string, stderr string)
+
+// ExecOptions for module
+type ExecOptions struct {
+ // the defined Args for module
+ Args runtime.RawExtension
+ // the host to execute on
+ Host string
+ // the variables the module needs
+ variable.Variable
+ // the task to be executed
+ Task kkcorev1alpha1.Task
+ // the pipeline to be executed
+ Pipeline kkcorev1.Pipeline
+}
+
+func (o ExecOptions) getAllVariables() (map[string]any, error) {
+ ha, err := o.Variable.Get(variable.GetAllVariable(o.Host))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get host %s variable: %w", o.Host, err)
+ }
+
+ vd, ok := ha.(map[string]any)
+ if !ok {
+ return nil, fmt.Errorf("host: %s variable is not a map", o.Host)
+ }
+
+ return vd, nil
+}
+
+var module = make(map[string]ModuleExecFunc)
+
+// RegisterModule registers a module exec function under the given name.
+func RegisterModule(moduleName string, exec ModuleExecFunc) error {
+ if _, ok := module[moduleName]; ok {
+ return fmt.Errorf("module %s is exist", moduleName)
+ }
+
+ module[moduleName] = exec
+
+ return nil
+}
+
+// FindModule returns the module registered under the given name, or nil if none is registered.
+func FindModule(moduleName string) ModuleExecFunc {
+ return module[moduleName]
+}
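+
+// As a sketch, an out-of-tree module can be wired in the same way the builtin
+// modules are registered below ("ping" and ModulePing are hypothetical names,
+// not part of this package):
+//
+//   func ModulePing(ctx context.Context, options ExecOptions) (string, string) {
+//       return StdoutSuccess, ""
+//   }
+//
+//   func init() {
+//       utilruntime.Must(RegisterModule("ping", ModulePing))
+//   }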
+
+func init() {
+ utilruntime.Must(RegisterModule("assert", ModuleAssert))
+ utilruntime.Must(RegisterModule("command", ModuleCommand))
+ utilruntime.Must(RegisterModule("shell", ModuleCommand))
+ utilruntime.Must(RegisterModule("copy", ModuleCopy))
+ utilruntime.Must(RegisterModule("fetch", ModuleFetch))
+ utilruntime.Must(RegisterModule("debug", ModuleDebug))
+ utilruntime.Must(RegisterModule("template", ModuleTemplate))
+ utilruntime.Must(RegisterModule("set_fact", ModuleSetFact))
+ utilruntime.Must(RegisterModule("gen_cert", ModuleGenCert))
+ utilruntime.Must(RegisterModule("image", ModuleImage))
+}
+
+// ConnKey is the context key under which a pre-created connector is stored.
+var ConnKey = struct{}{}
+
+func getConnector(ctx context.Context, host string, data map[string]any) (connector.Connector, error) {
+ var conn connector.Connector
+ var err error
+
+ if v := ctx.Value(ConnKey); v != nil {
+ vd, ok := v.(connector.Connector)
+ if !ok {
+ return nil, fmt.Errorf("value stored in context by ConnKey is not a connector.Connector")
+ }
+ conn = vd
+ } else {
+ connectorVars := make(map[string]any)
+
+ if c1, ok := data[_const.VariableConnector]; ok {
+ if c2, ok := c1.(map[string]any); ok {
+ connectorVars = c2
+ }
+ }
+
+ conn, err = connector.NewConnector(host, connectorVars)
+ if err != nil {
+ return conn, err
+ }
+ }
+
+ if err = conn.Init(ctx); err != nil {
+ klog.V(4).ErrorS(err, "failed to init connector")
+
+ return conn, err
+ }
+
+ return conn, nil
+}
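+
+// Callers that already hold a connection can bypass the lookup above by
+// seeding the context before calling a module (a sketch, assuming conn
+// implements connector.Connector):
+//
+//   ctx = context.WithValue(ctx, ConnKey, conn)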
diff --git a/feature/pkg/modules/module_test.go b/feature/pkg/modules/module_test.go
new file mode 100644
index 000000000..b518e5578
--- /dev/null
+++ b/feature/pkg/modules/module_test.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "errors"
+ "io"
+ "io/fs"
+
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+type testVariable struct {
+ value map[string]any
+ err error
+}
+
+func (v testVariable) Key() string {
+ return "testModule"
+}
+
+func (v testVariable) Get(variable.GetFunc) (any, error) {
+ return v.value, v.err
+}
+
+func (v *testVariable) Merge(variable.MergeFunc) error {
+ v.value = map[string]any{
+ "k": "v",
+ }
+
+ return nil
+}
+
+var successConnector = &testConnector{output: []byte("success")}
+var failedConnector = &testConnector{
+ copyErr: errors.New("failed"),
+ fetchErr: errors.New("failed"),
+ commandErr: errors.New("failed"),
+}
+
+type testConnector struct {
+ // return for init
+ initErr error
+ // return for close
+ closeErr error
+ // return for copy
+ copyErr error
+ // return for fetch
+ fetchErr error
+ // return for command
+ output []byte
+ commandErr error
+}
+
+func (t testConnector) Init(context.Context) error {
+ return t.initErr
+}
+
+func (t testConnector) Close(context.Context) error {
+ return t.closeErr
+}
+
+func (t testConnector) PutFile(context.Context, []byte, string, fs.FileMode) error {
+ return t.copyErr
+}
+
+func (t testConnector) FetchFile(context.Context, string, io.Writer) error {
+ return t.fetchErr
+}
+
+func (t testConnector) ExecuteCommand(context.Context, string) ([]byte, error) {
+ return t.output, t.commandErr
+}
diff --git a/feature/pkg/modules/set_fact.go b/feature/pkg/modules/set_fact.go
new file mode 100644
index 000000000..a77cf06c1
--- /dev/null
+++ b/feature/pkg/modules/set_fact.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// ModuleSetFact handles the "set_fact" module.
+func ModuleSetFact(_ context.Context, options ExecOptions) (string, string) {
+ // get host variable
+ args := variable.Extension2Variables(options.Args)
+
+ if err := options.Variable.Merge(variable.MergeAllRuntimeVariable(args, options.Host)); err != nil {
+ return "", fmt.Sprintf("set_fact error: %v", err)
+ }
+
+ return StdoutSuccess, ""
+}
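+
+// An illustrative playbook task handled by this module (values are made up;
+// the args are merged into the host's runtime variables):
+//
+//   - name: remember kube_version
+//     set_fact:
+//       kube_version: v1.29.0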
diff --git a/feature/pkg/modules/set_fact_test.go b/feature/pkg/modules/set_fact_test.go
new file mode 100644
index 000000000..62107696b
--- /dev/null
+++ b/feature/pkg/modules/set_fact_test.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
+)
+
+func TestSetFact(t *testing.T) {
+ testcases := []struct {
+ name string
+ opt ExecOptions
+ exceptStdout string
+ exceptStderr string
+ }{
+ {
+ name: "success",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{"k": "v"}`),
+ },
+ Host: "",
+ Variable: &testVariable{},
+ Task: kkcorev1alpha1.Task{},
+ Pipeline: kkcorev1.Pipeline{},
+ },
+ exceptStdout: "success",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+
+ stdout, stderr := ModuleSetFact(ctx, tc.opt)
+ assert.Equal(t, tc.exceptStdout, stdout)
+ assert.Equal(t, tc.exceptStderr, stderr)
+ })
+ }
+}
diff --git a/feature/pkg/modules/template.go b/feature/pkg/modules/template.go
new file mode 100644
index 000000000..a9b461947
--- /dev/null
+++ b/feature/pkg/modules/template.go
@@ -0,0 +1,285 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/klog/v2"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/kubesphere/kubekey/v4/pkg/connector"
+
+ kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+ "github.com/kubesphere/kubekey/v4/pkg/project"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+type templateArgs struct {
+ src string
+ dest string
+ mode *int
+}
+
+func newTemplateArgs(_ context.Context, raw runtime.RawExtension, vars map[string]any) (*templateArgs, error) {
+ var err error
+ // check args
+ ta := &templateArgs{}
+ args := variable.Extension2Variables(raw)
+
+ ta.src, err = variable.StringVar(vars, args, "src")
+ if err != nil {
+ klog.V(4).ErrorS(err, "\"src\" should be string")
+
+ return nil, errors.New("\"src\" should be string")
+ }
+
+ ta.dest, err = variable.StringVar(vars, args, "dest")
+ if err != nil {
+ return nil, errors.New("\"dest\" should be string")
+ }
+
+ ta.mode, _ = variable.IntVar(vars, args, "mode")
+
+ return ta, nil
+}
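+
+// The args parsed above correspond to a task of roughly this shape
+// (illustrative paths; mode is optional):
+//
+//   - name: render kubeadm config
+//     template:
+//       src: kubeadm-init.yaml
+//       dest: /etc/kubernetes/kubeadm-config.yaml
+//       mode: 0644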
+
+// ModuleTemplate handles the "template" module.
+func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
+ // get host variable
+ ha, err := options.getAllVariables()
+ if err != nil {
+ return "", err.Error()
+ }
+
+ ta, err := newTemplateArgs(ctx, options.Args, ha)
+ if err != nil {
+ klog.V(4).ErrorS(err, "get template args error", "task", ctrlclient.ObjectKeyFromObject(&options.Task))
+
+ return "", err.Error()
+ }
+
+ // get connector
+ conn, err := getConnector(ctx, options.Host, ha)
+ if err != nil {
+ return "", err.Error()
+ }
+ defer conn.Close(ctx)
+
+ if filepath.IsAbs(ta.src) {
+ fileInfo, err := os.Stat(ta.src)
+ if err != nil {
+ return "", fmt.Sprintf(" get src file %s in local path error: %v", ta.src, err)
+ }
+
+ if fileInfo.IsDir() { // src is dir
+ if err := ta.absDir(ctx, conn, ha); err != nil {
+ return "", fmt.Sprintf("sync template absolute dir error %s", err)
+ }
+ } else { // src is file
+ if err := ta.absFile(ctx, fileInfo.Mode(), conn, ha); err != nil {
+ return "", fmt.Sprintf("sync template absolute file error %s", err)
+ }
+ }
+ } else {
+ pj, err := project.New(ctx, options.Pipeline, false)
+ if err != nil {
+ return "", fmt.Sprintf("get project error: %v", err)
+ }
+
+ fileInfo, err := pj.Stat(ta.src, project.GetFileOption{IsTemplate: true, Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]})
+ if err != nil {
+ return "", fmt.Sprintf("get file %s from project error: %v", ta.src, err)
+ }
+
+ if fileInfo.IsDir() {
+ if err := ta.relDir(ctx, pj, options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole], conn, ha); err != nil {
+ return "", fmt.Sprintf("sync template relative dir error: %s", err)
+ }
+ } else {
+ if err := ta.relFile(ctx, pj, options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole], fileInfo.Mode(), conn, ha); err != nil {
+ return "", fmt.Sprintf("sync template relative dir error: %s", err)
+ }
+ }
+ }
+
+ return StdoutSuccess, ""
+}
+
+// relFile handles the case where template.src is a relative file: read it from the project, render it, and copy it to the remote host.
+func (ta templateArgs) relFile(ctx context.Context, pj project.Project, role string, mode fs.FileMode, conn connector.Connector, vars map[string]any) error {
+ data, err := pj.ReadFile(ta.src, project.GetFileOption{IsTemplate: true, Role: role})
+ if err != nil {
+ return fmt.Errorf("read file error: %w", err)
+ }
+
+ result, err := tmpl.ParseString(vars, string(data))
+ if err != nil {
+ return fmt.Errorf("parse file error: %w", err)
+ }
+
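+ // a dest ending in "/" is treated as a directory: e.g. src "config.tmpl"
+ // with dest "/etc/kubekey/" lands at "/etc/kubekey/config.tmpl" (illustrative paths)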
+ dest := ta.dest
+ if strings.HasSuffix(ta.dest, "/") {
+ dest = filepath.Join(ta.dest, filepath.Base(ta.src))
+ }
+
+ if ta.mode != nil {
+ mode = os.FileMode(*ta.mode)
+ }
+
+ if err := conn.PutFile(ctx, []byte(result), dest, mode); err != nil {
+ return fmt.Errorf("copy file error: %w", err)
+ }
+
+ return nil
+}
+
+// relDir handles the case where template.src is a relative dir: read each file from the project, render it, and copy it to the remote host.
+func (ta templateArgs) relDir(ctx context.Context, pj project.Project, role string, conn connector.Connector, vars map[string]any) error {
+ if err := pj.WalkDir(ta.src, project.GetFileOption{IsTemplate: true, Role: role}, func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return fmt.Errorf("walk dir %s error: %w", ta.src, err)
+ }
+ if d.IsDir() { // only copy file
+ return nil
+ }
+
+ info, err := d.Info()
+ if err != nil {
+ return fmt.Errorf("get file info error: %w", err)
+ }
+
+ mode := info.Mode()
+ if ta.mode != nil {
+ mode = os.FileMode(*ta.mode)
+ }
+
+ data, err := pj.ReadFile(path, project.GetFileOption{IsTemplate: true, Role: role})
+ if err != nil {
+ return fmt.Errorf("read file error: %w", err)
+ }
+ result, err := tmpl.ParseString(vars, string(data))
+ if err != nil {
+ return fmt.Errorf("parse file error: %w", err)
+ }
+
+ dest := ta.dest
+ if strings.HasSuffix(ta.dest, "/") {
+ rel, err := pj.Rel(ta.src, path, project.GetFileOption{IsTemplate: true, Role: role})
+ if err != nil {
+ return fmt.Errorf("get relative file path error: %w", err)
+ }
+ dest = filepath.Join(ta.dest, rel)
+ }
+
+ if err := conn.PutFile(ctx, []byte(result), dest, mode); err != nil {
+ return fmt.Errorf("copy file error: %w", err)
+ }
+
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// absFile handles the case where template.src is an absolute file: read it from the local filesystem, render it, and copy it to the remote host.
+func (ta templateArgs) absFile(ctx context.Context, mode fs.FileMode, conn connector.Connector, vars map[string]any) error {
+ data, err := os.ReadFile(ta.src)
+ if err != nil {
+ return fmt.Errorf("read file error: %w", err)
+ }
+
+ result, err := tmpl.ParseString(vars, string(data))
+ if err != nil {
+ return fmt.Errorf("parse file error: %w", err)
+ }
+
+ dest := ta.dest
+ if strings.HasSuffix(ta.dest, "/") {
+ dest = filepath.Join(ta.dest, filepath.Base(ta.src))
+ }
+
+ if ta.mode != nil {
+ mode = os.FileMode(*ta.mode)
+ }
+
+ if err := conn.PutFile(ctx, []byte(result), dest, mode); err != nil {
+ return fmt.Errorf("copy file error: %w", err)
+ }
+
+ return nil
+}
+
+// absDir handles the case where template.src is an absolute dir: read each file from the local filesystem, render it, and copy it to the remote host.
+func (ta templateArgs) absDir(ctx context.Context, conn connector.Connector, vars map[string]any) error {
+ if err := filepath.WalkDir(ta.src, func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return fmt.Errorf("walk dir %s error: %w", ta.src, err)
+ }
+ if d.IsDir() { // only copy file
+ return nil
+ }
+
+ // get file old mode
+ info, err := d.Info()
+ if err != nil {
+ return fmt.Errorf("get file info error: %w", err)
+ }
+ mode := info.Mode()
+ if ta.mode != nil {
+ mode = os.FileMode(*ta.mode)
+ }
+ // read file
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("read file error: %w", err)
+ }
+ result, err := tmpl.ParseString(vars, string(data))
+ if err != nil {
+ return fmt.Errorf("parse file error: %w", err)
+ }
+ // copy file to remote
+ dest := ta.dest
+ if strings.HasSuffix(ta.dest, "/") {
+ rel, err := filepath.Rel(ta.src, path)
+ if err != nil {
+ return fmt.Errorf("get relative file path error: %w", err)
+ }
+ dest = filepath.Join(ta.dest, rel)
+ }
+
+ if err := conn.PutFile(ctx, []byte(result), dest, mode); err != nil {
+ return fmt.Errorf("copy file error: %w", err)
+ }
+
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/feature/pkg/modules/template_test.go b/feature/pkg/modules/template_test.go
new file mode 100644
index 000000000..95f5c7279
--- /dev/null
+++ b/feature/pkg/modules/template_test.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestTemplate(t *testing.T) {
+ absPath, err := filepath.Abs(os.Args[0])
+ if err != nil {
+ t.Fatalf("get absolute path error: %v", err)
+ }
+
+ testcases := []struct {
+ name string
+ opt ExecOptions
+ ctxFunc func() context.Context
+ exceptStdout string
+ exceptStderr string
+ }{
+ {
+ name: "src is empty",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{},
+ Host: "local",
+ Variable: &testVariable{},
+ },
+ ctxFunc: context.Background,
+ exceptStderr: "\"src\" should be string",
+ },
+ {
+ name: "dest is empty",
+ opt: ExecOptions{
+ Args: runtime.RawExtension{
+ Raw: []byte(`{
+"src": "{{ .absPath }}"
+}`),
+ },
+ Host: "local",
+ Variable: &testVariable{
+ value: map[string]any{
+ "absPath": absPath,
+ },
+ },
+ },
+ ctxFunc: context.Background,
+ exceptStderr: "\"dest\" should be string",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(tc.ctxFunc(), time.Second*5)
+ defer cancel()
+
+ acStdout, acStderr := ModuleTemplate(ctx, tc.opt)
+ assert.Equal(t, tc.exceptStdout, acStdout)
+ assert.Equal(t, tc.exceptStderr, acStderr)
+ })
+ }
+}
diff --git a/feature/pkg/project/builtin.go b/feature/pkg/project/builtin.go
new file mode 100644
index 000000000..a9a201f75
--- /dev/null
+++ b/feature/pkg/project/builtin.go
@@ -0,0 +1,110 @@
+//go:build builtin
+// +build builtin
+
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package project
+
+import (
+ "errors"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "github.com/kubesphere/kubekey/v4/builtin"
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+func init() {
+ builtinProjectFunc = func(pipeline kkcorev1.Pipeline) (Project, error) {
+ if pipeline.Spec.Playbook == "" {
+ return nil, errors.New("playbook should not be empty")
+ }
+
+ if filepath.IsAbs(pipeline.Spec.Playbook) {
+ return nil, errors.New("playbook should be relative path base on project.addr")
+ }
+
+ return &builtinProject{Pipeline: pipeline, FS: builtin.BuiltinPipeline, playbook: pipeline.Spec.Playbook}, nil
+ }
+}
+
+type builtinProject struct {
+ kkcorev1.Pipeline
+
+ fs.FS
+ // playbook is a relative path within the project FS
+ playbook string
+}
+
+func (p builtinProject) getFilePath(path string, o GetFileOption) string {
+ var find []string
+ switch {
+ case o.IsFile:
+ if o.Role != "" {
+ // find from project/roles/roleName
+ find = append(find, filepath.Join(_const.ProjectRolesDir, o.Role, _const.ProjectRolesFilesDir, path))
+ // find from pbPath dir like: current_playbook/roles/roleName
+ find = append(find, filepath.Join(p.playbook, _const.ProjectRolesDir, o.Role, _const.ProjectRolesFilesDir, path))
+ }
+ find = append(find, filepath.Join(_const.ProjectRolesFilesDir, path))
+ case o.IsTemplate:
+ // find from project/roles/roleName
+ if o.Role != "" {
+ find = append(find, filepath.Join(_const.ProjectRolesDir, o.Role, _const.ProjectRolesTemplateDir, path))
+ // find from pbPath dir like: current_playbook/roles/roleName
+ find = append(find, filepath.Join(p.playbook, _const.ProjectRolesDir, o.Role, _const.ProjectRolesTemplateDir, path))
+ }
+ find = append(find, filepath.Join(_const.ProjectRolesTemplateDir, path))
+ default:
+ find = append(find, path)
+ }
+ for _, s := range find {
+ if _, err := fs.Stat(p.FS, s); err == nil {
+ return s
+ }
+ }
+
+ return ""
+}
+
+// MarshalPlaybook parses the project's playbook file into a kkprojectv1.Playbook.
+func (p builtinProject) MarshalPlaybook() (*kkprojectv1.Playbook, error) {
+ return marshalPlaybook(p.FS, p.playbook)
+}
+
+// Stat role/file/template file or dir in project
+func (p builtinProject) Stat(path string, option GetFileOption) (os.FileInfo, error) {
+ return fs.Stat(p.FS, p.getFilePath(path, option))
+}
+
+// WalkDir role/file/template dir in project
+func (p builtinProject) WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error {
+ return fs.WalkDir(p.FS, p.getFilePath(path, option), f)
+}
+
+// ReadFile role/file/template file or dir in project
+func (p builtinProject) ReadFile(path string, option GetFileOption) ([]byte, error) {
+ return fs.ReadFile(p.FS, p.getFilePath(path, option))
+}
+
+// Rel path for role/file/template file or dir in project
+func (p builtinProject) Rel(root string, path string, option GetFileOption) (string, error) {
+ return filepath.Rel(p.getFilePath(root, option), path)
+}
diff --git a/feature/pkg/project/git.go b/feature/pkg/project/git.go
new file mode 100644
index 000000000..2aa7d5934
--- /dev/null
+++ b/feature/pkg/project/git.go
@@ -0,0 +1,184 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package project
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/transport/http"
+ "k8s.io/klog/v2"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+func newGitProject(ctx context.Context, pipeline kkcorev1.Pipeline, update bool) (Project, error) {
+ if pipeline.Spec.Playbook == "" || pipeline.Spec.Project.Addr == "" {
+ return nil, errors.New("playbook and project.addr should not be empty")
+ }
+
+ if filepath.IsAbs(pipeline.Spec.Playbook) {
+ return nil, errors.New("playbook should be relative path base on project.addr")
+ }
+
+ // git clone to project dir
+ if pipeline.Spec.Project.Name == "" {
+ pipeline.Spec.Project.Name = strings.TrimSuffix(pipeline.Spec.Project.Addr[strings.LastIndex(pipeline.Spec.Project.Addr, "/")+1:], ".git")
+ }
+
+ p := &gitProject{
+ Pipeline: pipeline,
+ projectDir: filepath.Join(_const.GetWorkDir(), _const.ProjectDir, pipeline.Spec.Project.Name),
+ playbook: pipeline.Spec.Playbook,
+ }
+
+ if _, err := os.Stat(p.projectDir); os.IsNotExist(err) {
+ // git clone
+ if err := p.gitClone(ctx); err != nil {
+ return nil, fmt.Errorf("clone git project error: %w", err)
+ }
+ } else if update {
+ // git pull
+ if err := p.gitPull(ctx); err != nil {
+ return nil, fmt.Errorf("pull git project error: %w", err)
+ }
+ }
+
+ return p, nil
+}
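+
+// A pipeline spec that takes this git path might look like the following
+// (illustrative values; field names mirror the Go fields used above, actual
+// YAML tags may differ):
+//
+//   project:
+//     addr: https://github.com/kubesphere/kubekey.git
+//     branch: master
+//   playbook: playbooks/create_cluster.yaml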
+
+// gitProject from git
+type gitProject struct {
+ kkcorev1.Pipeline
+
+ // projectDir is the local directory the project is cloned into
+ projectDir string
+ // playbook is a relative path based on projectDir
+ playbook string
+}
+
+func (p gitProject) getFilePath(path string, o GetFileOption) string {
+ var find []string
+ switch {
+ case o.IsFile:
+ if o.Role != "" {
+ // find from project/roles/roleName
+ find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesDir, o.Role, _const.ProjectRolesFilesDir, path))
+ // find from pbPath dir like: current_playbook/roles/roleName
+ find = append(find, filepath.Join(p.projectDir, p.playbook, _const.ProjectRolesDir, o.Role, _const.ProjectRolesFilesDir, path))
+ }
+ find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesFilesDir, path))
+ case o.IsTemplate:
+ // find from project/roles/roleName
+ if o.Role != "" {
+ find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesDir, o.Role, _const.ProjectRolesTemplateDir, path))
+ // find from pbPath dir like: current_playbook/roles/roleName
+ find = append(find, filepath.Join(p.projectDir, p.playbook, _const.ProjectRolesDir, o.Role, _const.ProjectRolesTemplateDir, path))
+ }
+ find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesTemplateDir, path))
+ default:
+ find = append(find, filepath.Join(p.projectDir, path))
+ }
+ for _, s := range find {
+ if _, err := os.Stat(s); err == nil {
+ return s
+ }
+ }
+
+ return ""
+}
+
+func (p gitProject) gitClone(ctx context.Context) error {
+ if _, err := git.PlainCloneContext(ctx, p.projectDir, false, &git.CloneOptions{
+ URL: p.Pipeline.Spec.Project.Addr,
+ Progress: nil,
+ ReferenceName: plumbing.NewBranchReferenceName(p.Pipeline.Spec.Project.Branch),
+ SingleBranch: true,
+ Auth: &http.TokenAuth{Token: p.Pipeline.Spec.Project.Token},
+ InsecureSkipTLS: false,
+ }); err != nil {
+ klog.Errorf("clone project %s failed: %v", p.Pipeline.Spec.Project.Addr, err)
+
+ return err
+ }
+
+ return nil
+}
+
+func (p gitProject) gitPull(ctx context.Context) error {
+ open, err := git.PlainOpen(p.projectDir)
+ if err != nil {
+ klog.V(4).ErrorS(err, "git open error", "local_dir", p.projectDir)
+
+ return err
+ }
+
+ wt, err := open.Worktree()
+ if err != nil {
+ klog.V(4).ErrorS(err, "git open worktree error", "local_dir", p.projectDir)
+
+ return err
+ }
+
+ if err := wt.PullContext(ctx, &git.PullOptions{
+ RemoteURL: p.Pipeline.Spec.Project.Addr,
+ ReferenceName: plumbing.NewBranchReferenceName(p.Pipeline.Spec.Project.Branch),
+ SingleBranch: true,
+ Auth: &http.TokenAuth{Token: p.Pipeline.Spec.Project.Token},
+ InsecureSkipTLS: false,
+ }); err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
+ klog.V(4).ErrorS(err, "git pull error", "local_dir", p.projectDir)
+
+ return err
+ }
+
+ return nil
+}
+
+// MarshalPlaybook parses the project's playbook file into a kkprojectv1.Playbook.
+func (p gitProject) MarshalPlaybook() (*kkprojectv1.Playbook, error) {
+ return marshalPlaybook(os.DirFS(p.projectDir), p.Pipeline.Spec.Playbook)
+}
+
+// Stat role/file/template file or dir in project
+func (p gitProject) Stat(path string, option GetFileOption) (os.FileInfo, error) {
+ return os.Stat(p.getFilePath(path, option))
+}
+
+// WalkDir role/file/template dir in project
+func (p gitProject) WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error {
+ return filepath.WalkDir(p.getFilePath(path, option), f)
+}
+
+// ReadFile role/file/template file or dir in project
+func (p gitProject) ReadFile(path string, option GetFileOption) ([]byte, error) {
+ return os.ReadFile(p.getFilePath(path, option))
+}
+
+// Rel path for role/file/template file or dir in project
+func (p gitProject) Rel(root string, path string, option GetFileOption) (string, error) {
+ return filepath.Rel(p.getFilePath(root, option), path)
+}
diff --git a/feature/pkg/project/helper.go b/feature/pkg/project/helper.go
new file mode 100644
index 000000000..60e4ec16b
--- /dev/null
+++ b/feature/pkg/project/helper.go
@@ -0,0 +1,376 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package project
+
+import (
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "gopkg.in/yaml.v3"
+
+ kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+// marshalPlaybook builds a kkprojectv1.Playbook from a playbook file.
+func marshalPlaybook(baseFS fs.FS, pbPath string) (*kkprojectv1.Playbook, error) {
+ // convert playbook to kkprojectv1.Playbook
+ pb := &kkprojectv1.Playbook{}
+ if err := loadPlaybook(baseFS, pbPath, pb); err != nil {
+ return nil, fmt.Errorf("load playbook failed: %w", err)
+ }
+ // convertRoles.
+ if err := convertRoles(baseFS, pbPath, pb); err != nil {
+ return nil, fmt.Errorf("convert roles failed: %w", err)
+ }
+ // convertIncludeTasks
+ if err := convertIncludeTasks(baseFS, pbPath, pb); err != nil {
+ return nil, fmt.Errorf("convert include tasks failed: %w", err)
+ }
+ // validate playbook
+ if err := pb.Validate(); err != nil {
+ return nil, fmt.Errorf("validate playbook failed: %w", err)
+ }
+
+ return pb, nil
+}
+
+// loadPlaybook loads a playbook and joins any playbooks referenced via import_playbook into one.
+func loadPlaybook(baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook) error {
+ // baseFS is the local ansible project dir the playbook belongs to
+ pbData, err := fs.ReadFile(baseFS, pbPath)
+ if err != nil {
+ return fmt.Errorf("read playbook failed: %w", err)
+ }
+ var plays []kkprojectv1.Play
+ if err := yaml.Unmarshal(pbData, &plays); err != nil {
+ return fmt.Errorf("unmarshal playbook failed: %w", err)
+ }
+
+ for _, p := range plays {
+ if err := dealImportPlaybook(p, baseFS, pbPath, pb); err != nil {
+ return fmt.Errorf("load import_playbook failed: %w", err)
+ }
+
+ if err := dealVarsFiles(&p, baseFS, pbPath); err != nil {
+ return fmt.Errorf("load vars_files failed: %w", err)
+ }
+ // fill block in roles
+ if err := dealRoles(p, baseFS, pbPath); err != nil {
+ return fmt.Errorf("load roles failed: %w", err)
+ }
+
+ pb.Play = append(pb.Play, p)
+ }
+
+ return nil
+}
+
+// dealImportPlaybook handles the "import_playbook" argument in a play.
+func dealImportPlaybook(p kkprojectv1.Play, baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook) error {
+ if p.ImportPlaybook != "" {
+ importPlaybook := getPlaybookBaseFromPlaybook(baseFS, pbPath, p.ImportPlaybook)
+ if importPlaybook == "" {
+ return fmt.Errorf("import_playbook %s path is empty, it's maybe [project-dir/playbooks/import_playbook_file, playbook-dir/playbooks/import_playbook-file, playbook-dir/import_playbook-file]", p.ImportPlaybook)
+ }
+ if err := loadPlaybook(baseFS, importPlaybook, pb); err != nil {
+ return fmt.Errorf("load playbook failed: %w", err)
+ }
+ }
+
+ return nil
+}
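+
+// The corresponding playbook entry is a bare import (illustrative file name):
+//
+//   - import_playbook: another_playbook.yaml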
+
+// dealVarsFiles handles the "vars_files" argument in a play.
+// p is a pointer so the vars loaded from files are kept on the play.
+func dealVarsFiles(p *kkprojectv1.Play, baseFS fs.FS, pbPath string) error {
+ for _, file := range p.VarsFiles {
+ // load vars from vars_files
+ if _, err := fs.Stat(baseFS, filepath.Join(filepath.Dir(pbPath), file)); err != nil {
+ return fmt.Errorf("file %s not exists", file)
+ }
+ data, err := fs.ReadFile(baseFS, filepath.Join(filepath.Dir(pbPath), file))
+ if err != nil {
+ return fmt.Errorf("read file %s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
+ }
+
+ var vars map[string]any
+ var node yaml.Node // decode through yaml.Node to preserve the order defined in the file
+ if err := yaml.Unmarshal(data, &node); err != nil {
+ return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
+ }
+ if err := node.Decode(&vars); err != nil {
+ return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
+ }
+ // store vars in the play; keys defined across vars_files must not repeat.
+ p.Vars, err = combineMaps(p.Vars, vars)
+ if err != nil {
+ return fmt.Errorf("combine maps file:%s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
+ }
+ }
+
+ return nil
+}
+
+// dealRoles handles the "roles" argument in a play.
+func dealRoles(p kkprojectv1.Play, baseFS fs.FS, pbPath string) error {
+ for i, r := range p.Roles {
+ roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
+ if roleBase == "" {
+ return fmt.Errorf("cannot found Role %s", r.Role)
+ }
+
+ mainTask := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile))
+ if mainTask == "" {
+ return fmt.Errorf("cannot found main task for Role %s", r.Role)
+ }
+
+ rdata, err := fs.ReadFile(baseFS, mainTask)
+ if err != nil {
+ return fmt.Errorf("read file %s failed: %w", mainTask, err)
+ }
+ var blocks []kkprojectv1.Block
+ if err := yaml.Unmarshal(rdata, &blocks); err != nil {
+ return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), mainTask), err)
+ }
+ p.Roles[i].Block = blocks
+ }
+
+ return nil
+}
+
+// convertRoles converts each role reference into its task blocks and default vars.
+func convertRoles(baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook) error {
+ for i, p := range pb.Play {
+ for j, r := range p.Roles {
+ roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
+ if roleBase == "" {
+ return fmt.Errorf("cannot find role %s", r.Role)
+ }
+
+ var err error
+ if p.Roles[j].Block, err = convertRoleBlocks(baseFS, pbPath, roleBase); err != nil {
+ return fmt.Errorf("convert role %s tasks failed: %w", r.Role, err)
+ }
+
+ if p.Roles[j].Vars, err = convertRoleVars(baseFS, roleBase, p.Roles[j].Vars); err != nil {
+ return fmt.Errorf("convert role %s defaults failed: %w", r.Role, err)
+ }
+ }
+ pb.Play[i] = p
+ }
+
+ return nil
+}
+
+func convertRoleVars(baseFS fs.FS, roleBase string, roleVars map[string]any) (map[string]any, error) {
+ // load defaults (optional)
+ mainDefault := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesDefaultsDir, _const.ProjectRolesDefaultsMainFile))
+ if mainDefault != "" {
+ mainData, err := fs.ReadFile(baseFS, mainDefault)
+ if err != nil {
+ return nil, fmt.Errorf("read defaults variable file %s failed: %w", mainDefault, err)
+ }
+
+ var vars map[string]any
+ var node yaml.Node // decode through yaml.Node to preserve the order defined in the file
+ if err := yaml.Unmarshal(mainData, &node); err != nil {
+ return nil, fmt.Errorf("unmarshal defaults variable yaml file: %s failed: %w", mainDefault, err)
+ }
+ if err := node.Decode(&vars); err != nil {
+ return nil, fmt.Errorf("decode defaults variable yaml file: %s failed: %w", mainDefault, err)
+ }
+
+ return combineMaps(roleVars, vars)
+ }
+
+ return roleVars, nil
+}
+
+// convertRoleBlocks converts roles/tasks/main.yaml into []kkprojectv1.Block.
+func convertRoleBlocks(baseFS fs.FS, pbPath string, roleBase string) ([]kkprojectv1.Block, error) {
+ mainTask := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile))
+ if mainTask == "" {
+ return nil, fmt.Errorf("cannot found main task for Role %s", roleBase)
+ }
+
+ rdata, err := fs.ReadFile(baseFS, mainTask)
+ if err != nil {
+ return nil, fmt.Errorf("read file %s failed: %w", mainTask, err)
+ }
+ var blocks []kkprojectv1.Block
+ if err := yaml.Unmarshal(rdata, &blocks); err != nil {
+ return nil, fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), mainTask), err)
+ }
+
+ return blocks, nil
+}
+
+// convertIncludeTasks expands include_tasks files into blocks.
+func convertIncludeTasks(baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook) error {
+ var pbBase = filepath.Dir(filepath.Dir(pbPath))
+ for _, play := range pb.Play {
+ if err := fileToBlock(baseFS, pbBase, play.PreTasks); err != nil {
+ return fmt.Errorf("convert pre_tasks file %s failed: %w", pbPath, err)
+ }
+
+ if err := fileToBlock(baseFS, pbBase, play.Tasks); err != nil {
+ return fmt.Errorf("convert tasks file %s failed: %w", pbPath, err)
+ }
+
+ if err := fileToBlock(baseFS, pbBase, play.PostTasks); err != nil {
+ return fmt.Errorf("convert post_tasks file %s failed: %w", pbPath, err)
+ }
+
+ for _, r := range play.Roles {
+ roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
+ if err := fileToBlock(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir), r.Block); err != nil {
+ return fmt.Errorf("convert role %s failed: %w", filepath.Join(pbPath, r.Role), err)
+ }
+ }
+ }
+
+ return nil
+}
+
+func fileToBlock(baseFS fs.FS, baseDir string, blocks []kkprojectv1.Block) error {
+ for i, b := range blocks {
+ if b.IncludeTasks != "" {
+ data, err := fs.ReadFile(baseFS, filepath.Join(baseDir, b.IncludeTasks))
+ if err != nil {
+ return fmt.Errorf("read includeTask file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
+ }
+ var bs []kkprojectv1.Block
+ if err := yaml.Unmarshal(data, &bs); err != nil {
+ return fmt.Errorf("unmarshal includeTask file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
+ }
+
+ b.Block = bs
+ blocks[i] = b
+ }
+
+ if err := fileToBlock(baseFS, baseDir, b.Block); err != nil {
+ return fmt.Errorf("convert block file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
+ }
+
+ if err := fileToBlock(baseFS, baseDir, b.Rescue); err != nil {
+ return fmt.Errorf("convert rescue file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
+ }
+
+ if err := fileToBlock(baseFS, baseDir, b.Always); err != nil {
+ return fmt.Errorf("convert always file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
+ }
+ }
+
+ return nil
+}
+
+// getPlaybookBaseFromPlaybook finds the path of an imported playbook:
+// from project/playbooks/playbook if it exists,
+// from current_playbook/playbooks/playbook if it exists,
+// otherwise from current_playbook/playbook.
+func getPlaybookBaseFromPlaybook(baseFS fs.FS, pbPath string, playbook string) string {
+ var find []string
+ // find from project/playbooks/playbook
+ find = append(find, filepath.Join(filepath.Dir(filepath.Dir(pbPath)), _const.ProjectPlaybooksDir, playbook))
+ // find from pbPath dir like: current_playbook/playbooks/playbook
+ find = append(find, filepath.Join(filepath.Dir(pbPath), _const.ProjectPlaybooksDir, playbook))
+ // find from pbPath dir like: current_playbook/playbook
+ find = append(find, filepath.Join(filepath.Dir(pbPath), playbook))
+ for _, s := range find {
+ if baseFS != nil {
+ if _, err := fs.Stat(baseFS, s); err == nil {
+ return s
+ }
+ } else {
+ if _, err := os.Stat(s); err == nil {
+ return s
+ }
+ }
+ }
+
+ return ""
+}
+
+// getRoleBaseFromPlaybook finds the base dir of a role:
+// from project/roles/roleName if it exists,
+// otherwise from current_playbook/roles/roleName.
+func getRoleBaseFromPlaybook(baseFS fs.FS, pbPath string, roleName string) string {
+ var find []string
+ // find from project/roles/roleName
+ find = append(find, filepath.Join(filepath.Dir(filepath.Dir(pbPath)), _const.ProjectRolesDir, roleName))
+ // find from pbPath dir like: current_playbook/roles/roleName
+ find = append(find, filepath.Join(filepath.Dir(pbPath), _const.ProjectRolesDir, roleName))
+
+ for _, s := range find {
+ if baseFS != nil {
+ if _, err := fs.Stat(baseFS, s); err == nil {
+ return s
+ }
+ } else {
+ if _, err := os.Stat(s); err == nil {
+ return s
+ }
+ }
+ }
+
+ return ""
+}
+
+// getYamlFile returns base+".yaml" if it exists,
+// otherwise base+".yml" if it exists, otherwise "".
+func getYamlFile(baseFS fs.FS, base string) string {
+ var find []string
+ find = append(find, base+".yaml", base+".yml")
+
+ for _, s := range find {
+ if baseFS != nil {
+ if _, err := fs.Stat(baseFS, s); err == nil {
+ return s
+ }
+ } else {
+ if _, err := os.Stat(s); err == nil {
+ return s
+ }
+ }
+ }
+
+ return ""
+}
+
+// combineMaps merges v2 into v1, returning an error on duplicate keys.
+func combineMaps(v1, v2 map[string]any) (map[string]any, error) {
+ if len(v1) == 0 {
+ return v2, nil
+ }
+
+ mv := make(map[string]any)
+ for k, v := range v1 {
+ mv[k] = v
+ }
+ for k, v := range v2 {
+ if _, ok := mv[k]; ok {
+ return nil, fmt.Errorf("duplicate key: %s", k)
+ }
+ mv[k] = v
+ }
+
+ return mv, nil
+}
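+
+// For example, combineMaps(map[string]any{"a": 1}, map[string]any{"b": 2})
+// yields {"a": 1, "b": 2}, while a key present in both inputs returns a
+// duplicate-key error (see TestCombineMaps in helper_test.go).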
diff --git a/feature/pkg/project/helper_test.go b/feature/pkg/project/helper_test.go
new file mode 100644
index 000000000..d5074e0e6
--- /dev/null
+++ b/feature/pkg/project/helper_test.go
@@ -0,0 +1,290 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package project
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
+)
+
+func TestGetPlaybookBaseFromAbsPlaybook(t *testing.T) {
+ testcases := []struct {
+ name string
+ basePlaybook string
+ playbook string
+ except string
+ }{
+ {
+ name: "find from project/playbooks/playbook",
+ basePlaybook: filepath.Join("playbooks", "playbook1.yaml"),
+ playbook: "playbook2.yaml",
+ except: filepath.Join("playbooks", "playbook2.yaml"),
+ },
+ {
+ name: "find from current_playbook/playbooks/playbook",
+ basePlaybook: filepath.Join("playbooks", "playbook1.yaml"),
+ playbook: "playbook3.yaml",
+ except: filepath.Join("playbooks", "playbooks", "playbook3.yaml"),
+ },
+ {
+ name: "cannot find",
+ basePlaybook: filepath.Join("playbooks", "playbook1.yaml"),
+ playbook: "playbook4.yaml",
+ except: "",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Equal(t, tc.except, getPlaybookBaseFromPlaybook(os.DirFS("testdata"), tc.basePlaybook, tc.playbook))
+ })
+ }
+}
+
+func TestGetRoleBaseFromAbsPlaybook(t *testing.T) {
+ testcases := []struct {
+ name string
+ basePlaybook string
+ roleName string
+ except string
+ }{
+ {
+ name: "find from project/roles/roleName",
+ basePlaybook: filepath.Join("playbooks", "playbook1.yaml"),
+ roleName: "role1",
+ except: filepath.Join("roles", "role1"),
+ },
+ {
+ name: "find from current_playbook/roles/roleName",
+ basePlaybook: filepath.Join("playbooks", "playbook1.yaml"),
+ roleName: "role2",
+ except: filepath.Join("playbooks", "roles", "role2"),
+ },
+ {
+ name: "cannot find",
+ basePlaybook: filepath.Join("playbooks", "playbook1.yaml"),
+ roleName: "role3",
+ except: "",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Equal(t, tc.except, getRoleBaseFromPlaybook(os.DirFS("testdata"), tc.basePlaybook, tc.roleName))
+ })
+ }
+}
+
+func TestGetYamlFile(t *testing.T) {
+ testcases := []struct {
+ name string
+ base string
+ except string
+ }{
+ {
+ name: "get yaml",
+ base: filepath.Join("playbooks", "playbook2"),
+ except: filepath.Join("playbooks", "playbook2.yaml"),
+ },
+ {
+ name: "get yml",
+ base: filepath.Join("playbooks", "playbook3"),
+ except: filepath.Join("playbooks", "playbook3.yml"),
+ },
+ {
+ name: "cannot find",
+ base: filepath.Join("playbooks", "playbook4"),
+ except: "",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Equal(t, tc.except, getYamlFile(os.DirFS("testdata"), tc.base))
+ })
+ }
+}
+
+func TestMarshalPlaybook(t *testing.T) {
+ testcases := []struct {
+ name string
+ file string
+ except *kkprojectv1.Playbook
+ }{
+ {
+ name: "marshal playbook",
+ file: "playbooks/playbook1.yaml",
+ except: &kkprojectv1.Playbook{Play: []kkprojectv1.Play{
+ {
+ Base: kkprojectv1.Base{Name: "play1"},
+ PlayHost: kkprojectv1.PlayHost{Hosts: []string{"localhost"}},
+ Roles: []kkprojectv1.Role{
+ {
+ RoleInfo: kkprojectv1.RoleInfo{
+ Role: "role1",
+ Block: []kkprojectv1.Block{
+ {
+ BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "role1 | block1"}},
+ Task: kkprojectv1.Task{UnknownField: map[string]any{
+ "debug": map[string]any{
+ "msg": "echo \"hello world\"",
+ },
+ }},
+ },
+ },
+ },
+ },
+ },
+ Handlers: nil,
+ PreTasks: []kkprojectv1.Block{
+ {
+ BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play1 | pre_block1"}},
+ Task: kkprojectv1.Task{UnknownField: map[string]any{
+ "debug": map[string]any{
+ "msg": "echo \"hello world\"",
+ },
+ }},
+ },
+ },
+ PostTasks: []kkprojectv1.Block{
+ {
+ BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play1 | post_block1"}},
+ Task: kkprojectv1.Task{UnknownField: map[string]any{
+ "debug": map[string]any{
+ "msg": "echo \"hello world\"",
+ },
+ }},
+ },
+ },
+ Tasks: []kkprojectv1.Block{
+ {
+ BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play1 | block1"}},
+ BlockInfo: kkprojectv1.BlockInfo{Block: []kkprojectv1.Block{
+ {
+ BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play1 | block1 | block1"}},
+ Task: kkprojectv1.Task{UnknownField: map[string]any{
+ "debug": map[string]any{
+ "msg": "echo \"hello world\"",
+ },
+ }},
+ },
+ {
+ BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play1 | block1 | block2"}},
+ Task: kkprojectv1.Task{UnknownField: map[string]any{
+ "debug": map[string]any{
+ "msg": "echo \"hello world\"",
+ },
+ }},
+ },
+ }},
+ },
+ {
+ BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play1 | block2"}},
+ Task: kkprojectv1.Task{UnknownField: map[string]any{
+ "debug": map[string]any{
+ "msg": "echo \"hello world\"",
+ },
+ }},
+ },
+ },
+ },
+ {
+ Base: kkprojectv1.Base{Name: "play2"},
+ PlayHost: kkprojectv1.PlayHost{Hosts: []string{"localhost"}},
+ Tasks: []kkprojectv1.Block{
+ {
+ BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play2 | block1"}},
+ Task: kkprojectv1.Task{UnknownField: map[string]any{
+ "debug": map[string]any{
+ "msg": "echo \"hello world\"",
+ },
+ }},
+ },
+ },
+ },
+ }},
+ },
+ }
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ pb, err := marshalPlaybook(os.DirFS("testdata"), tc.file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, tc.except, pb)
+ })
+ }
+}
+
+func TestCombineMaps(t *testing.T) {
+ testcases := []struct {
+ name string
+ v1 map[string]any
+ v2 map[string]any
+ except map[string]any
+ err bool
+ }{
+ {
+ name: "v1 is null",
+ v2: map[string]any{
+ "a": "b",
+ },
+ except: map[string]any{
+ "a": "b",
+ },
+ },
+ {
+ name: "success",
+ v1: map[string]any{
+ "a1": "b1",
+ },
+ v2: map[string]any{
+ "a2": "b2",
+ },
+ except: map[string]any{
+ "a1": "b1",
+ "a2": "b2",
+ },
+ },
+ {
+ name: "duplicate key",
+ v1: map[string]any{
+ "a1": "b1",
+ },
+ v2: map[string]any{
+ "a1": "b2",
+ },
+ err: true,
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ maps, err := combineMaps(tc.v1, tc.v2)
+ if tc.err {
+ assert.Error(t, err)
+ } else {
+ assert.Equal(t, tc.except, maps)
+ }
+ })
+ }
+}
diff --git a/feature/pkg/project/local.go b/feature/pkg/project/local.go
new file mode 100644
index 000000000..f1ccc01f1
--- /dev/null
+++ b/feature/pkg/project/local.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package project
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+func newLocalProject(pipeline kkcorev1.Pipeline) (Project, error) {
+ if !filepath.IsAbs(pipeline.Spec.Playbook) {
+ if pipeline.Spec.Project.Addr == "" {
+ wd, err := os.Getwd()
+ if err != nil {
+ return nil, err
+ }
+ pipeline.Spec.Project.Addr = wd
+ }
+ pipeline.Spec.Playbook = filepath.Join(pipeline.Spec.Project.Addr, pipeline.Spec.Playbook)
+ }
+
+ if _, err := os.Stat(pipeline.Spec.Playbook); err != nil {
+ return nil, fmt.Errorf("cannot find playbook %s", pipeline.Spec.Playbook)
+ }
+
+ if filepath.Base(filepath.Dir(pipeline.Spec.Playbook)) != _const.ProjectPlaybooksDir {
+ // the format of playbook is not correct
+ return nil, errors.New("playbook should be projectDir/playbooks/playbookfile")
+ }
+
+ projectDir := filepath.Dir(filepath.Dir(pipeline.Spec.Playbook))
+ playbook, err := filepath.Rel(projectDir, pipeline.Spec.Playbook)
+ if err != nil {
+ return nil, err
+ }
+
+ return &localProject{Pipeline: pipeline, projectDir: projectDir, playbook: playbook}, nil
+}
+
+type localProject struct {
+ kkcorev1.Pipeline
+
+ projectDir string
+ // playbook is a relative path based on projectDir
+ playbook string
+}
+
+func (p localProject) getFilePath(path string, o GetFileOption) string {
+ if filepath.IsAbs(path) {
+ return path
+ }
+ var find []string
+ switch {
+ case o.IsFile:
+ if o.Role != "" {
+ // find from project/roles/roleName
+ find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesDir, o.Role, _const.ProjectRolesFilesDir, path))
+ // find from pbPath dir like: current_playbook/roles/roleName
+ find = append(find, filepath.Join(p.projectDir, p.playbook, _const.ProjectRolesDir, o.Role, _const.ProjectRolesFilesDir, path))
+ }
+ find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesFilesDir, path))
+ case o.IsTemplate:
+ // find from project/roles/roleName
+ if o.Role != "" {
+ find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesDir, o.Role, _const.ProjectRolesTemplateDir, path))
+ // find from pbPath dir like: current_playbook/roles/roleName
+ find = append(find, filepath.Join(p.projectDir, p.playbook, _const.ProjectRolesDir, o.Role, _const.ProjectRolesTemplateDir, path))
+ }
+ find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesTemplateDir, path))
+ default:
+ find = append(find, filepath.Join(p.projectDir, path))
+ }
+ for _, s := range find {
+ if _, err := os.Stat(s); err == nil {
+ return s
+ }
+ }
+
+ return ""
+}
+
+// MarshalPlaybook parses the project's playbook file into a kkprojectv1.Playbook.
+func (p localProject) MarshalPlaybook() (*kkprojectv1.Playbook, error) {
+ return marshalPlaybook(os.DirFS(p.projectDir), p.playbook)
+}
+
+// Stat role/file/template file or dir in project
+func (p localProject) Stat(path string, option GetFileOption) (os.FileInfo, error) {
+ return os.Stat(p.getFilePath(path, option))
+}
+
+// WalkDir role/file/template dir in project
+func (p localProject) WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error {
+ return filepath.WalkDir(p.getFilePath(path, option), f)
+}
+
+// ReadFile role/file/template file or dir in project
+func (p localProject) ReadFile(path string, option GetFileOption) ([]byte, error) {
+ return os.ReadFile(p.getFilePath(path, option))
+}
+
+// Rel path for role/file/template file or dir in project
+func (p localProject) Rel(root string, path string, option GetFileOption) (string, error) {
+ return filepath.Rel(p.getFilePath(root, option), path)
+}
diff --git a/feature/pkg/project/project.go b/feature/pkg/project/project.go
new file mode 100644
index 000000000..70b1e0bd1
--- /dev/null
+++ b/feature/pkg/project/project.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package project
+
+import (
+ "context"
+ "io/fs"
+ "os"
+ "strings"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
+)
+
+var builtinProjectFunc func(kkcorev1.Pipeline) (Project, error)
+
+// Project represents the location of an actual project.
+// Project files should be resolved relative to it.
+type Project interface {
+ MarshalPlaybook() (*kkprojectv1.Playbook, error)
+ Stat(path string, option GetFileOption) (os.FileInfo, error)
+ WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error
+ ReadFile(path string, option GetFileOption) ([]byte, error)
+ Rel(root string, path string, option GetFileOption) (string, error)
+}
+
+// GetFileOption for file.
+type GetFileOption struct {
+ Role string
+ IsTemplate bool
+ IsFile bool
+}
+
+// New creates a Project for the given pipeline.
+// If the project address is a git URL: newGitProject.
+// If the pipeline has BuiltinsProjectAnnotation: builtinProjectFunc.
+// Otherwise: newLocalProject.
+func New(ctx context.Context, pipeline kkcorev1.Pipeline, update bool) (Project, error) {
+ if strings.HasPrefix(pipeline.Spec.Project.Addr, "https://") ||
+ strings.HasPrefix(pipeline.Spec.Project.Addr, "http://") ||
+ strings.HasPrefix(pipeline.Spec.Project.Addr, "git@") {
+ return newGitProject(ctx, pipeline, update)
+ }
+
+ if _, ok := pipeline.Annotations[kkcorev1.BuiltinsProjectAnnotation]; ok {
+ return builtinProjectFunc(pipeline)
+ }
+
+ return newLocalProject(pipeline)
+}
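+
+// A minimal usage sketch (error handling elided; assumes the Pipeline's
+// Spec.Project.Addr and Spec.Playbook are already populated):
+//
+//	proj, err := project.New(ctx, pipeline, false)
+//	if err != nil {
+//		return err
+//	}
+//	playbook, err := proj.MarshalPlaybook()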
diff --git a/feature/pkg/project/testdata/playbooks/playbook1.yaml b/feature/pkg/project/testdata/playbooks/playbook1.yaml
new file mode 100644
index 000000000..08e2e5d09
--- /dev/null
+++ b/feature/pkg/project/testdata/playbooks/playbook1.yaml
@@ -0,0 +1,30 @@
+- name: play1
+ hosts: localhost
+ pre_tasks:
+ - name: play1 | pre_block1
+ debug:
+ msg: echo "hello world"
+ tasks:
+ - name: play1 | block1
+ block:
+ - name: play1 | block1 | block1
+ debug:
+ msg: echo "hello world"
+ - name: play1 | block1 | block2
+ debug:
+ msg: echo "hello world"
+ - name: play1 | block2
+ debug:
+ msg: echo "hello world"
+ post_tasks:
+ - name: play1 | post_block1
+ debug:
+ msg: echo "hello world"
+ roles:
+ - role1
+- name: play2
+ hosts: localhost
+ tasks:
+ - name: play2 | block1
+ debug:
+ msg: echo "hello world"
diff --git a/feature/pkg/project/testdata/playbooks/playbook2.yaml b/feature/pkg/project/testdata/playbooks/playbook2.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/feature/pkg/project/testdata/playbooks/playbook2.yml b/feature/pkg/project/testdata/playbooks/playbook2.yml
new file mode 100644
index 000000000..e69de29bb
diff --git a/feature/pkg/project/testdata/playbooks/playbook3.yml b/feature/pkg/project/testdata/playbooks/playbook3.yml
new file mode 100644
index 000000000..e69de29bb
diff --git a/feature/pkg/project/testdata/playbooks/playbooks/playbook3.yaml b/feature/pkg/project/testdata/playbooks/playbooks/playbook3.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/feature/pkg/project/testdata/playbooks/roles/role2/tasks/main.yaml b/feature/pkg/project/testdata/playbooks/roles/role2/tasks/main.yaml
new file mode 100644
index 000000000..0a50611a3
--- /dev/null
+++ b/feature/pkg/project/testdata/playbooks/roles/role2/tasks/main.yaml
@@ -0,0 +1,3 @@
+- name: role1 | block1
+ debug:
+ msg: echo "hello world"
diff --git a/feature/pkg/project/testdata/roles/role1/tasks/main.yaml b/feature/pkg/project/testdata/roles/role1/tasks/main.yaml
new file mode 100644
index 000000000..0a50611a3
--- /dev/null
+++ b/feature/pkg/project/testdata/roles/role1/tasks/main.yaml
@@ -0,0 +1,3 @@
+- name: role1 | block1
+ debug:
+ msg: echo "hello world"
diff --git a/feature/pkg/proxy/admit.go b/feature/pkg/proxy/admit.go
new file mode 100644
index 000000000..22d348979
--- /dev/null
+++ b/feature/pkg/proxy/admit.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package proxy
+
+import (
+ "context"
+
+ "k8s.io/apiserver/pkg/admission"
+)
+
+func newAlwaysAdmit() admission.Interface {
+ return &admit{}
+}
+
+type admit struct{}
+
+// Validate always passes.
+func (a admit) Validate(context.Context, admission.Attributes, admission.ObjectInterfaces) error {
+ return nil
+}
+
+// Admit always passes.
+func (a admit) Admit(context.Context, admission.Attributes, admission.ObjectInterfaces) error {
+ return nil
+}
+
+// Handles always returns true.
+func (a admit) Handles(admission.Operation) bool {
+ return true
+}
+
+var _ admission.MutationInterface = admit{}
+
+var _ admission.ValidationInterface = admit{}
diff --git a/feature/pkg/proxy/api_resources.go b/feature/pkg/proxy/api_resources.go
new file mode 100644
index 000000000..5343114a4
--- /dev/null
+++ b/feature/pkg/proxy/api_resources.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package proxy
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "reflect"
+ "strings"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apiserver/pkg/admission"
+ "k8s.io/apiserver/pkg/endpoints/discovery"
+ "k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
+ "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
+ "k8s.io/apiserver/pkg/features"
+ apirest "k8s.io/apiserver/pkg/registry/rest"
+ utilfeature "k8s.io/apiserver/pkg/util/feature"
+ "k8s.io/klog/v2"
+
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+const defaultMinRequestTimeout = 1800 * time.Second
+
+type apiResources struct {
+ gv schema.GroupVersion
+ prefix string
+ minRequestTimeout time.Duration
+
+ resourceOptions []resourceOptions
+ list []metav1.APIResource
+ typer runtime.ObjectTyper
+ serializer runtime.NegotiatedSerializer
+}
+
+type resourceOptions struct {
+ path string
+ resource string // generated from path
+ subresource string // generated from path
+ resourcePath string // generated from path
+ itemPath string // generated from path
+ storage apirest.Storage
+ admit admission.Interface
+}
+
+func (o *resourceOptions) init() error {
+ // prefix for resourcePath.
+ var prefix string
+ scoper, ok := o.storage.(apirest.Scoper)
+ if !ok {
+ return fmt.Errorf("%q must implement scoper", o.path)
+ }
+ if scoper.NamespaceScoped() {
+ prefix = "/namespaces/{namespace}/"
+ } else {
+ prefix = "/"
+ }
+
+ // checks if the given storage path is the path of a subresource
+ switch parts := strings.Split(o.path, "/"); len(parts) {
+ case 2:
+ o.resource, o.subresource = parts[0], parts[1]
+ o.resourcePath = prefix + o.resource + "/{name}/" + o.subresource
+ o.itemPath = prefix + o.resource + "/{name}/" + o.subresource
+ case 1:
+ o.resource = parts[0]
+ o.resourcePath = prefix + o.resource
+ o.itemPath = prefix + o.resource + "/{name}"
+ default:
+ return errors.New("api_installer allows only one or two segment paths (resource or resource/subresource)")
+ }
+
+ if o.admit == nil {
+ // set default admit
+ o.admit = newAlwaysAdmit()
+ }
+
+ return nil
+}
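+
+// For illustration, with a namespace-scoped storage the path "pipelines/status"
+// yields resource "pipelines", subresource "status", and
+// resourcePath/itemPath "/namespaces/{namespace}/pipelines/{name}/status",
+// while the path "pipelines" yields resourcePath "/namespaces/{namespace}/pipelines"
+// and itemPath "/namespaces/{namespace}/pipelines/{name}".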
+
+func newAPIIResources(gv schema.GroupVersion) *apiResources {
+ return &apiResources{
+ gv: gv,
+ prefix: "/apis/" + gv.String(),
+ minRequestTimeout: defaultMinRequestTimeout,
+
+ typer: _const.Scheme,
+ serializer: _const.Codecs,
+ }
+}
+
+// AddResource registers an API resource.
+func (r *apiResources) AddResource(o resourceOptions) error {
+ if err := o.init(); err != nil {
+ klog.V(6).ErrorS(err, "Failed to initialize resourceOptions")
+
+ return err
+ }
+ r.resourceOptions = append(r.resourceOptions, o)
+ storageVersionProvider, isStorageVersionProvider := o.storage.(apirest.StorageVersionProvider)
+ var apiResource metav1.APIResource
+ if isStorageVersionProvider &&
+ utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionHash) &&
+ storageVersionProvider.StorageVersion() != nil {
+ versioner := storageVersionProvider.StorageVersion()
+ gvk, err := getStorageVersionKind(versioner, o.storage, r.typer)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to get storage version kind", "storage", reflect.TypeOf(o.storage))
+
+ return err
+ }
+ apiResource.Group = gvk.Group
+ apiResource.Version = gvk.Version
+ apiResource.Kind = gvk.Kind
+ apiResource.StorageVersionHash = discovery.StorageVersionHash(gvk.Group, gvk.Version, gvk.Kind)
+ }
+ apiResource.Name = o.path
+ apiResource.Namespaced = true
+ apiResource.Verbs = []string{"*"}
+ if shortNamesProvider, ok := o.storage.(apirest.ShortNamesProvider); ok {
+ apiResource.ShortNames = shortNamesProvider.ShortNames()
+ }
+ if categoriesProvider, ok := o.storage.(apirest.CategoriesProvider); ok {
+ apiResource.Categories = categoriesProvider.Categories()
+ }
+
+ if o.subresource == "" {
+ singularNameProvider, ok := o.storage.(apirest.SingularNameProvider)
+ if !ok {
+ return fmt.Errorf("resource %s must implement SingularNameProvider", o.path)
+ }
+ apiResource.SingularName = singularNameProvider.GetSingularName()
+ }
+ r.list = append(r.list, apiResource)
+
+ return nil
+}
+
+func (r *apiResources) handlerAPIResources() http.HandlerFunc {
+ return func(writer http.ResponseWriter, request *http.Request) {
+ responsewriters.WriteObjectNegotiated(r.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, writer, request, http.StatusOK,
+ &metav1.APIResourceList{GroupVersion: r.gv.String(), APIResources: r.list}, false)
+ }
+}
+
+// getStorageVersionKind calculates the storage GVK: the GVK that objects are converted to before being persisted to etcd.
+func getStorageVersionKind(storageVersioner runtime.GroupVersioner, storage apirest.Storage, typer runtime.ObjectTyper) (schema.GroupVersionKind, error) {
+ object := storage.New()
+ fqKinds, _, err := typer.ObjectKinds(object)
+ if err != nil {
+ return schema.GroupVersionKind{}, err
+ }
+ gvk, ok := storageVersioner.KindForGroupVersionKinds(fqKinds)
+ if !ok {
+ return schema.GroupVersionKind{}, fmt.Errorf("cannot find the storage version kind for %v", reflect.TypeOf(object))
+ }
+
+ return gvk, nil
+}
diff --git a/feature/pkg/proxy/internal/file_storage.go b/feature/pkg/proxy/internal/file_storage.go
new file mode 100644
index 000000000..1997f6137
--- /dev/null
+++ b/feature/pkg/proxy/internal/file_storage.go
@@ -0,0 +1,557 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "sync"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/conversion"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/watch"
+ apistorage "k8s.io/apiserver/pkg/storage"
+ "k8s.io/apiserver/pkg/storage/storagebackend/factory"
+ "k8s.io/klog/v2"
+)
+
+const (
+ // when a resource is deleted, deleteTagSuffix is appended to the file name;
+ // once the delete event has been handled, the file is removed from disk.
+ deleteTagSuffix = "-deleted"
+ // the file extension for resources stored on local disk.
+ yamlSuffix = ".yaml"
+)
+
+func newFileStorage(prefix string, resource schema.GroupResource, codec runtime.Codec, newFunc func() runtime.Object) (apistorage.Interface, factory.DestroyFunc) {
+ return &fileStorage{
+ prefix: prefix,
+ versioner: apistorage.APIObjectVersioner{},
+ resource: resource,
+ codec: codec,
+ newFunc: newFunc,
+ }, func() {
+ // do nothing
+ }
+}
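+
+// On-disk layout sketch (illustrative; the actual prefix is derived from the
+// REST options getter): each object is stored as one YAML file, and deletion
+// first renames the file into a tombstone that the watcher later removes:
+//
+//	<prefix>/<namespace>/<name>.yaml
+//	<prefix>/<namespace>/<name>.yaml-deleted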
+
+type fileStorage struct {
+ prefix string
+ versioner apistorage.Versioner
+ codec runtime.Codec
+ resource schema.GroupResource
+
+ newFunc func() runtime.Object
+}
+
+var _ apistorage.Interface = &fileStorage{}
+
+// Versioner of local resource files.
+func (s fileStorage) Versioner() apistorage.Versioner {
+ return s.versioner
+}
+
+// Create local resource files.
+func (s fileStorage) Create(_ context.Context, key string, obj, out runtime.Object, _ uint64) error {
+ // set resourceVersion to obj
+ metaObj, err := meta.Accessor(obj)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to get meta object", "path", filepath.Dir(key))
+
+ return err
+ }
+ metaObj.SetResourceVersion("1")
+ // create file to local disk
+ if _, err := os.Stat(filepath.Dir(key)); err != nil {
+ if !os.IsNotExist(err) {
+ klog.V(6).ErrorS(err, "failed to check dir", "path", filepath.Dir(key))
+
+ return err
+ }
+ if err := os.MkdirAll(filepath.Dir(key), os.ModePerm); err != nil {
+ klog.V(6).ErrorS(err, "failed to create dir", "path", filepath.Dir(key))
+
+ return err
+ }
+ }
+
+ data, err := runtime.Encode(s.codec, obj)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to encode resource file", "path", key)
+
+ return err
+ }
+ // render to out
+ if out != nil {
+ err = decode(s.codec, data, out)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to decode resource file", "path", key)
+
+ return err
+ }
+ }
+ // render to file
+ if err := os.WriteFile(key+yamlSuffix, data, os.ModePerm); err != nil {
+ klog.V(6).ErrorS(err, "failed to create resource file", "path", key)
+
+ return err
+ }
+
+ return nil
+}
+
+// Delete local resource files.
+func (s fileStorage) Delete(ctx context.Context, key string, out runtime.Object, preconditions *apistorage.Preconditions, validateDeletion apistorage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
+ if cachedExistingObject != nil {
+ out = cachedExistingObject
+ } else {
+ if err := s.Get(ctx, key, apistorage.GetOptions{}, out); err != nil {
+ klog.V(6).ErrorS(err, "failed to get resource", "path", key)
+
+ return err
+ }
+ }
+
+ if err := preconditions.Check(key, out); err != nil {
+ klog.V(6).ErrorS(err, "failed to check preconditions", "path", key)
+
+ return err
+ }
+
+ if err := validateDeletion(ctx, out); err != nil {
+ klog.V(6).ErrorS(err, "failed to validate deletion", "path", key)
+
+ return err
+ }
+
+ // delete object
+ // rename file to trigger watcher
+ if err := os.Rename(key+yamlSuffix, key+yamlSuffix+deleteTagSuffix); err != nil {
+ klog.V(6).ErrorS(err, "failed to rename resource file", "path", key)
+
+ return err
+ }
+
+ return nil
+}
+
+// Watch local resource files.
+func (s fileStorage) Watch(_ context.Context, key string, _ apistorage.ListOptions) (watch.Interface, error) {
+ return newFileWatcher(s.prefix, key, s.codec, s.newFunc)
+}
+
+// Get local resource files.
+func (s fileStorage) Get(_ context.Context, key string, _ apistorage.GetOptions, out runtime.Object) error {
+ data, err := os.ReadFile(key + yamlSuffix)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to read resource file", "path", key)
+
+ return err
+ }
+ if err := decode(s.codec, data, out); err != nil {
+ klog.V(6).ErrorS(err, "failed to decode resource file", "path", key)
+
+ return err
+ }
+
+ return nil
+}
+
+// GetList local resource files.
+func (s fileStorage) GetList(_ context.Context, key string, opts apistorage.ListOptions, listObj runtime.Object) error {
+ listPtr, err := meta.GetItemsPtr(listObj)
+ if err != nil {
+ return err
+ }
+ v, err := conversion.EnforcePtr(listPtr)
+ if err != nil || v.Kind() != reflect.Slice {
+ return fmt.Errorf("need ptr to slice: %w", err)
+ }
+
+ // Build matching rules for resource version and continue key.
+ resourceVersionMatchRule, continueKeyMatchRule, err := s.buildMatchRules(key, opts, &sync.Once{})
+ if err != nil {
+ return err
+ }
+
+ // Get the root entries in the directory corresponding to 'key'.
+ rootEntries, isAllNamespace, err := s.getRootEntries(key)
+ if err != nil {
+ return err
+ }
+
+ var lastKey string
+ var hasMore bool
+ // Iterate over root entries, processing either directories or files.
+ for i, entry := range rootEntries {
+ if isAllNamespace {
+ // Process namespace directory.
+ err = s.processNamespaceDirectory(key, entry, v, continueKeyMatchRule, resourceVersionMatchRule, &lastKey, &hasMore, opts, listObj)
+ } else {
+ // Process individual resource file.
+ err = s.processResourceFile(key, entry, v, continueKeyMatchRule, resourceVersionMatchRule, &lastKey, opts, listObj)
+ }
+ if err != nil {
+ return err
+ }
+ // Check if we have reached the limit of results requested by the client.
+ if opts.Predicate.Limit != 0 && int64(v.Len()) >= opts.Predicate.Limit {
+ hasMore = i != len(rootEntries)-1
+
+ break
+ }
+ }
+ // Handle the final result after all entries have been processed.
+ return s.handleResult(listObj, v, lastKey, hasMore)
+}
+
+// buildMatchRules creates the match rules for resource version and continue key based on the given options.
+func (s fileStorage) buildMatchRules(key string, opts apistorage.ListOptions, startReadOnce *sync.Once) (func(uint64) bool, func(string) bool, error) {
+ resourceVersionMatchRule := func(uint64) bool { return true }
+ continueKeyMatchRule := func(key string) bool { return strings.HasSuffix(key, yamlSuffix) }
+
+ switch {
+ case opts.Recursive && opts.Predicate.Continue != "":
+ // If continue token is present, set up a rule to start reading after the continueKey.
+ continueKey, _, err := apistorage.DecodeContinue(opts.Predicate.Continue, key)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to parse continueKey", "continueKey", opts.Predicate.Continue)
+
+ return nil, nil, fmt.Errorf("invalid continue token: %w", err)
+ }
+
+ continueKeyMatchRule = func(key string) bool {
+ startRead := false
+ if key == continueKey {
+ startReadOnce.Do(func() { startRead = true })
+ }
+
+ return startRead && key != continueKey
+ }
+ case opts.ResourceVersion != "":
+ // Handle resource version matching based on the provided match rule.
+ parsedRV, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid resource version: %w", err)
+ }
+ switch opts.ResourceVersionMatch {
+ case metav1.ResourceVersionMatchNotOlderThan:
+ resourceVersionMatchRule = func(u uint64) bool { return u >= parsedRV }
+ case metav1.ResourceVersionMatchExact:
+ resourceVersionMatchRule = func(u uint64) bool { return u == parsedRV }
+ case "":
+ // Legacy case: match all resource versions.
+ default:
+ return nil, nil, fmt.Errorf("unknown ResourceVersionMatch value: %v", opts.ResourceVersionMatch)
+ }
+ }
+
+ return resourceVersionMatchRule, continueKeyMatchRule, nil
+}
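+
+// For example (illustrative values): with ResourceVersionMatchNotOlderThan and
+// ResourceVersion "3", objects at resourceVersion >= 3 match; with
+// ResourceVersionMatchExact, only resourceVersion == 3 matches. When a continue
+// token is present, reading is instead gated on reaching the decoded continueKey.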
+
+// getRootEntries reads the directory entries at the given key path.
+func (s fileStorage) getRootEntries(key string) ([]os.DirEntry, bool, error) {
+ var allNamespace bool
+ switch len(filepath.SplitList(strings.TrimPrefix(key, s.prefix))) {
+ case 0: // key is the resource root dir: read resources across all namespaces
+ allNamespace = true
+ case 1: // key is a namespace dir: read only that namespace's resources
+ allNamespace = false
+ default:
+ klog.V(6).ErrorS(nil, "key is invalid", "key", key)
+
+ return nil, false, fmt.Errorf("key is invalid: %s", key)
+ }
+
+ rootEntries, err := os.ReadDir(key)
+ if err != nil && !os.IsNotExist(err) {
+ klog.V(6).ErrorS(err, "failed to read runtime dir", "path", key)
+
+ return nil, allNamespace, err
+ }
+
+ return rootEntries, allNamespace, nil
+}
+
+// processNamespaceDirectory handles the traversal and processing of a namespace directory.
+func (s fileStorage) processNamespaceDirectory(key string, ns os.DirEntry, v reflect.Value, continueKeyMatchRule func(string) bool, resourceVersionMatchRule func(uint64) bool, lastKey *string, hasMore *bool, opts apistorage.ListOptions, listObj runtime.Object) error {
+ if !ns.IsDir() {
+ // namespaces are directories; skip regular files
+ return nil
+ }
+ nsDir := filepath.Join(key, ns.Name())
+ entries, err := os.ReadDir(nsDir)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ klog.V(6).ErrorS(err, "failed to read namespaces dir", "path", nsDir)
+
+ return err
+ }
+
+ for _, entry := range entries {
+ err := s.processResourceFile(nsDir, entry, v, continueKeyMatchRule, resourceVersionMatchRule, lastKey, opts, listObj)
+ if err != nil {
+ return err
+ }
+ // Check if we have reached the limit of results requested by the client.
+ if opts.Predicate.Limit != 0 && int64(v.Len()) >= opts.Predicate.Limit {
+ *hasMore = true
+
+ return nil
+ }
+ }
+
+ return nil
+}
+
+// processResourceFile handles reading, decoding, and processing a single resource file.
+func (s fileStorage) processResourceFile(parentDir string, entry os.DirEntry, v reflect.Value, continueKeyMatchRule func(string) bool, resourceVersionMatchRule func(uint64) bool, lastKey *string, opts apistorage.ListOptions, listObj runtime.Object) error {
+ if entry.IsDir() {
+ // resources are files; skip directories
+ return nil
+ }
+ currentKey := filepath.Join(parentDir, entry.Name())
+ if !continueKeyMatchRule(currentKey) {
+ return nil
+ }
+
+ data, err := os.ReadFile(currentKey)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to read resource file", "path", currentKey)
+
+ return err
+ }
+
+ obj, _, err := s.codec.Decode(data, nil, getNewItem(listObj, v))
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to decode resource file", "path", currentKey)
+
+ return err
+ }
+
+ metaObj, err := meta.Accessor(obj)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to get meta object", "path", currentKey)
+
+ return err
+ }
+
+ rv, err := s.versioner.ParseResourceVersion(metaObj.GetResourceVersion())
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to parse resource version", "resourceVersion", metaObj.GetResourceVersion())
+
+ return err
+ }
+
+ // Apply the resource version match rule.
+ if !resourceVersionMatchRule(rv) {
+ return nil
+ }
+
+ // Check if the object matches the given predicate.
+ if matched, err := opts.Predicate.Matches(obj); err == nil && matched {
+ v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
+ *lastKey = currentKey
+ }
+
+ return nil
+}
+
+// handleResult processes and finalizes the result before returning it.
+func (s fileStorage) handleResult(listObj runtime.Object, v reflect.Value, lastKey string, hasMore bool) error {
+ if v.IsNil() {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+
+ if hasMore {
+ // If there are more results, set the continuation token for the next query.
+ next, err := apistorage.EncodeContinue(lastKey+"\x00", "", 0)
+ if err != nil {
+ return err
+ }
+
+ return s.versioner.UpdateList(listObj, 1, next, nil)
+ }
+
+ // If no more results, return the final list without continuation.
+ return s.versioner.UpdateList(listObj, 1, "", nil)
+}
+
+// GuaranteedUpdate local resource file.
+func (s fileStorage) GuaranteedUpdate(ctx context.Context, key string, destination runtime.Object, ignoreNotFound bool, preconditions *apistorage.Preconditions, tryUpdate apistorage.UpdateFunc, cachedExistingObject runtime.Object) error {
+ var oldObj runtime.Object
+ if cachedExistingObject != nil {
+ oldObj = cachedExistingObject
+ } else {
+ oldObj = s.newFunc()
+ if err := s.Get(ctx, key, apistorage.GetOptions{IgnoreNotFound: ignoreNotFound}, oldObj); err != nil {
+ klog.V(6).ErrorS(err, "failed to get resource", "path", key)
+
+ return err
+ }
+ }
+ if err := preconditions.Check(key, oldObj); err != nil {
+ klog.V(6).ErrorS(err, "failed to check preconditions", "path", key)
+
+ return err
+ }
+ // read the current resourceVersion from the old object
+ metaObj, err := meta.Accessor(oldObj)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to get meta object", "path", filepath.Dir(key))
+
+ return err
+ }
+ oldVersion, err := s.versioner.ParseResourceVersion(metaObj.GetResourceVersion())
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to parse resource version", "resourceVersion", metaObj.GetResourceVersion())
+
+ return err
+ }
+ out, _, err := tryUpdate(oldObj, apistorage.ResponseMeta{ResourceVersion: oldVersion + 1})
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to try update", "path", key)
+
+ return err
+ }
+
+ data, err := runtime.Encode(s.codec, out)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to encode resource file", "path", key)
+
+ return err
+ }
+ // render to destination
+ if destination != nil {
+ err = decode(s.codec, data, destination)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to decode resource file", "path", key)
+
+ return err
+ }
+ }
+ // render to file
+ if err := os.WriteFile(key+yamlSuffix, data, os.ModePerm); err != nil {
+ klog.V(6).ErrorS(err, "failed to create resource file", "path", key)
+
+ return err
+ }
+
+ return nil
+}
+
+// Count counts the local resource files under key.
+func (s fileStorage) Count(key string) (int64, error) {
+ // countByNSDir counts the resource files in a namespace dir.
+ countByNSDir := func(dir string) (int64, error) {
+ var count int64
+ entries, err := os.ReadDir(dir)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to read namespaces dir", "path", dir)
+ // cannot read namespace dir
+ return 0, err
+ }
+ // count the file
+ for _, entry := range entries {
+ if !entry.IsDir() && strings.HasSuffix(entry.Name(), yamlSuffix) {
+ count++
+ }
+ }
+
+ return count, nil
+ }
+
+ switch len(filepath.SplitList(strings.TrimPrefix(key, s.prefix))) {
+ case 0: // count resources across all namespaces
+ var count int64
+ rootEntries, err := os.ReadDir(key)
+ if err != nil && !os.IsNotExist(err) {
+ klog.V(6).ErrorS(err, "failed to read runtime dir", "path", key)
+
+ return 0, err
+ }
+ for _, ns := range rootEntries {
+ if !ns.IsDir() {
+ continue
+ }
+ // each subdirectory is a namespace.
+ c, err := countByNSDir(filepath.Join(key, ns.Name()))
+ if err != nil {
+ return 0, err
+ }
+ count += c
+ }
+
+ return count, nil
+ case 1: // count a namespace's resources
+ return countByNSDir(key)
+ default:
+ klog.V(6).ErrorS(nil, "key is invalid", "key", key)
+ // not support key
+ return 0, fmt.Errorf("key is invalid: %s", key)
+ }
+}
+
+// RequestWatchProgress does nothing.
+func (s fileStorage) RequestWatchProgress(context.Context) error {
+ return nil
+}
+
+// decode decodes a byte slice into the given object pointer.
+// On success, objPtr holds the decoded object.
+func decode(codec runtime.Codec, value []byte, objPtr runtime.Object) error {
+ if _, err := conversion.EnforcePtr(objPtr); err != nil {
+ return fmt.Errorf("unable to convert output object to pointer: %w", err)
+ }
+ _, _, err := codec.Decode(value, nil, objPtr)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func getNewItem(listObj runtime.Object, v reflect.Value) runtime.Object {
+ // For unstructured lists with a target group/version, preserve the group/version in the instantiated list items
+ if unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured {
+ if apiVersion := unstructuredList.GetAPIVersion(); apiVersion != "" {
+ return &unstructured.Unstructured{Object: map[string]any{"apiVersion": apiVersion}}
+ }
+ }
+ // Otherwise just instantiate an empty item
+ elem := v.Type().Elem()
+ if obj, ok := reflect.New(elem).Interface().(runtime.Object); ok {
+ return obj
+ }
+ klog.V(6).Info("elem is not runtime.Object")
+
+ return nil
+}
diff --git a/feature/pkg/proxy/internal/rest_option.go b/feature/pkg/proxy/internal/rest_option.go
new file mode 100644
index 000000000..5a28574cf
--- /dev/null
+++ b/feature/pkg/proxy/internal/rest_option.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+ "path/filepath"
+ "sync"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer/json"
+ "k8s.io/apimachinery/pkg/runtime/serializer/versioning"
+ apigeneric "k8s.io/apiserver/pkg/registry/generic"
+ apistorage "k8s.io/apiserver/pkg/storage"
+ cacherstorage "k8s.io/apiserver/pkg/storage/cacher"
+ "k8s.io/apiserver/pkg/storage/storagebackend"
+ "k8s.io/apiserver/pkg/storage/storagebackend/factory"
+ cgtoolscache "k8s.io/client-go/tools/cache"
+
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+// NewFileRESTOptionsGetter returns a fileRESTOptionsGetter.
+func NewFileRESTOptionsGetter(gv schema.GroupVersion) apigeneric.RESTOptionsGetter {
+ return &fileRESTOptionsGetter{
+ gv: gv,
+ storageConfig: &storagebackend.Config{
+ Type: "",
+ Prefix: "/",
+ Transport: storagebackend.TransportConfig{},
+ Codec: newYamlCodec(gv),
+ EncodeVersioner: runtime.NewMultiGroupVersioner(gv),
+ },
+ }
+}
+
+func newYamlCodec(gv schema.GroupVersion) runtime.Codec {
+ yamlSerializer := json.NewSerializerWithOptions(json.DefaultMetaFactory, _const.Scheme, _const.Scheme, json.SerializerOptions{Yaml: true})
+
+ return versioning.NewDefaultingCodecForScheme(
+ _const.Scheme,
+ yamlSerializer,
+ yamlSerializer,
+ gv,
+ gv,
+ )
+}
+
+// fileRESTOptionsGetter provides REST options backed by local files.
+type fileRESTOptionsGetter struct {
+ gv schema.GroupVersion
+ storageConfig *storagebackend.Config
+}
+
+// GetRESTOptions returns apigeneric.RESTOptions for the given resource.
+func (f fileRESTOptionsGetter) GetRESTOptions(resource schema.GroupResource) (apigeneric.RESTOptions, error) {
+ prefix := filepath.Join(_const.GetRuntimeDir(), f.gv.Group, f.gv.Version, resource.Resource)
+
+ return apigeneric.RESTOptions{
+ StorageConfig: f.storageConfig.ForResource(resource),
+ Decorator: func(storageConfig *storagebackend.ConfigForResource, resourcePrefix string,
+ keyFunc func(obj runtime.Object) (string, error),
+ newFunc func() runtime.Object,
+ newListFunc func() runtime.Object,
+ getAttrsFunc apistorage.AttrFunc,
+ triggerFuncs apistorage.IndexerFuncs,
+ indexers *cgtoolscache.Indexers) (apistorage.Interface, factory.DestroyFunc, error) {
+ s, d := newFileStorage(prefix, resource, storageConfig.Codec, newFunc)
+
+ cacherConfig := cacherstorage.Config{
+ Storage: s,
+ Versioner: apistorage.APIObjectVersioner{},
+ GroupResource: storageConfig.GroupResource,
+ ResourcePrefix: resourcePrefix,
+ KeyFunc: keyFunc,
+ NewFunc: newFunc,
+ NewListFunc: newListFunc,
+ GetAttrsFunc: getAttrsFunc,
+ IndexerFuncs: triggerFuncs,
+ Indexers: indexers,
+ Codec: storageConfig.Codec,
+ }
+ cacher, err := cacherstorage.NewCacherFromConfig(cacherConfig)
+ if err != nil {
+ return nil, func() {}, err
+ }
+ var once sync.Once
+ destroyFunc := func() {
+ once.Do(func() {
+ cacher.Stop()
+ d()
+ })
+ }
+
+ return cacher, destroyFunc, nil
+ },
+ EnableGarbageCollection: false,
+ DeleteCollectionWorkers: 0,
+ ResourcePrefix: prefix,
+ CountMetricPollPeriod: 0,
+ StorageObjectCountTracker: nil,
+ }, nil
+}
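+
+// A minimal wiring sketch (assumes the group/version served by the proxy, e.g.
+// kkcorev1.SchemeGroupVersion, and the pipeline storage package; error
+// handling elided):
+//
+//	getter := internal.NewFileRESTOptionsGetter(kkcorev1.SchemeGroupVersion)
+//	storage, err := pipeline.NewStorage(getter)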
diff --git a/feature/pkg/proxy/internal/watcher.go b/feature/pkg/proxy/internal/watcher.go
new file mode 100644
index 000000000..90e1cd209
--- /dev/null
+++ b/feature/pkg/proxy/internal/watcher.go
@@ -0,0 +1,207 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+
+ "github.com/fsnotify/fsnotify"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/klog/v2"
+)
+
+// fileWatcher watches resource files in a local dir.
+type fileWatcher struct {
+ prefix string
+ codec runtime.Codec
+ newFunc func() runtime.Object
+ watcher *fsnotify.Watcher
+ watchEvents chan watch.Event
+}
+
+// newFileWatcher creates a fileWatcher for the given path.
+func newFileWatcher(prefix, path string, codec runtime.Codec, newFunc func() runtime.Object) (watch.Interface, error) {
+ if _, err := os.Stat(path); err != nil {
+ if !os.IsNotExist(err) {
+ klog.V(6).ErrorS(err, "failed to stat path", "path", path)
+
+ return nil, err
+ }
+ if err := os.MkdirAll(path, os.ModePerm); err != nil {
+ return nil, err
+ }
+ }
+
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to create file watcher", "path", path)
+
+ return nil, err
+ }
+ if err := watcher.Add(path); err != nil {
+ klog.V(6).ErrorS(err, "failed to add path to file watcher", "path", path)
+
+ return nil, err
+ }
+ // add namespace dir to watcher
+ if prefix == path {
+ entry, err := os.ReadDir(prefix)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to read dir", "dir", path)
+
+ return nil, err
+ }
+ for _, e := range entry {
+ if e.IsDir() {
+ if err := watcher.Add(filepath.Join(prefix, e.Name())); err != nil {
+ klog.V(6).ErrorS(err, "failed to add namespace dir to file watcher", "dir", e.Name())
+
+ return nil, err
+ }
+ }
+ }
+ }
+
+ w := &fileWatcher{
+ prefix: prefix,
+ codec: codec,
+ watcher: watcher,
+ newFunc: newFunc,
+ watchEvents: make(chan watch.Event),
+ }
+
+ go w.watch()
+
+ return w, nil
+}
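+
+// A usage sketch (illustrative arguments; codec and newFunc come from the
+// storage configuration):
+//
+//	w, err := newFileWatcher(prefix, key, codec, newFunc)
+//	if err != nil {
+//		return err
+//	}
+//	defer w.Stop()
+//	for event := range w.ResultChan() {
+//		// event.Type is watch.Added, watch.Modified, or watch.Deleted
+//	}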
+
+// Stop stops the watch.
+func (w *fileWatcher) Stop() {
+ if err := w.watcher.Close(); err != nil {
+ klog.V(6).ErrorS(err, "failed to close file watcher")
+ }
+}
+
+// ResultChan returns the watch event channel.
+func (w *fileWatcher) ResultChan() <-chan watch.Event {
+ return w.watchEvents
+}
+
+func (w *fileWatcher) watch() {
+ for {
+ select {
+ case event := <-w.watcher.Events:
+ klog.V(6).InfoS("receive watcher event", "event", event)
+ // Adjust the watch scope: each namespace dir gets its own watch,
+ // which covers all resource files in that namespace.
+ entry, err := os.Stat(event.Name)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to stat resource file", "event", event)
+
+ continue
+ }
+ if entry.IsDir() && len(filepath.SplitList(strings.TrimPrefix(event.Name, w.prefix))) == 1 {
+ // the dir is a namespace dir
+ switch event.Op {
+ case fsnotify.Create:
+ if err := w.watcher.Add(event.Name); err != nil {
+ klog.V(6).ErrorS(err, "failed to add namespace dir to file watcher", "event", event)
+ }
+ case fsnotify.Remove:
+ if err := w.watcher.Remove(event.Name); err != nil {
+ klog.V(6).ErrorS(err, "failed to remove namespace dir to file watcher", "event", event)
+ }
+ default:
+ // do nothing
+ }
+
+ continue
+ }
+
+ if err := w.watchFile(event); err != nil {
+ klog.V(6).ErrorS(err, "watch resource file error")
+ }
+
+ case err := <-w.watcher.Errors:
+ klog.V(6).ErrorS(err, "file watcher error")
+
+ return
+ }
+ }
+}
+
+// watchFile handles a watch event for a resource file.
+func (w *fileWatcher) watchFile(event fsnotify.Event) error {
+ if !strings.HasSuffix(event.Name, yamlSuffix) {
+ return nil
+ }
+ data, err := os.ReadFile(event.Name)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to read resource file", "event", event)
+
+ return err
+ }
+ obj, _, err := w.codec.Decode(data, nil, w.newFunc())
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to decode resource file", "event", event)
+
+ return err
+ }
+ metaObj, err := meta.Accessor(obj)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to convert to metaObject", "event", event)
+
+ return err
+ }
+ if metaObj.GetName() == "" && metaObj.GetGenerateName() == "" { // ignore unknown file
+ klog.V(6).InfoS("name is empty. ignore", "event", event)
+
+ return nil
+ }
+
+ switch event.Op {
+ case fsnotify.Create:
+ w.watchEvents <- watch.Event{
+ Type: watch.Added,
+ Object: obj,
+ }
+ case fsnotify.Write:
+ if strings.HasSuffix(filepath.Base(event.Name), deleteTagSuffix) {
+ // delete event
+ w.watchEvents <- watch.Event{
+ Type: watch.Deleted,
+ Object: obj,
+ }
+ if err := os.Remove(event.Name); err != nil {
+ klog.ErrorS(err, "failed to remove file", "event", event)
+ }
+ } else {
+ // update event
+ w.watchEvents <- watch.Event{
+ Type: watch.Modified,
+ Object: obj,
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/feature/pkg/proxy/path_expression.go b/feature/pkg/proxy/path_expression.go
new file mode 100644
index 000000000..bf51ff646
--- /dev/null
+++ b/feature/pkg/proxy/path_expression.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package proxy
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// pathExpression holds a compiled path expression (RegExp) needed to match against
+// HTTP request paths and to extract path parameter values.
+type pathExpression struct {
+ LiteralCount int // the number of literal characters (means those not resulting from template variable substitution)
+ VarNames []string // the names of parameters (enclosed by {}) in the path
+ VarCount int // the number of named parameters (enclosed by {}) in the path
+ Matcher *regexp.Regexp
+ Source string // Path as defined by the RouteBuilder
+ tokens []string
+}
+
+// newPathExpression creates a pathExpression from the input URL path.
+// Returns an error if the path is invalid.
+func newPathExpression(path string) (*pathExpression, error) {
+ expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path)
+ compiled, err := regexp.Compile(expression)
+ if err != nil {
+ return nil, err
+ }
+
+ return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
+func templateToRegularExpression(template string) (string, int, []string, int, []string) {
+ var (
+ literalCount int
+ varNames []string
+ varCount int
+ )
+ var buffer bytes.Buffer
+ buffer.WriteString("^")
+ tokens := tokenizePath(template)
+ for _, each := range tokens {
+ if each == "" {
+ continue
+ }
+ buffer.WriteString("/")
+ if strings.HasPrefix(each, "{") {
+ // check for regular expression in variable
+ colon := strings.Index(each, ":")
+ var varName string
+ if colon != -1 {
+ // extract expression
+ varName = strings.TrimSpace(each[1:colon])
+ paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
+ if paramExpr == "*" { // special case
+ buffer.WriteString("(.*)")
+ } else {
+ buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
+ }
+ } else {
+ // plain var
+ varName = strings.TrimSpace(each[1 : len(each)-1])
+ buffer.WriteString("([^/]+?)")
+ }
+ varNames = append(varNames, varName)
+ varCount++
+ } else {
+ literalCount += len(each)
+ encoded := each
+ buffer.WriteString(regexp.QuoteMeta(encoded))
+ }
+ }
+
+ return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens
+}
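+
+// For example, the template "/namespaces/{namespace}/pipelines/{name}"
+// compiles to the expression
+//
+//	^/namespaces/([^/]+?)/pipelines/([^/]+?)(/.*)?$
+//
+// with varNames ["namespace", "name"], varCount 2, and literalCount 19.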
+
+// tokenizePath tokenizes a URL path using the slash separator; the result has no empty tokens.
+func tokenizePath(path string) []string {
+ if path == "/" {
+ return nil
+ }
+ return strings.Split(strings.Trim(path, "/"), "/")
+}
diff --git a/feature/pkg/proxy/resources/config/storage.go b/feature/pkg/proxy/resources/config/storage.go
new file mode 100644
index 000000000..21124b39f
--- /dev/null
+++ b/feature/pkg/proxy/resources/config/storage.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ apigeneric "k8s.io/apiserver/pkg/registry/generic"
+ apiregistry "k8s.io/apiserver/pkg/registry/generic/registry"
+ apirest "k8s.io/apiserver/pkg/registry/rest"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+// ConfigStorage storage for Config
+type ConfigStorage struct {
+ Config *REST
+}
+
+// REST resource for Config
+type REST struct {
+ *apiregistry.Store
+}
+
+// NewStorage for Config
+func NewStorage(optsGetter apigeneric.RESTOptionsGetter) (ConfigStorage, error) {
+ store := &apiregistry.Store{
+ NewFunc: func() runtime.Object { return &kkcorev1.Config{} },
+ NewListFunc: func() runtime.Object { return &kkcorev1.ConfigList{} },
+ DefaultQualifiedResource: kkcorev1.SchemeGroupVersion.WithResource("configs").GroupResource(),
+ SingularQualifiedResource: kkcorev1.SchemeGroupVersion.WithResource("config").GroupResource(),
+
+ CreateStrategy: Strategy,
+ UpdateStrategy: Strategy,
+ DeleteStrategy: Strategy,
+ ReturnDeletedObject: true,
+
+ TableConvertor: apirest.NewDefaultTableConvertor(kkcorev1.SchemeGroupVersion.WithResource("configs").GroupResource()),
+ }
+
+ options := &apigeneric.StoreOptions{
+ RESTOptions: optsGetter,
+ }
+
+ if err := store.CompleteWithOptions(options); err != nil {
+ return ConfigStorage{}, err
+ }
+
+ return ConfigStorage{
+ Config: &REST{store},
+ }, nil
+}
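+
+// A minimal usage sketch (assumes the file-backed options getter from
+// pkg/proxy/internal; error handling elided):
+//
+//	getter := internal.NewFileRESTOptionsGetter(kkcorev1.SchemeGroupVersion)
+//	storage, err := NewStorage(getter)
+//	// storage.Config serves the standard verbs for Config objects.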
diff --git a/feature/pkg/proxy/resources/config/strategy.go b/feature/pkg/proxy/resources/config/strategy.go
new file mode 100644
index 000000000..daa2ee45a
--- /dev/null
+++ b/feature/pkg/proxy/resources/config/strategy.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ apinames "k8s.io/apiserver/pkg/storage/names"
+ "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+// ConfigStrategy implements behavior for Configs
+type ConfigStrategy struct {
+ runtime.ObjectTyper
+ apinames.NameGenerator
+}
+
+// Strategy is the default logic that applies when creating and updating Config
+// objects via the REST API.
+var Strategy = ConfigStrategy{_const.Scheme, apinames.SimpleNameGenerator}
+
+// ===CreateStrategy===
+
+// NamespaceScoped always true
+func (t ConfigStrategy) NamespaceScoped() bool {
+ return true
+}
+
+// PrepareForCreate does nothing.
+func (t ConfigStrategy) PrepareForCreate(context.Context, runtime.Object) {
+ // do nothing
+}
+
+// Validate always passes.
+func (t ConfigStrategy) Validate(context.Context, runtime.Object) field.ErrorList {
+ // do nothing
+ return nil
+}
+
+// WarningsOnCreate does nothing.
+func (t ConfigStrategy) WarningsOnCreate(context.Context, runtime.Object) []string {
+ // do nothing
+ return nil
+}
+
+// Canonicalize does nothing.
+func (t ConfigStrategy) Canonicalize(runtime.Object) {
+ // do nothing
+}
+
+// ===UpdateStrategy===
+
+// AllowCreateOnUpdate always false
+func (t ConfigStrategy) AllowCreateOnUpdate() bool {
+ return false
+}
+
+// PrepareForUpdate does nothing.
+func (t ConfigStrategy) PrepareForUpdate(context.Context, runtime.Object, runtime.Object) {
+ // do nothing
+}
+
+// ValidateUpdate does nothing.
+func (t ConfigStrategy) ValidateUpdate(context.Context, runtime.Object, runtime.Object) field.ErrorList {
+ // do nothing
+ return nil
+}
+
+// WarningsOnUpdate always nil
+func (t ConfigStrategy) WarningsOnUpdate(context.Context, runtime.Object, runtime.Object) []string {
+ // do nothing
+ return nil
+}
+
+// AllowUnconditionalUpdate always true
+func (t ConfigStrategy) AllowUnconditionalUpdate() bool {
+ return true
+}
+
+// ===ResetFieldsStrategy===
+
+// GetResetFields always nil
+func (t ConfigStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
+ return nil
+}
diff --git a/feature/pkg/proxy/resources/inventory/storage.go b/feature/pkg/proxy/resources/inventory/storage.go
new file mode 100644
index 000000000..1c37b4ad3
--- /dev/null
+++ b/feature/pkg/proxy/resources/inventory/storage.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package inventory
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ apigeneric "k8s.io/apiserver/pkg/registry/generic"
+ apiregistry "k8s.io/apiserver/pkg/registry/generic/registry"
+ apirest "k8s.io/apiserver/pkg/registry/rest"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+// InventoryStorage storage for Inventory
+type InventoryStorage struct {
+ Inventory *REST
+}
+
+// REST resource for Inventory
+type REST struct {
+ *apiregistry.Store
+}
+
+// NewStorage for Inventory
+func NewStorage(optsGetter apigeneric.RESTOptionsGetter) (InventoryStorage, error) {
+ store := &apiregistry.Store{
+ NewFunc: func() runtime.Object { return &kkcorev1.Inventory{} },
+ NewListFunc: func() runtime.Object { return &kkcorev1.InventoryList{} },
+ DefaultQualifiedResource: kkcorev1.SchemeGroupVersion.WithResource("inventories").GroupResource(),
+ SingularQualifiedResource: kkcorev1.SchemeGroupVersion.WithResource("inventory").GroupResource(),
+
+ CreateStrategy: Strategy,
+ UpdateStrategy: Strategy,
+ DeleteStrategy: Strategy,
+ ReturnDeletedObject: true,
+
+ TableConvertor: apirest.NewDefaultTableConvertor(kkcorev1.SchemeGroupVersion.WithResource("inventories").GroupResource()),
+ }
+ options := &apigeneric.StoreOptions{
+ RESTOptions: optsGetter,
+ }
+ if err := store.CompleteWithOptions(options); err != nil {
+ return InventoryStorage{}, err
+ }
+
+ return InventoryStorage{
+ Inventory: &REST{store},
+ }, nil
+}
diff --git a/feature/pkg/proxy/resources/inventory/strategy.go b/feature/pkg/proxy/resources/inventory/strategy.go
new file mode 100644
index 000000000..97be95876
--- /dev/null
+++ b/feature/pkg/proxy/resources/inventory/strategy.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package inventory
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ apinames "k8s.io/apiserver/pkg/storage/names"
+ "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+// inventoryStrategy implements behavior for Inventories
+type inventoryStrategy struct {
+ runtime.ObjectTyper
+ apinames.NameGenerator
+}
+
+// Strategy is the default logic that applies when creating and updating Inventory
+// objects via the REST API.
+var Strategy = inventoryStrategy{_const.Scheme, apinames.SimpleNameGenerator}
+
+// ===CreateStrategy===
+
+// NamespaceScoped always true
+func (t inventoryStrategy) NamespaceScoped() bool {
+ return true
+}
+
+// PrepareForCreate does nothing.
+func (t inventoryStrategy) PrepareForCreate(context.Context, runtime.Object) {
+ // do nothing
+}
+
+// Validate always passes.
+func (t inventoryStrategy) Validate(context.Context, runtime.Object) field.ErrorList {
+ // do nothing
+ return nil
+}
+
+// WarningsOnCreate does nothing.
+func (t inventoryStrategy) WarningsOnCreate(context.Context, runtime.Object) []string {
+ // do nothing
+ return nil
+}
+
+// Canonicalize does nothing.
+func (t inventoryStrategy) Canonicalize(runtime.Object) {
+ // do nothing
+}
+
+// ===UpdateStrategy===
+
+// AllowCreateOnUpdate always false
+func (t inventoryStrategy) AllowCreateOnUpdate() bool {
+ return false
+}
+
+// PrepareForUpdate does nothing.
+func (t inventoryStrategy) PrepareForUpdate(context.Context, runtime.Object, runtime.Object) {}
+
+// ValidateUpdate does nothing.
+func (t inventoryStrategy) ValidateUpdate(context.Context, runtime.Object, runtime.Object) field.ErrorList {
+ // do nothing
+ return nil
+}
+
+// WarningsOnUpdate always nil
+func (t inventoryStrategy) WarningsOnUpdate(context.Context, runtime.Object, runtime.Object) []string {
+ // do nothing
+ return nil
+}
+
+// AllowUnconditionalUpdate always true
+func (t inventoryStrategy) AllowUnconditionalUpdate() bool {
+ return true
+}
+
+// ===ResetFieldsStrategy===
+
+// GetResetFields always nil
+func (t inventoryStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
+ return nil
+}
diff --git a/feature/pkg/proxy/resources/pipeline/storage.go b/feature/pkg/proxy/resources/pipeline/storage.go
new file mode 100644
index 000000000..67e225df8
--- /dev/null
+++ b/feature/pkg/proxy/resources/pipeline/storage.go
@@ -0,0 +1,112 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pipeline
+
+import (
+ "context"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ apigeneric "k8s.io/apiserver/pkg/registry/generic"
+ apiregistry "k8s.io/apiserver/pkg/registry/generic/registry"
+ apirest "k8s.io/apiserver/pkg/registry/rest"
+ "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+// PipelineStorage storage for Pipeline
+type PipelineStorage struct {
+ Pipeline *REST
+ PipelineStatus *StatusREST
+}
+
+// REST resource for Pipeline
+type REST struct {
+ *apiregistry.Store
+}
+
+// StatusREST status subresource for Pipeline
+type StatusREST struct {
+ store *apiregistry.Store
+}
+
+// NamespaceScoped is true for Pipeline
+func (r *StatusREST) NamespaceScoped() bool {
+ return true
+}
+
+// New creates a new Pipeline object.
+func (r *StatusREST) New() runtime.Object {
+ return &kkcorev1.Pipeline{}
+}
+
+// Destroy cleans up resources on shutdown.
+func (r *StatusREST) Destroy() {
+ // Given that underlying store is shared with REST,
+ // we don't destroy it here explicitly.
+}
+
+// Get retrieves the object from the storage. It is required to support Patch.
+func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
+ return r.store.Get(ctx, name, options)
+}
+
+// Update alters the status subset of an object.
+func (r *StatusREST) Update(ctx context.Context, name string, objInfo apirest.UpdatedObjectInfo, createValidation apirest.ValidateObjectFunc, updateValidation apirest.ValidateObjectUpdateFunc, _ bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
+ // We are explicitly setting forceAllowCreate to false in the call to the underlying storage because
+ // subresources should never allow create on update.
+ return r.store.Update(ctx, name, objInfo, createValidation, updateValidation, false, options)
+}
+
+// GetResetFields implements rest.ResetFieldsStrategy
+func (r *StatusREST) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
+ return r.store.GetResetFields()
+}
+
+// ConvertToTable print table view
+func (r *StatusREST) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {
+ return r.store.ConvertToTable(ctx, object, tableOptions)
+}
+
+// NewStorage for Pipeline storage
+func NewStorage(optsGetter apigeneric.RESTOptionsGetter) (PipelineStorage, error) {
+ store := &apiregistry.Store{
+ NewFunc: func() runtime.Object { return &kkcorev1.Pipeline{} },
+ NewListFunc: func() runtime.Object { return &kkcorev1.PipelineList{} },
+ DefaultQualifiedResource: kkcorev1.SchemeGroupVersion.WithResource("pipelines").GroupResource(),
+ SingularQualifiedResource: kkcorev1.SchemeGroupVersion.WithResource("pipeline").GroupResource(),
+
+ CreateStrategy: Strategy,
+ UpdateStrategy: Strategy,
+ DeleteStrategy: Strategy,
+ ReturnDeletedObject: true,
+
+ TableConvertor: apirest.NewDefaultTableConvertor(kkcorev1.SchemeGroupVersion.WithResource("pipelines").GroupResource()),
+ }
+ options := &apigeneric.StoreOptions{
+ RESTOptions: optsGetter,
+ }
+ if err := store.CompleteWithOptions(options); err != nil {
+ return PipelineStorage{}, err
+ }
+
+ return PipelineStorage{
+ Pipeline: &REST{store},
+ PipelineStatus: &StatusREST{store},
+ }, nil
+}
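+
+// A registration sketch (hypothetical paths; the actual hookup lives in the
+// proxy's resource installation code):
+//
+//	storage, err := NewStorage(getter)
+//	// "pipelines"        -> storage.Pipeline
+//	// "pipelines/status" -> storage.PipelineStatus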
diff --git a/feature/pkg/proxy/resources/pipeline/strategy.go b/feature/pkg/proxy/resources/pipeline/strategy.go
new file mode 100644
index 000000000..b7d01d14b
--- /dev/null
+++ b/feature/pkg/proxy/resources/pipeline/strategy.go
@@ -0,0 +1,116 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pipeline
+
+import (
+ "context"
+ "errors"
+ "reflect"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ apinames "k8s.io/apiserver/pkg/storage/names"
+ "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+// pipelineStrategy implements behavior for Pipelines
+type pipelineStrategy struct {
+ runtime.ObjectTyper
+ apinames.NameGenerator
+}
+
+// Strategy is the default logic that applies when creating and updating Pipeline
+// objects via the REST API.
+var Strategy = pipelineStrategy{_const.Scheme, apinames.SimpleNameGenerator}
+
+// ===CreateStrategy===
+
+// NamespaceScoped always true
+func (t pipelineStrategy) NamespaceScoped() bool {
+ return true
+}
+
+// PrepareForCreate does nothing
+func (t pipelineStrategy) PrepareForCreate(context.Context, runtime.Object) {}
+
+// Validate always passes
+func (t pipelineStrategy) Validate(context.Context, runtime.Object) field.ErrorList {
+ // do nothing
+ return nil
+}
+
+// WarningsOnCreate does nothing
+func (t pipelineStrategy) WarningsOnCreate(context.Context, runtime.Object) []string {
+ // do nothing
+ return nil
+}
+
+// Canonicalize does nothing
+func (t pipelineStrategy) Canonicalize(runtime.Object) {
+ // do nothing
+}
+
+// ===UpdateStrategy===
+
+// AllowCreateOnUpdate always false
+func (t pipelineStrategy) AllowCreateOnUpdate() bool {
+ return false
+}
+
+// PrepareForUpdate does nothing
+func (t pipelineStrategy) PrepareForUpdate(context.Context, runtime.Object, runtime.Object) {
+ // do nothing
+}
+
+// ValidateUpdate ensures the spec is immutable; only the status may change
+func (t pipelineStrategy) ValidateUpdate(_ context.Context, obj, old runtime.Object) field.ErrorList {
+ // only support update status
+ pipeline, ok := obj.(*kkcorev1.Pipeline)
+ if !ok {
+ return field.ErrorList{field.InternalError(field.NewPath("spec"), errors.New("the object is not Pipeline"))}
+ }
+ oldPipeline, ok := old.(*kkcorev1.Pipeline)
+ if !ok {
+ return field.ErrorList{field.InternalError(field.NewPath("spec"), errors.New("the object is not Pipeline"))}
+ }
+ if !reflect.DeepEqual(pipeline.Spec, oldPipeline.Spec) {
+ return field.ErrorList{field.Forbidden(field.NewPath("spec"), "spec is immutable")}
+ }
+
+ return nil
+}
+
+// WarningsOnUpdate always nil
+func (t pipelineStrategy) WarningsOnUpdate(context.Context, runtime.Object, runtime.Object) []string {
+ // do nothing
+ return nil
+}
+
+// AllowUnconditionalUpdate always true
+func (t pipelineStrategy) AllowUnconditionalUpdate() bool {
+ return true
+}
+
+// ===ResetFieldsStrategy===
+
+// GetResetFields always nil
+func (t pipelineStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
+ return nil
+}
diff --git a/feature/pkg/proxy/resources/task/storage.go b/feature/pkg/proxy/resources/task/storage.go
new file mode 100644
index 000000000..7e098fbdd
--- /dev/null
+++ b/feature/pkg/proxy/resources/task/storage.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package task
+
+import (
+ "context"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ apigeneric "k8s.io/apiserver/pkg/registry/generic"
+ apiregistry "k8s.io/apiserver/pkg/registry/generic/registry"
+ apirest "k8s.io/apiserver/pkg/registry/rest"
+ apistorage "k8s.io/apiserver/pkg/storage"
+ "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+
+ kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
+)
+
+// TaskStorage storage for Task
+type TaskStorage struct {
+ Task *REST
+ TaskStatus *StatusREST
+}
+
+// REST resource for Task
+type REST struct {
+ *apiregistry.Store
+}
+
+// StatusREST status subresource for Task
+type StatusREST struct {
+ store *apiregistry.Store
+}
+
+// NamespaceScoped is true for Task
+func (r *StatusREST) NamespaceScoped() bool {
+ return true
+}
+
+// New creates a new Task object.
+func (r *StatusREST) New() runtime.Object {
+ return &kkcorev1alpha1.Task{}
+}
+
+// Destroy cleans up resources on shutdown.
+func (r *StatusREST) Destroy() {
+ // Given that underlying store is shared with REST,
+ // we don't destroy it here explicitly.
+}
+
+// Get retrieves the object from the storage. It is required to support Patch.
+func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
+ return r.store.Get(ctx, name, options)
+}
+
+// Update alters the status subset of an object.
+func (r *StatusREST) Update(ctx context.Context, name string, objInfo apirest.UpdatedObjectInfo, createValidation apirest.ValidateObjectFunc, updateValidation apirest.ValidateObjectUpdateFunc, _ bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
+ // We are explicitly setting forceAllowCreate to false in the call to the underlying storage because
+ // subresources should never allow create on update.
+ return r.store.Update(ctx, name, objInfo, createValidation, updateValidation, false, options)
+}
+
+// GetResetFields implements rest.ResetFieldsStrategy
+func (r *StatusREST) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
+ return r.store.GetResetFields()
+}
+
+// ConvertToTable prints the table view.
+func (r *StatusREST) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {
+ return r.store.ConvertToTable(ctx, object, tableOptions)
+}
+
+// NewStorage returns storage for the Task resource and its status subresource.
+func NewStorage(optsGetter apigeneric.RESTOptionsGetter) (TaskStorage, error) {
+ store := &apiregistry.Store{
+ NewFunc: func() runtime.Object { return &kkcorev1alpha1.Task{} },
+ NewListFunc: func() runtime.Object { return &kkcorev1alpha1.TaskList{} },
+ PredicateFunc: MatchTask,
+ DefaultQualifiedResource: kkcorev1alpha1.SchemeGroupVersion.WithResource("tasks").GroupResource(),
+ SingularQualifiedResource: kkcorev1alpha1.SchemeGroupVersion.WithResource("task").GroupResource(),
+
+ CreateStrategy: Strategy,
+ UpdateStrategy: Strategy,
+ DeleteStrategy: Strategy,
+ ReturnDeletedObject: true,
+
+ TableConvertor: apirest.NewDefaultTableConvertor(kkcorev1alpha1.SchemeGroupVersion.WithResource("tasks").GroupResource()),
+ }
+ options := &apigeneric.StoreOptions{
+ RESTOptions: optsGetter,
+ AttrFunc: GetAttrs,
+ TriggerFunc: map[string]apistorage.IndexerFunc{kkcorev1alpha1.TaskOwnerField: OwnerPipelineTriggerFunc},
+ Indexers: Indexers(),
+ }
+ if err := store.CompleteWithOptions(options); err != nil {
+ return TaskStorage{}, err
+ }
+
+ return TaskStorage{
+ Task: &REST{store},
+ TaskStatus: &StatusREST{store},
+ }, nil
+}
diff --git a/feature/pkg/proxy/resources/task/strategy.go b/feature/pkg/proxy/resources/task/strategy.go
new file mode 100644
index 000000000..0f0c8bb82
--- /dev/null
+++ b/feature/pkg/proxy/resources/task/strategy.go
@@ -0,0 +1,213 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package task
+
+import (
+ "context"
+ "errors"
+ "reflect"
+
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ apigeneric "k8s.io/apiserver/pkg/registry/generic"
+ apistorage "k8s.io/apiserver/pkg/storage"
+ apinames "k8s.io/apiserver/pkg/storage/names"
+ cgtoolscache "k8s.io/client-go/tools/cache"
+ "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+
+ kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+const pipelineKind = "Pipeline"
+
+// taskStrategy implements behavior for Tasks
+type taskStrategy struct {
+ runtime.ObjectTyper
+ apinames.NameGenerator
+}
+
+// Strategy is the default logic that applies when creating and updating Task
+// objects via the REST API.
+var Strategy = taskStrategy{_const.Scheme, apinames.SimpleNameGenerator}
+
+// ===CreateStrategy===
+
+// NamespaceScoped always true
+func (t taskStrategy) NamespaceScoped() bool {
+ return true
+}
+
+// PrepareForCreate sets the task's status to Pending
+func (t taskStrategy) PrepareForCreate(_ context.Context, obj runtime.Object) {
+ // init status when create
+ if task, ok := obj.(*kkcorev1alpha1.Task); ok {
+ task.Status = kkcorev1alpha1.TaskStatus{
+ Phase: kkcorev1alpha1.TaskPhasePending,
+ }
+ }
+}
+
+// Validate always pass
+func (t taskStrategy) Validate(context.Context, runtime.Object) field.ErrorList {
+ return nil
+}
+
+// WarningsOnCreate does nothing
+func (t taskStrategy) WarningsOnCreate(context.Context, runtime.Object) []string {
+ return nil
+}
+
+// Canonicalize does nothing
+func (t taskStrategy) Canonicalize(runtime.Object) {}
+
+// ===UpdateStrategy===
+
+// AllowCreateOnUpdate always false
+func (t taskStrategy) AllowCreateOnUpdate() bool {
+ return false
+}
+
+// PrepareForUpdate does nothing
+func (t taskStrategy) PrepareForUpdate(context.Context, runtime.Object, runtime.Object) {}
+
+// ValidateUpdate ensures the spec is immutable; only the status may change
+func (t taskStrategy) ValidateUpdate(_ context.Context, obj, old runtime.Object) field.ErrorList {
+ // only support update status
+ task, ok := obj.(*kkcorev1alpha1.Task)
+ if !ok {
+ return field.ErrorList{field.InternalError(field.NewPath("spec"), errors.New("the object is not Task"))}
+ }
+ oldTask, ok := old.(*kkcorev1alpha1.Task)
+ if !ok {
+ return field.ErrorList{field.InternalError(field.NewPath("spec"), errors.New("the object is not Task"))}
+ }
+ if !reflect.DeepEqual(task.Spec, oldTask.Spec) {
+ return field.ErrorList{field.Forbidden(field.NewPath("spec"), "spec is immutable")}
+ }
+
+ return nil
+}
+
+// WarningsOnUpdate always nil
+func (t taskStrategy) WarningsOnUpdate(context.Context, runtime.Object, runtime.Object) []string {
+ return nil
+}
+
+// AllowUnconditionalUpdate always true
+func (t taskStrategy) AllowUnconditionalUpdate() bool {
+ return true
+}
+
+// ===ResetFieldsStrategy===
+
+// GetResetFields always nil
+func (t taskStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
+ return nil
+}
+
+// OwnerPipelineIndexFunc returns the index value for a Task whose ownerReference is a Pipeline.
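+// The index value is the NamespacedName of the owning Pipeline; e.g. a Task in
+// namespace "default" owned by Pipeline "sample" is indexed as "default/sample".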
+func OwnerPipelineIndexFunc(obj any) ([]string, error) {
+ task, ok := obj.(*kkcorev1alpha1.Task)
+ if !ok {
+ return nil, errors.New("not Task")
+ }
+
+ var index string
+ for _, reference := range task.OwnerReferences {
+ if reference.Kind == pipelineKind {
+ index = types.NamespacedName{
+ Namespace: task.Namespace,
+ Name: reference.Name,
+ }.String()
+
+ break
+ }
+ }
+ if index == "" {
+ return nil, errors.New("task has no ownerReference.pipeline")
+ }
+
+ return []string{index}, nil
+}
+
+// Indexers returns the indexers for task storage.
+func Indexers() *cgtoolscache.Indexers {
+ return &cgtoolscache.Indexers{
+ apistorage.FieldIndex(kkcorev1alpha1.TaskOwnerField): OwnerPipelineIndexFunc,
+ }
+}
+
+// MatchTask returns a generic matcher for a given label and field selector.
+func MatchTask(label labels.Selector, fd fields.Selector) apistorage.SelectionPredicate {
+ return apistorage.SelectionPredicate{
+ Label: label,
+ Field: fd,
+ GetAttrs: GetAttrs,
+ IndexFields: []string{kkcorev1alpha1.TaskOwnerField},
+ }
+}
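+
+// With the matcher and indexers above, clients can filter Tasks server-side by
+// their owning Pipeline, e.g. (illustrative) a LIST call whose field selector sets
+// kkcorev1alpha1.TaskOwnerField to "default/sample".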
+
+// GetAttrs returns labels and fields of a given object for filtering purposes.
+func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
+ task, ok := obj.(*kkcorev1alpha1.Task)
+ if !ok {
+ return nil, nil, errors.New("not Task")
+ }
+
+ return task.ObjectMeta.Labels, ToSelectableFields(task), nil
+}
+
+// ToSelectableFields returns a field set that represents the object
+func ToSelectableFields(task *kkcorev1alpha1.Task) fields.Set {
+ // The purpose of allocation with a given number of elements is to reduce
+ // amount of allocations needed to create the fields.Set. If you add any
+ // field here or the number of object-meta related fields changes, this should
+ // be adjusted.
+ taskSpecificFieldsSet := make(fields.Set)
+ for _, reference := range task.OwnerReferences {
+ if reference.Kind == pipelineKind {
+ taskSpecificFieldsSet[kkcorev1alpha1.TaskOwnerField] = types.NamespacedName{
+ Namespace: task.Namespace,
+ Name: reference.Name,
+ }.String()
+
+ break
+ }
+ }
+
+ return apigeneric.AddObjectMetaFieldsSet(taskSpecificFieldsSet, &task.ObjectMeta, true)
+}
+
+// OwnerPipelineTriggerFunc returns the owning Pipeline of the given object as the trigger value.
+func OwnerPipelineTriggerFunc(obj runtime.Object) string {
+ if task, ok := obj.(*kkcorev1alpha1.Task); ok {
+ for _, reference := range task.OwnerReferences {
+ if reference.Kind == pipelineKind {
+ return types.NamespacedName{
+ Namespace: task.Namespace,
+ Name: reference.Name,
+ }.String()
+ }
+ }
+ }
+
+ return ""
+}
diff --git a/feature/pkg/proxy/router.go b/feature/pkg/proxy/router.go
new file mode 100644
index 000000000..281254d42
--- /dev/null
+++ b/feature/pkg/proxy/router.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package proxy
+
+import "net/http"
+
+type router struct {
+ path string // The path of the action
+ pathExpr *pathExpression // cached compilation of rootPath as RegExp
+ handlers map[string]http.HandlerFunc
+}
+
+// Types and functions to support the sorting of Dispatchers
+
+type dispatcherCandidate struct {
+ router router
+ finalMatch string
+ matchesCount int // the number of capturing groups
+ literalCount int // the number of literal characters (means those not resulting from template variable substitution)
+ nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’)
+}
+type sortableDispatcherCandidates struct {
+ candidates []dispatcherCandidate
+}
+
+func (dc *sortableDispatcherCandidates) Len() int {
+ return len(dc.candidates)
+}
+
+func (dc *sortableDispatcherCandidates) Swap(i, j int) {
+ dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
+}
+
+func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
+ ci := dc.candidates[i]
+ cj := dc.candidates[j]
+ // primary key
+ if ci.matchesCount < cj.matchesCount {
+ return true
+ }
+ if ci.matchesCount > cj.matchesCount {
+ return false
+ }
+ // secondary key
+ if ci.literalCount < cj.literalCount {
+ return true
+ }
+ if ci.literalCount > cj.literalCount {
+ return false
+ }
+ // tertiary key
+ return ci.nonDefaultCount < cj.nonDefaultCount
+}
diff --git a/feature/pkg/proxy/transport.go b/feature/pkg/proxy/transport.go
new file mode 100644
index 000000000..d94c1343d
--- /dev/null
+++ b/feature/pkg/proxy/transport.go
@@ -0,0 +1,480 @@
+/*
+Copyright 2024 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package proxy
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/managedfields"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
+ "k8s.io/apiserver/pkg/authorization/authorizerfactory"
+ apiendpoints "k8s.io/apiserver/pkg/endpoints"
+ genericapifilters "k8s.io/apiserver/pkg/endpoints/filters"
+ apihandlers "k8s.io/apiserver/pkg/endpoints/handlers"
+ apirequest "k8s.io/apiserver/pkg/endpoints/request"
+ apirest "k8s.io/apiserver/pkg/registry/rest"
+ "k8s.io/client-go/rest"
+ "k8s.io/klog/v2"
+ "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/proxy/internal"
+ "github.com/kubesphere/kubekey/v4/pkg/proxy/resources/config"
+ "github.com/kubesphere/kubekey/v4/pkg/proxy/resources/inventory"
+ "github.com/kubesphere/kubekey/v4/pkg/proxy/resources/pipeline"
+ "github.com/kubesphere/kubekey/v4/pkg/proxy/resources/task"
+)
+
+// NewConfig replaces the restconfig transport with the proxy transport.
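+//
+// A minimal usage sketch (hypothetical caller, assuming a controller-runtime based client):
+//
+//    restconfig, _ := ctrl.GetConfig()
+//    restconfig, _ = proxy.NewConfig(restconfig)
+//    cli, _ := ctrlclient.New(restconfig, ctrlclient.Options{})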
+func NewConfig(restconfig *rest.Config) (*rest.Config, error) {
+ var err error
+ restconfig.Transport, err = newProxyTransport(restconfig)
+ if err != nil {
+ return nil, fmt.Errorf("create proxy transport error: %w", err)
+ }
+ restconfig.TLSClientConfig = rest.TLSClientConfig{}
+
+ return restconfig, nil
+}
+
+// newProxyTransport returns a new http.RoundTripper for use in the ctrl client.
+// When restConfig is not empty: it connects to a Kubernetes cluster and stores some resources there,
+// such as pipeline.kubekey.kubesphere.io/v1, inventory.kubekey.kubesphere.io/v1, config.kubekey.kubesphere.io/v1.
+// When restConfig is empty: it stores all resources locally.
+//
+// SPECIFICALLY: since Tasks are runtime data, reentrant and large in quantity,
+// they are always stored locally.
+func newProxyTransport(restConfig *rest.Config) (http.RoundTripper, error) {
+ lt := &transport{
+ authz: authorizerfactory.NewAlwaysAllowAuthorizer(),
+ handlerChainFunc: func(handler http.Handler) http.Handler {
+ return genericapifilters.WithRequestInfo(handler, &apirequest.RequestInfoFactory{
+ APIPrefixes: sets.NewString("apis"),
+ })
+ },
+ }
+ if restConfig.Host != "" {
+ clientFor, err := rest.HTTPClientFor(restConfig)
+ if err != nil {
+ return nil, err
+ }
+ lt.restClient = clientFor
+ }
+
+ // register kkcorev1alpha1 resources
+ kkv1alpha1 := newAPIIResources(kkcorev1alpha1.SchemeGroupVersion)
+ storage, err := task.NewStorage(internal.NewFileRESTOptionsGetter(kkcorev1alpha1.SchemeGroupVersion))
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to create storage")
+
+ return nil, err
+ }
+ if err := kkv1alpha1.AddResource(resourceOptions{
+ path: "tasks",
+ storage: storage.Task,
+ }); err != nil {
+ klog.V(6).ErrorS(err, "failed to add resource")
+
+ return nil, err
+ }
+ if err := kkv1alpha1.AddResource(resourceOptions{
+ path: "tasks/status",
+ storage: storage.TaskStatus,
+ }); err != nil {
+ klog.V(6).ErrorS(err, "failed to add resource")
+
+ return nil, err
+ }
+ if err := lt.registerResources(kkv1alpha1); err != nil {
+ klog.V(6).ErrorS(err, "failed to register resources")
+ }
+
+ // when restConfig is empty, store all resources locally
+ if restConfig.Host == "" {
+ // register kkcorev1 resources
+ kkv1 := newAPIIResources(kkcorev1.SchemeGroupVersion)
+ // add config
+ configStorage, err := config.NewStorage(internal.NewFileRESTOptionsGetter(kkcorev1.SchemeGroupVersion))
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to create storage")
+
+ return nil, err
+ }
+ if err := kkv1.AddResource(resourceOptions{
+ path: "configs",
+ storage: configStorage.Config,
+ }); err != nil {
+ klog.V(6).ErrorS(err, "failed to add resource")
+
+ return nil, err
+ }
+ // add inventory
+ inventoryStorage, err := inventory.NewStorage(internal.NewFileRESTOptionsGetter(kkcorev1.SchemeGroupVersion))
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to create storage")
+
+ return nil, err
+ }
+ if err := kkv1.AddResource(resourceOptions{
+ path: "inventories",
+ storage: inventoryStorage.Inventory,
+ }); err != nil {
+ klog.V(6).ErrorS(err, "failed to add resource")
+
+ return nil, err
+ }
+ // add pipeline
+ pipelineStorage, err := pipeline.NewStorage(internal.NewFileRESTOptionsGetter(kkcorev1.SchemeGroupVersion))
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to create storage")
+
+ return nil, err
+ }
+ if err := kkv1.AddResource(resourceOptions{
+ path: "pipelines",
+ storage: pipelineStorage.Pipeline,
+ }); err != nil {
+ klog.V(6).ErrorS(err, "failed to add resource")
+
+ return nil, err
+ }
+ if err := kkv1.AddResource(resourceOptions{
+ path: "pipelines/status",
+ storage: pipelineStorage.PipelineStatus,
+ }); err != nil {
+ klog.V(6).ErrorS(err, "failed to add resource")
+
+ return nil, err
+ }
+
+ if err := lt.registerResources(kkv1); err != nil {
+ klog.V(6).ErrorS(err, "failed to register resources")
+
+ return nil, err
+ }
+ }
+
+ return lt, nil
+}
+
+type responseWriter struct {
+ *http.Response
+}
+
+// Header returns the response headers.
+func (r *responseWriter) Header() http.Header {
+ return r.Response.Header
+}
+
+// Write sets the response body.
+func (r *responseWriter) Write(bs []byte) (int, error) {
+ r.Response.Body = io.NopCloser(bytes.NewBuffer(bs))
+
+ // report the full length as written to satisfy the io.Writer contract
+ return len(bs), nil
+}
+
+// WriteHeader sets the response status code.
+func (r *responseWriter) WriteHeader(statusCode int) {
+ r.Response.StatusCode = statusCode
+}
+
+type transport struct {
+ // restClient is used to connect to the remote cluster
+ restClient *http.Client
+
+ authz authorizer.Authorizer
+ // routers is a list of routers
+ routers []router
+
+ // handlerChainFunc wraps each handler, e.g. to inject request info.
+ handlerChainFunc func(handler http.Handler) http.Handler
+}
+
+// RoundTrip handles an http.Request for the proxy transport.
+func (l *transport) RoundTrip(request *http.Request) (*http.Response, error) {
+ if l.restClient != nil && !strings.HasPrefix(request.URL.Path, "/apis/"+kkcorev1alpha1.SchemeGroupVersion.String()) {
+ return l.restClient.Transport.RoundTrip(request)
+ }
+
+ response := &http.Response{
+ Proto: "local",
+ Header: make(http.Header),
+ }
+ // dispatch request
+ handler, err := l.detectDispatcher(request)
+ if err != nil {
+ return response, fmt.Errorf("no router for request. url: %s, method: %s", request.URL.Path, request.Method)
+ }
+ // call handler
+ l.handlerChainFunc(handler).ServeHTTP(&responseWriter{response}, request)
+
+ return response, nil
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
+func (l transport) detectDispatcher(request *http.Request) (http.HandlerFunc, error) {
+ filtered := &sortableDispatcherCandidates{}
+ for _, each := range l.routers {
+ matches := each.pathExpr.Matcher.FindStringSubmatch(request.URL.Path)
+ if matches != nil {
+ filtered.candidates = append(filtered.candidates,
+ dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
+ }
+ }
+ if len(filtered.candidates) == 0 {
+ return nil, errors.New("not found")
+ }
+ sort.Sort(sort.Reverse(filtered))
+
+ handler, ok := filtered.candidates[0].router.handlers[request.Method]
+ if !ok {
+ return nil, errors.New("not found")
+ }
+
+ return handler, nil
+}
+
+func (l *transport) registerResources(resources *apiResources) error {
+ // register apiResources router
+ l.registerRouter(http.MethodGet, resources.prefix, resources.handlerAPIResources(), true)
+ // register resources router
+ for _, o := range resources.resourceOptions {
+ // what verbs are supported by the storage, used to know what verbs we support per path
+
+ _, isLister := o.storage.(apirest.Lister)
+ _, isTableProvider := o.storage.(apirest.TableConvertor)
+ if isLister && !isTableProvider {
+ // All listers must implement TableProvider
+ return fmt.Errorf("%q must implement TableConvertor", o.path)
+ }
+
+ // Build the request scope for the namespace-scoped resource.
+ reqScope, err := newReqScope(resources, o, l.authz)
+ if err != nil {
+ return err
+ }
+ // LIST
+ l.registerList(resources, reqScope, o)
+ // POST
+ l.registerPost(resources, reqScope, o)
+ // DELETECOLLECTION
+ l.registerDeleteCollection(resources, reqScope, o)
+ // DEPRECATED in 1.11 WATCHLIST
+ l.registerWatchList(resources, reqScope, o)
+ // GET
+ l.registerGet(resources, reqScope, o)
+ // PUT
+ l.registerPut(resources, reqScope, o)
+ // PATCH
+ l.registerPatch(resources, reqScope, o)
+ // DELETE
+ l.registerDelete(resources, reqScope, o)
+ // DEPRECATED in 1.11 WATCH
+ l.registerWatch(resources, reqScope, o)
+ // CONNECT
+ l.registerConnect(resources, reqScope, o)
+ }
+
+ return nil
+}
+
+// newReqScope builds the apihandlers.RequestScope for a resource.
+func newReqScope(resources *apiResources, o resourceOptions, authz authorizer.Authorizer) (apihandlers.RequestScope, error) {
+ tableProvider, _ := o.storage.(apirest.TableConvertor)
+ gvAcceptor, _ := o.storage.(apirest.GroupVersionAcceptor)
+ // request scope
+ fqKindToRegister, err := apiendpoints.GetResourceKind(resources.gv, o.storage, _const.Scheme)
+ if err != nil {
+ return apihandlers.RequestScope{}, err
+ }
+ reqScope := apihandlers.RequestScope{
+ Namer: apihandlers.ContextBasedNaming{
+ Namer: meta.NewAccessor(),
+ ClusterScoped: false,
+ },
+ Serializer: _const.Codecs,
+ ParameterCodec: _const.ParameterCodec,
+ Creater: _const.Scheme,
+ Convertor: _const.Scheme,
+ Defaulter: _const.Scheme,
+ Typer: _const.Scheme,
+ UnsafeConvertor: _const.Scheme,
+ Authorizer: authz,
+ EquivalentResourceMapper: runtime.NewEquivalentResourceRegistry(),
+ TableConvertor: tableProvider,
+ Resource: resources.gv.WithResource(o.resource),
+ Subresource: o.subresource,
+ Kind: fqKindToRegister,
+ AcceptsGroupVersionDelegate: gvAcceptor,
+ HubGroupVersion: schema.GroupVersion{Group: fqKindToRegister.Group, Version: runtime.APIVersionInternal},
+ MetaGroupVersion: metav1.SchemeGroupVersion,
+ MaxRequestBodyBytes: 0,
+ }
+ var resetFields map[fieldpath.APIVersion]*fieldpath.Set
+ if resetFieldsStrategy, isResetFieldsStrategy := o.storage.(apirest.ResetFieldsStrategy); isResetFieldsStrategy {
+ resetFields = resetFieldsStrategy.GetResetFields()
+ }
+ reqScope.FieldManager, err = managedfields.NewDefaultFieldManager(
+ managedfields.NewDeducedTypeConverter(),
+ _const.Scheme,
+ _const.Scheme,
+ _const.Scheme,
+ fqKindToRegister,
+ reqScope.HubGroupVersion,
+ o.subresource,
+ resetFields,
+ )
+ if err != nil {
+ return apihandlers.RequestScope{}, err
+ }
+
+ return reqScope, nil
+}
+
+func (l *transport) registerRouter(verb, path string, handler http.HandlerFunc, shouldAdd bool) {
+ if !shouldAdd {
+ // if the router should not be added, return
+ return
+ }
+ for i, r := range l.routers {
+ if r.path != path {
+ continue
+ }
+ // add handler to router
+ if _, ok := r.handlers[verb]; ok {
+ // if the handler already exists, log the conflict and skip
+ klog.V(6).ErrorS(errors.New("handler has already been registered"), "failed to register router", "path", path, "verb", verb)
+
+ return
+ }
+ l.routers[i].handlers[verb] = handler
+
+ return
+ }
+
+ // add new router
+ expression, err := newPathExpression(path)
+ if err != nil {
+ klog.V(6).ErrorS(err, "failed to register router", "path", path, "verb", verb)
+
+ return
+ }
+ l.routers = append(l.routers, router{
+ path: path,
+ pathExpr: expression,
+ handlers: map[string]http.HandlerFunc{
+ verb: handler,
+ },
+ })
+}
+
+func (l *transport) registerList(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) {
+ lister, isLister := o.storage.(apirest.Lister)
+ watcher, isWatcher := o.storage.(apirest.Watcher)
+ l.registerRouter(http.MethodGet, resources.prefix+o.resourcePath, apihandlers.ListResource(lister, watcher, &reqScope, false, resources.minRequestTimeout), isLister)
+ // list or watch across namespaces.
+ // For example: LIST all pods in all namespaces by sending a LIST request at /api/apiVersion/pods.
+ // LIST
+ l.registerRouter(http.MethodGet, resources.prefix+"/"+o.resource, apihandlers.ListResource(lister, watcher, &reqScope, false, resources.minRequestTimeout), o.subresource == "" && isLister)
+ // WATCHLIST
+ l.registerRouter(http.MethodGet, resources.prefix+"/watch/"+o.resource, apihandlers.ListResource(lister, watcher, &reqScope, true, resources.minRequestTimeout), o.subresource == "" && isWatcher && isLister)
+}
+
+func (l *transport) registerPost(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) {
+ creater, isCreater := o.storage.(apirest.Creater)
+ namedCreater, isNamedCreater := o.storage.(apirest.NamedCreater)
+ if isNamedCreater {
+ l.registerRouter(http.MethodPost, resources.prefix+o.resourcePath, apihandlers.CreateNamedResource(namedCreater, &reqScope, o.admit), isCreater)
+ } else {
+ l.registerRouter(http.MethodPost, resources.prefix+o.resourcePath, apihandlers.CreateResource(creater, &reqScope, o.admit), isCreater)
+ }
+}
+
+func (l *transport) registerDeleteCollection(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) {
+ collectionDeleter, isCollectionDeleter := o.storage.(apirest.CollectionDeleter)
+ l.registerRouter(http.MethodDelete, resources.prefix+o.resourcePath, apihandlers.DeleteCollection(collectionDeleter, isCollectionDeleter, &reqScope, o.admit), isCollectionDeleter)
+}
+
+func (l *transport) registerWatchList(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) {
+ lister, isLister := o.storage.(apirest.Lister)
+ watcher, isWatcher := o.storage.(apirest.Watcher)
+ l.registerRouter(http.MethodGet, resources.prefix+"/watch"+o.resourcePath, apihandlers.ListResource(lister, watcher, &reqScope, true, resources.minRequestTimeout), isWatcher && isLister)
+}
+
+func (l *transport) registerGet(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) {
+ getterWithOptions, isGetterWithOptions := o.storage.(apirest.GetterWithOptions)
+ getter, isGetter := o.storage.(apirest.Getter)
+ if isGetterWithOptions {
+ _, getSubpath, _ := getterWithOptions.NewGetOptions()
+ l.registerRouter(http.MethodGet, resources.prefix+o.itemPath, apihandlers.GetResourceWithOptions(getterWithOptions, &reqScope, o.subresource != ""), isGetter)
+ l.registerRouter(http.MethodGet, resources.prefix+o.itemPath+"/{path:*}", apihandlers.GetResourceWithOptions(getterWithOptions, &reqScope, o.subresource != ""), isGetter && getSubpath)
+ } else {
+ l.registerRouter(http.MethodGet, resources.prefix+o.itemPath, apihandlers.GetResource(getter, &reqScope), isGetter)
+ l.registerRouter(http.MethodGet, resources.prefix+o.itemPath+"/{path:*}", apihandlers.GetResource(getter, &reqScope), false)
+ }
+}
+
+func (l *transport) registerPut(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) {
+ updater, isUpdater := o.storage.(apirest.Updater)
+ l.registerRouter(http.MethodPut, resources.prefix+o.itemPath, apihandlers.UpdateResource(updater, &reqScope, o.admit), isUpdater)
+}
+
+func (l *transport) registerPatch(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) {
+ patcher, isPatcher := o.storage.(apirest.Patcher)
+ l.registerRouter(http.MethodPatch, resources.prefix+o.itemPath, apihandlers.PatchResource(patcher, &reqScope, o.admit, []string{
+ string(types.JSONPatchType),
+ string(types.MergePatchType),
+ string(types.StrategicMergePatchType),
+ string(types.ApplyPatchType),
+ }), isPatcher)
+}
+
+func (l *transport) registerDelete(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) {
+ gracefulDeleter, isGracefulDeleter := o.storage.(apirest.GracefulDeleter)
+ l.registerRouter(http.MethodDelete, resources.prefix+o.itemPath, apihandlers.DeleteResource(gracefulDeleter, isGracefulDeleter, &reqScope, o.admit), isGracefulDeleter)
+}
+
+func (l *transport) registerWatch(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) {
+ lister, _ := o.storage.(apirest.Lister)
+ watcher, isWatcher := o.storage.(apirest.Watcher)
+ l.registerRouter(http.MethodGet, resources.prefix+"/watch"+o.itemPath, apihandlers.ListResource(lister, watcher, &reqScope, true, resources.minRequestTimeout), isWatcher)
+}
+
+func (l *transport) registerConnect(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) {
+ var connectSubpath bool
+ connecter, isConnecter := o.storage.(apirest.Connecter)
+ if isConnecter {
+ _, connectSubpath, _ = connecter.NewConnectOptions()
+ }
+ l.registerRouter(http.MethodConnect, resources.prefix+o.itemPath, apihandlers.ConnectResource(connecter, &reqScope, o.admit, o.path, o.subresource != ""), isConnecter)
+ l.registerRouter(http.MethodConnect, resources.prefix+o.itemPath+"/{path:*}", apihandlers.ConnectResource(connecter, &reqScope, o.admit, o.path, o.subresource != ""), isConnecter && connectSubpath)
+}
diff --git a/feature/pkg/variable/helper.go b/feature/pkg/variable/helper.go
new file mode 100644
index 000000000..f82c30b7c
--- /dev/null
+++ b/feature/pkg/variable/helper.go
@@ -0,0 +1,469 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package variable
+
+import (
+ "fmt"
+ "net"
+ "reflect"
+ "slices"
+ "strconv"
+ "strings"
+ "time"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+)
+
+// combineVariables merges multiple variable maps into one.
+// Values in m2 override those in m1 when a key is repeated; nested maps are merged recursively.
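+//
+// Illustrative example:
+//
+//    combineVariables(
+//        map[string]any{"a": map[string]any{"x": 1}, "b": "old"},
+//        map[string]any{"a": map[string]any{"y": 2}, "b": "new"},
+//    )
+//    // => map[string]any{"a": map[string]any{"x": 1, "y": 2}, "b": "new"}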
+func combineVariables(m1, m2 map[string]any) map[string]any {
+ var f func(val1, val2 any) any
+ f = func(val1, val2 any) any {
+ if val1 != nil && val2 != nil &&
+ reflect.TypeOf(val1).Kind() == reflect.Map && reflect.TypeOf(val2).Kind() == reflect.Map {
+ mergedVars := make(map[string]any)
+ for _, k := range reflect.ValueOf(val1).MapKeys() {
+ mergedVars[k.String()] = reflect.ValueOf(val1).MapIndex(k).Interface()
+ }
+
+ for _, k := range reflect.ValueOf(val2).MapKeys() {
+ mergedVars[k.String()] = f(mergedVars[k.String()], reflect.ValueOf(val2).MapIndex(k).Interface())
+ }
+
+ return mergedVars
+ }
+
+ return val2
+ }
+ mv := make(map[string]any)
+
+ for k, v := range m1 {
+ mv[k] = v
+ }
+
+ for k, v := range m2 {
+ mv[k] = f(mv[k], v)
+ }
+
+ return mv
+}
+
+func convertGroup(inv kkcorev1.Inventory) map[string]any {
+ groups := make(map[string]any)
+ all := make([]string, 0)
+
+ for hn := range inv.Spec.Hosts {
+ all = append(all, hn)
+ }
+
+ if !slices.Contains(all, _const.VariableLocalHost) { // set default localhost
+ all = append(all, _const.VariableLocalHost)
+ }
+
+ groups[_const.VariableGroupsAll] = all
+
+ for gn := range inv.Spec.Groups {
+ groups[gn] = hostsInGroup(inv, gn)
+ }
+
+ return groups
+}
+
+// hostsInGroup gets the host-name slice of a given group.
+// If the group contains other groups, their hosts are resolved recursively.
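+//
+// For example, with groups g1 = {Hosts: ["h1"], Groups: ["g2"]} and g2 = {Hosts: ["h2"]},
+// hostsInGroup(inv, "g1") resolves to ["h2", "h1"]: child-group hosts are merged first.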
+func hostsInGroup(inv kkcorev1.Inventory, groupName string) []string {
+ if v, ok := inv.Spec.Groups[groupName]; ok {
+ var hosts []string
+ for _, cg := range v.Groups {
+ hosts = mergeSlice(hostsInGroup(inv, cg), hosts)
+ }
+
+ return mergeSlice(hosts, v.Hosts)
+ }
+
+ return nil
+}
+
+// mergeSlice merges two slices, skipping repeated values.
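+// e.g. mergeSlice([]string{"a", "b"}, []string{"b", "c"}) => []string{"a", "b", "c"}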
+func mergeSlice(g1, g2 []string) []string {
+ uniqueValues := make(map[string]bool)
+ mg := make([]string, 0)
+
+ // Add values from the first slice
+ for _, v := range g1 {
+ if !uniqueValues[v] {
+ uniqueValues[v] = true
+
+ mg = append(mg, v)
+ }
+ }
+
+ // Add values from the second slice
+ for _, v := range g2 {
+ if !uniqueValues[v] {
+ uniqueValues[v] = true
+
+ mg = append(mg, v)
+ }
+ }
+
+ return mg
+}
+
+// parseVariable parses all string values into their actual values.
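+// For example, {"a": "{{ .b }}"} with vars {"b": "true"} is rewritten in place to
+// {"a": true}: rendered values equal to "true"/"false" become booleans (see helper_test.go).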
+func parseVariable(v any, parseTmplFunc func(string) (string, error)) error {
+ switch reflect.ValueOf(v).Kind() {
+ case reflect.Map:
+ if err := parseVariableFromMap(v, parseTmplFunc); err != nil {
+ return err
+ }
+ case reflect.Slice, reflect.Array:
+ if err := parseVariableFromArray(v, parseTmplFunc); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// parseVariableFromMap parses the variables when v is a map.
+func parseVariableFromMap(v any, parseTmplFunc func(string) (string, error)) error {
+ for _, kv := range reflect.ValueOf(v).MapKeys() {
+ val := reflect.ValueOf(v).MapIndex(kv)
+ if vv, ok := val.Interface().(string); ok {
+ if !tmpl.IsTmplSyntax(vv) {
+ continue
+ }
+
+ newValue, err := parseTmplFunc(vv)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case strings.EqualFold(newValue, "TRUE"):
+ reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(true))
+ case strings.EqualFold(newValue, "FALSE"):
+ reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(false))
+ default:
+ reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(newValue))
+ }
+ } else {
+ if err := parseVariable(val.Interface(), parseTmplFunc); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// parseVariableFromArray parses the variables when v is a slice.
+func parseVariableFromArray(v any, parseTmplFunc func(string) (string, error)) error {
+ for i := range reflect.ValueOf(v).Len() {
+ val := reflect.ValueOf(v).Index(i)
+ if vv, ok := val.Interface().(string); ok {
+ if !tmpl.IsTmplSyntax(vv) {
+ continue
+ }
+
+ newValue, err := parseTmplFunc(vv)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case strings.EqualFold(newValue, "TRUE"):
+ val.Set(reflect.ValueOf(true))
+ case strings.EqualFold(newValue, "FALSE"):
+ val.Set(reflect.ValueOf(false))
+ default:
+ val.Set(reflect.ValueOf(newValue))
+ }
+ } else {
+ if err := parseVariable(val.Interface(), parseTmplFunc); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// setlocalhostVarialbe sets default vars when the hostname is "localhost"
+func setlocalhostVarialbe(hostname string, v value, hostVars map[string]any) {
+ if hostname == _const.VariableLocalHost {
+ if os, ok := v.Hosts[hostname].RemoteVars[_const.VariableOS]; ok {
+ // try to set hostname by current actual hostname.
+ if osd, ok := os.(map[string]any); ok {
+ hostVars[_const.VariableHostName] = osd[_const.VariableOSHostName]
+ }
+ }
+
+ if _, ok := hostVars[_const.VariableIPv4]; !ok {
+ hostVars[_const.VariableIPv4] = getLocalIP(_const.VariableIPv4)
+ }
+
+ if _, ok := hostVars[_const.VariableIPv6]; !ok {
+ hostVars[_const.VariableIPv6] = getLocalIP(_const.VariableIPv6)
+ }
+ }
+}
+
+// getLocalIP gets the IPv4 or IPv6 address of the local machine
+func getLocalIP(ipType string) string {
+ addrs, err := net.InterfaceAddrs()
+ if err != nil {
+ klog.ErrorS(err, "get network address error")
+ }
+
+ for _, addr := range addrs {
+ if ipNet, ok := addr.(*net.IPNet); ok && !ipNet.IP.IsLoopback() {
+ if ipType == _const.VariableIPv4 && ipNet.IP.To4() != nil {
+ return ipNet.IP.String()
+ }
+
+ if ipType == _const.VariableIPv6 && ipNet.IP.To16() != nil && ipNet.IP.To4() == nil {
+ return ipNet.IP.String()
+ }
+ }
+ }
+
+ klog.V(4).Infof("cannot get local %s address", ipType)
+
+ return ""
+}
+
+// StringVar gets a string value by key
+func StringVar(d map[string]any, args map[string]any, key string) (string, error) {
+ val, ok := args[key]
+ if !ok {
+ klog.V(4).InfoS("cannot find variable", "key", key)
+
+ return "", fmt.Errorf("cannot find variable \"%s\"", key)
+ }
+ // convert to string
+ sv, ok := val.(string)
+ if !ok {
+ klog.V(4).ErrorS(nil, "variable is not string", "key", key)
+
+ return "", fmt.Errorf("variable \"%s\" is not string", key)
+ }
+
+ return tmpl.ParseString(d, sv)
+}
+
+// StringSliceVar gets a string slice value by key.
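+// The value may be a []any of template strings, a string holding a JSON list
+// (e.g. ["a","b"]), or a plain string, which is returned as a one-element slice.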
+func StringSliceVar(d map[string]any, vars map[string]any, key string) ([]string, error) {
+ val, ok := vars[key]
+ if !ok {
+ klog.V(4).InfoS("cannot find variable", "key", key)
+
+ return nil, fmt.Errorf("cannot find variable \"%s\"", key)
+ }
+
+ switch valv := val.(type) {
+ case []any:
+ var ss []string
+
+ for _, a := range valv {
+ av, ok := a.(string)
+ if !ok {
+ klog.V(6).InfoS("variable is not string", "key", key)
+
+ return nil, nil
+ }
+
+ as, err := tmpl.ParseString(d, av)
+ if err != nil {
+ return nil, err
+ }
+
+ ss = append(ss, as)
+ }
+
+ return ss, nil
+ case string:
+ as, err := tmpl.ParseString(d, valv)
+ if err != nil {
+ klog.V(4).ErrorS(err, "parse variable error", "key", key)
+
+ return nil, err
+ }
+
+ var ss []string
+ if err := json.Unmarshal([]byte(as), &ss); err == nil {
+ return ss, nil
+ }
+
+ return []string{as}, nil
+ default:
+ klog.V(4).ErrorS(nil, "unsupported variable type", "key", key)
+
+ return nil, fmt.Errorf("unsupported variable \"%s\" type", key)
+ }
+}
+
+// IntVar gets an int value by key
+func IntVar(d map[string]any, vars map[string]any, key string) (*int, error) {
+ val, ok := vars[key]
+ if !ok {
+ klog.V(4).InfoS("cannot find variable", "key", key)
+
+ return nil, fmt.Errorf("cannot find variable \"%s\"", key)
+ }
+ // convert the value to int based on its kind
+ v := reflect.ValueOf(val)
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return ptr.To(int(v.Int())), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return ptr.To(int(v.Uint())), nil
+ case reflect.Float32, reflect.Float64:
+ return ptr.To(int(v.Float())), nil
+ case reflect.String:
+ vs, err := tmpl.ParseString(d, v.String())
+ if err != nil {
+ klog.V(4).ErrorS(err, "parse string variable error", "key", key)
+
+ return nil, err
+ }
+
+ atoi, err := strconv.Atoi(vs)
+ if err != nil {
+ klog.V(4).ErrorS(err, "parse convert string to int error", "key", key)
+
+ return nil, err
+ }
+
+ return ptr.To(atoi), nil
+ default:
+ klog.V(4).ErrorS(nil, "unsupported variable type", "key", key)
+
+ return nil, fmt.Errorf("unsupported variable \"%s\" type", key)
+ }
+}
+
+// BoolVar gets a bool value by key.
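+// It accepts native bools and strings that render (case-insensitively) to "true" or "false".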
+func BoolVar(d map[string]any, args map[string]any, key string) (*bool, error) {
+ val, ok := args[key]
+ if !ok {
+ klog.V(4).InfoS("cannot find variable", "key", key)
+
+ return nil, fmt.Errorf("cannot find variable \"%s\"", key)
+ }
+ // convert the value to bool based on its kind
+ v := reflect.ValueOf(val)
+ switch v.Kind() {
+ case reflect.Bool:
+ return ptr.To(v.Bool()), nil
+ case reflect.String:
+ vs, err := tmpl.ParseString(d, v.String())
+ if err != nil {
+ klog.V(4).ErrorS(err, "parse string variable error", "key", key)
+
+ return nil, err
+ }
+
+ if strings.EqualFold(vs, "TRUE") {
+ return ptr.To(true), nil
+ }
+
+ if strings.EqualFold(vs, "FALSE") {
+ return ptr.To(false), nil
+ }
+ }
+
+ return nil, fmt.Errorf("unsupported variable \"%s\" type", key)
+}
+
+// DurationVar gets a time.Duration value by key
+func DurationVar(d map[string]any, args map[string]any, key string) (time.Duration, error) {
+ stringVar, err := StringVar(d, args, key)
+ if err != nil {
+ return 0, err
+ }
+
+ return time.ParseDuration(stringVar)
+}
+
+// Extension2Variables converts runtime.RawExtension to variables
+func Extension2Variables(ext runtime.RawExtension) map[string]any {
+ if len(ext.Raw) == 0 {
+ return make(map[string]any)
+ }
+
+ var data map[string]any
+ if err := json.Unmarshal(ext.Raw, &data); err != nil {
+ klog.V(4).ErrorS(err, "failed to unmarshal extension to variables")
+ }
+
+ return data
+}
+
+// Extension2Slice converts runtime.RawExtension to a slice.
+// If the runtime.RawExtension contains template syntax, it is parsed first.
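+//
+// Illustrative example: a raw value of ["a", "b"] unmarshals directly to []any{"a", "b"};
+// a raw value of "{{ .hosts }}" is rendered first and, if the result is a JSON list,
+// unmarshalled; otherwise the rendered string is returned as a one-element slice.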
+func Extension2Slice(d map[string]any, ext runtime.RawExtension) []any {
+ if len(ext.Raw) == 0 {
+ return nil
+ }
+
+ var data []any
+ // try to unmarshal the raw data directly, which may define a single value or multiple values
+ if err := json.Unmarshal(ext.Raw, &data); err == nil {
+ return data
+ }
+ // otherwise, render it as a template string
+ val, err := Extension2String(d, ext)
+ if err != nil {
+ klog.ErrorS(err, "extension2string error", "input", string(ext.Raw))
+ }
+
+ if err := json.Unmarshal([]byte(val), &data); err == nil {
+ return data
+ }
+
+ return []any{val}
+}
+
+// Extension2String converts runtime.RawExtension to a string.
+// If the runtime.RawExtension contains template syntax, it is parsed.
+func Extension2String(d map[string]any, ext runtime.RawExtension) (string, error) {
+ if len(ext.Raw) == 0 {
+ return "", nil
+ }
+
+ var input = string(ext.Raw)
+ // try to unquote the string
+ if ns, err := strconv.Unquote(string(ext.Raw)); err == nil {
+ input = ns
+ }
+
+ result, err := tmpl.ParseString(d, input)
+ if err != nil {
+ return "", err
+ }
+
+ return result, nil
+}
diff --git a/feature/pkg/variable/helper_test.go b/feature/pkg/variable/helper_test.go
new file mode 100644
index 000000000..600da3cfe
--- /dev/null
+++ b/feature/pkg/variable/helper_test.go
@@ -0,0 +1,358 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package variable
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+)
+
+func TestMergeVariable(t *testing.T) {
+ testcases := []struct {
+ name string
+ v1 map[string]any
+ v2 map[string]any
+ expected map[string]any
+ }{
+ {
+ name: "primary variables value is empty",
+ v1: nil,
+ v2: map[string]any{
+ "a1": "v1",
+ },
+ expected: map[string]any{
+ "a1": "v1",
+ },
+ },
+ {
+ name: "auxiliary variables value is empty",
+ v1: map[string]any{
+ "p1": "v1",
+ },
+ v2: nil,
+ expected: map[string]any{
+ "p1": "v1",
+ },
+ },
+ {
+ name: "non-repeat value",
+ v1: map[string]any{
+ "p1": "v1",
+ "p2": map[string]any{
+ "p21": "v21",
+ },
+ },
+ v2: map[string]any{
+ "a1": "v1",
+ },
+ expected: map[string]any{
+ "p1": "v1",
+ "p2": map[string]any{
+ "p21": "v21",
+ },
+ "a1": "v1",
+ },
+ },
+ {
+ name: "repeat value",
+ v1: map[string]any{
+ "p1": "v1",
+ "p2": map[string]any{
+ "p21": "v21",
+ "p22": "v22",
+ },
+ },
+ v2: map[string]any{
+ "a1": "v1",
+ "p1": "v2",
+ "p2": map[string]any{
+ "p21": "v22",
+ "a21": "v21",
+ },
+ },
+ expected: map[string]any{
+ "a1": "v1",
+ "p1": "v2",
+ "p2": map[string]any{
+ "p21": "v22",
+ "a21": "v21",
+ "p22": "v22",
+ },
+ },
+ },
+ {
+ name: "repeat deep value",
+ v1: map[string]any{
+ "p1": map[string]string{
+ "p11": "v11",
+ },
+ "p2": map[string]any{
+ "p21": "v21",
+ "p22": "v22",
+ },
+ },
+ v2: map[string]any{
+ "p1": map[string]string{
+ "p21": "v21",
+ },
+ "p2": map[string]any{
+ "p21": map[string]any{
+ "p211": "v211",
+ },
+ "a21": "v21",
+ },
+ },
+ expected: map[string]any{
+ "p1": map[string]any{
+ "p11": "v11",
+ "p21": "v21",
+ },
+ "p2": map[string]any{
+ "p21": map[string]any{
+ "p211": "v211",
+ },
+ "p22": "v22",
+ "a21": "v21",
+ },
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ v := combineVariables(tc.v1, tc.v2)
+ assert.Equal(t, tc.expected, v)
+ })
+ }
+}
+
+func TestMergeGroup(t *testing.T) {
+ testcases := []struct {
+ name string
+ g1 []string
+ g2 []string
+ expected []string
+ }{
+ {
+ name: "non-repeat",
+ g1: []string{
+ "h1", "h2", "h3",
+ },
+ g2: []string{
+ "h4", "h5",
+ },
+ expected: []string{
+ "h1", "h2", "h3", "h4", "h5",
+ },
+ },
+ {
+ name: "repeat value",
+ g1: []string{
+ "h1", "h2", "h3",
+ },
+ g2: []string{
+ "h3", "h4", "h5",
+ },
+ expected: []string{
+ "h1", "h2", "h3", "h4", "h5",
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ ac := mergeSlice(tc.g1, tc.g2)
+ assert.Equal(t, tc.expected, ac)
+ })
+ }
+}
+
+func TestParseVariable(t *testing.T) {
+ testcases := []struct {
+ name string
+ data map[string]any
+ base map[string]any
+ expected map[string]any
+ }{
+ {
+ name: "parse string",
+ data: map[string]any{
+ "a": "{{ .a }}",
+ },
+ base: map[string]any{
+ "a": "b",
+ },
+ expected: map[string]any{
+ "a": "b",
+ },
+ },
+ {
+ name: "parse map",
+ data: map[string]any{
+ "a": "{{ .a.b }}",
+ },
+ base: map[string]any{
+ "a": map[string]any{
+ "b": "c",
+ },
+ },
+ expected: map[string]any{
+ "a": "c",
+ },
+ },
+ {
+ name: "parse slice",
+ data: map[string]any{
+ "a": []string{"{{ .b }}"},
+ },
+ base: map[string]any{
+ "b": "c",
+ },
+ expected: map[string]any{
+ "a": []string{"c"},
+ },
+ },
+ {
+ name: "parse map in slice",
+ data: map[string]any{
+ "a": []map[string]any{
+ {
+ "a1": []any{"{{ .b }}"},
+ },
+ },
+ },
+ base: map[string]any{
+ "b": "c",
+ },
+ expected: map[string]any{
+ "a": []map[string]any{
+ {
+ "a1": []any{"c"},
+ },
+ },
+ },
+ },
+ {
+ name: "parse slice with bool value",
+ data: map[string]any{
+ "a": []any{"{{ .b }}"},
+ },
+ base: map[string]any{
+ "b": "true",
+ },
+ expected: map[string]any{
+ "a": []any{true},
+ },
+ },
+ {
+ name: "parse map with bool value",
+ data: map[string]any{
+ "a": "{{ .b }}",
+ },
+ base: map[string]any{
+ "b": "true",
+ },
+ expected: map[string]any{
+ "a": true,
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ err := parseVariable(tc.data, func(s string) (string, error) {
+ // parse using the combined variables; after parsing, the value should no longer contain template syntax.
+ return tmpl.ParseString(combineVariables(tc.data, tc.base), s)
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assert.Equal(t, tc.expected, tc.data)
+ })
+ }
+}
+
+func TestHostsInGroup(t *testing.T) {
+ testcases := []struct {
+ name string
+ inventory kkcorev1.Inventory
+ groupName string
+ expected []string
+ }{
+ {
+ name: "single group",
+ inventory: kkcorev1.Inventory{
+ Spec: kkcorev1.InventorySpec{
+ Groups: map[string]kkcorev1.InventoryGroup{
+ "g1": {
+ Hosts: []string{"h1", "h2", "h3"},
+ },
+ },
+ },
+ },
+ groupName: "g1",
+ expected: []string{"h1", "h2", "h3"},
+ },
+ {
+ name: "group in group",
+ inventory: kkcorev1.Inventory{
+ Spec: kkcorev1.InventorySpec{
+ Groups: map[string]kkcorev1.InventoryGroup{
+ "g1": {
+ Hosts: []string{"h1", "h2", "h3"},
+ Groups: []string{"g2"},
+ },
+ "g2": {
+ Hosts: []string{"h4"},
+ },
+ },
+ },
+ },
+ groupName: "g1",
+ expected: []string{"h1", "h2", "h3", "h4"},
+ },
+ {
+ name: "repeat hosts in group",
+ inventory: kkcorev1.Inventory{
+ Spec: kkcorev1.InventorySpec{
+ Groups: map[string]kkcorev1.InventoryGroup{
+ "g1": {
+ Hosts: []string{"h1", "h2", "h3"},
+ Groups: []string{"g2"},
+ },
+ "g2": {
+ Hosts: []string{"h3", "h4"},
+ },
+ },
+ },
+ },
+ groupName: "g1",
+ expected: []string{"h4", "h1", "h2", "h3"},
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.ElementsMatch(t, tc.expected, hostsInGroup(tc.inventory, tc.groupName))
+ })
+ }
+}
diff --git a/feature/pkg/variable/internal.go b/feature/pkg/variable/internal.go
new file mode 100644
index 000000000..3d8d0d474
--- /dev/null
+++ b/feature/pkg/variable/internal.go
@@ -0,0 +1,387 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package variable
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "slices"
+ "strconv"
+ "strings"
+ "sync"
+
+ "k8s.io/apimachinery/pkg/util/rand"
+ "k8s.io/klog/v2"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+ "github.com/kubesphere/kubekey/v4/pkg/variable/source"
+)
+
+type variable struct {
+ // key is the unique identifier of the variable, usually the UID of the Pipeline.
+ key string
+ // source is where the variable is stored
+ source source.Source
+ // value is the data of the variable, stored in memory
+ value *value
+ // Mutex protects value
+ sync.Mutex
+}
+
+// value is the specific data contained in the variable
+type value struct {
+ kkcorev1.Config `json:"-"`
+ kkcorev1.Inventory `json:"-"`
+ // Hosts stores the variables for running tasks on specific hosts
+ Hosts map[string]host `json:"hosts"`
+}
+
+func (v value) deepCopy() value {
+ nv := value{}
+
+ data, err := json.Marshal(v)
+ if err != nil {
+ return value{}
+ }
+
+ if err := json.Unmarshal(data, &nv); err != nil {
+ return value{}
+ }
+
+ return nv
+}
+
+// getParameterVariable gets the variables defined in the inventory and config.
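+// Variables are merged with later sources overriding earlier ones:
+// host vars < group vars < inventory-level vars < config vars.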
+func (v value) getParameterVariable() map[string]any {
+ globalHosts := make(map[string]any)
+
+ for hostname := range v.Hosts {
+ // get host vars
+ hostVars := Extension2Variables(v.Inventory.Spec.Hosts[hostname])
+ // set inventory_name to hostVars
+ // "inventory_name" is the hostname configured in the inventory file.
+ hostVars[_const.VariableInventoryName] = hostname
+ if _, ok := hostVars[_const.VariableHostName]; !ok {
+ hostVars[_const.VariableHostName] = hostname
+ }
+ // merge group vars to host vars
+ for _, gv := range v.Inventory.Spec.Groups {
+ if slices.Contains(gv.Hosts, hostname) {
+ hostVars = combineVariables(hostVars, Extension2Variables(gv.Vars))
+ }
+ }
+ // set default localhost
+ setlocalhostVarialbe(hostname, v, hostVars)
+ // merge inventory vars to host vars
+ hostVars = combineVariables(hostVars, Extension2Variables(v.Inventory.Spec.Vars))
+ // merge config vars to host vars
+ hostVars = combineVariables(hostVars, Extension2Variables(v.Config.Spec))
+ globalHosts[hostname] = hostVars
+ }
+
+ var externalVal = make(map[string]any)
+ // external vars
+ for hostname := range globalHosts {
+ var val = make(map[string]any)
+ val = combineVariables(val, map[string]any{
+ _const.VariableGlobalHosts: globalHosts,
+ })
+ val = combineVariables(val, map[string]any{
+ _const.VariableGroups: convertGroup(v.Inventory),
+ })
+ externalVal[hostname] = val
+ }
+
+ return combineVariables(globalHosts, externalVal)
+}
+
+type host struct {
+ // RemoteVars are gathered from the remote node (e.g. by gather_fact); they are scoped to all tasks and should not be changed.
+ RemoteVars map[string]any `json:"remote"`
+ // RuntimeVars are collected at runtime; they store what is defined in each block's vars as it appears.
+ RuntimeVars map[string]any `json:"runtime"`
+}
+
+// Get retrieves vars using the given GetFunc
+func (v *variable) Get(f GetFunc) (any, error) {
+ return f(v)
+}
+
+// Merge merges host vars into the variable and syncs them to the source
+func (v *variable) Merge(f MergeFunc) error {
+ v.Lock()
+ defer v.Unlock()
+
+ old := v.value.deepCopy()
+
+ if err := f(v); err != nil {
+ return err
+ }
+
+ return v.syncSource(old)
+}
+
+// syncSource syncs host vars to the source.
+func (v *variable) syncSource(old value) error {
+ for hn, hv := range v.value.Hosts {
+ if reflect.DeepEqual(old.Hosts[hn], hv) {
+ // nothing changed, skip.
+ continue
+ }
+ // write to source
+ data, err := json.MarshalIndent(hv, "", " ")
+ if err != nil {
+ klog.ErrorS(err, "marshal host data error", "hostname", hn)
+
+ return err
+ }
+
+ if err := v.source.Write(data, hn+".json"); err != nil {
+ klog.ErrorS(err, "write host data to local file error", "hostname", hn, "filename", hn+".json")
+ }
+ }
+
+ return nil
+}
+
+// ***************************** GetFunc ***************************** //
+
+// GetHostnames returns all hostnames matched by the given group or host names.
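+// Each entry may be one of (illustrative, matching the logic below):
+//
+//	"node1"        a single host
+//	"etcd"         every host in the "etcd" group
+//	"etcd[0]"      the host at index 0 of the "etcd" group
+//	"etcd|random"  one randomly chosen host from the "etcd" group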
+var GetHostnames = func(name []string) GetFunc {
+ if len(name) == 0 {
+ return emptyGetFunc
+ }
+
+ return func(v Variable) (any, error) {
+ vv, ok := v.(*variable)
+ if !ok {
+ return nil, errors.New("variable type error")
+ }
+ var hs []string
+ for _, n := range name {
+ // add host to hs
+ if _, ok := vv.value.Hosts[n]; ok {
+ hs = append(hs, n)
+ }
+			// add the group's hosts to hs
+ for gn, gv := range convertGroup(vv.value.Inventory) {
+ if gn == n {
+ if gvd, ok := gv.([]string); ok {
+ hs = mergeSlice(hs, gvd)
+ }
+
+ break
+ }
+ }
+
+			// add the host at the given index within the given group to hs
+			regexForIndex := regexp.MustCompile(`^(.*)\[(\d+)\]$`)
+ if match := regexForIndex.FindStringSubmatch(strings.TrimSpace(n)); match != nil {
+ index, err := strconv.Atoi(match[2])
+ if err != nil {
+ klog.V(4).ErrorS(err, "convert index to int error", "index", match[2])
+
+ return nil, err
+ }
+ if group, ok := convertGroup(vv.value.Inventory)[match[1]].([]string); ok {
+ if index >= len(group) {
+					return nil, fmt.Errorf("index %v out of range for group %s", index, match[1])
+ }
+ hs = append(hs, group[index])
+ }
+ }
+
+ // add random host in group
+ regexForRandom := regexp.MustCompile(`^(.+?)\s*\|\s*random$`)
+ if match := regexForRandom.FindStringSubmatch(strings.TrimSpace(n)); match != nil {
+ if group, ok := convertGroup(vv.value.Inventory)[match[1]].([]string); ok {
+ hs = append(hs, group[rand.Intn(len(group))])
+ }
+ }
+ }
+
+ return hs, nil
+ }
+}
+
+// GetParamVariable gets the param variable, which is the combination of inventory and config.
+// If hostname is empty, it returns the param variables for all hosts.
+var GetParamVariable = func(hostname string) GetFunc {
+ return func(v Variable) (any, error) {
+ vv, ok := v.(*variable)
+ if !ok {
+ return nil, errors.New("variable type error")
+ }
+ if hostname == "" {
+ return vv.value.getParameterVariable(), nil
+ }
+
+ return vv.value.getParameterVariable()[hostname], nil
+ }
+}
+
+// GetAllVariable gets all variables for a given host.
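+// Precedence (later merges win, assuming combineVariables lets its second
+// argument override): runtime vars < remote vars < parameter (inventory/config) vars.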
+var GetAllVariable = func(hostName string) GetFunc {
+ return func(v Variable) (any, error) {
+ vv, ok := v.(*variable)
+ if !ok {
+ return nil, errors.New("variable type error")
+ }
+ result := make(map[string]any)
+ // find from runtime
+ result = combineVariables(result, vv.value.Hosts[hostName].RuntimeVars)
+ // find from remote
+ result = combineVariables(result, vv.value.Hosts[hostName].RemoteVars)
+ // find from global.
+ if vv, ok := vv.value.getParameterVariable()[hostName]; ok {
+ if vvd, ok := vv.(map[string]any); ok {
+ result = combineVariables(result, vvd)
+ }
+ }
+
+ return result, nil
+ }
+}
+
+// GetHostMaxLength returns the length of the longest hostname.
+var GetHostMaxLength = func() GetFunc {
+ return func(v Variable) (any, error) {
+ vv, ok := v.(*variable)
+ if !ok {
+ return nil, errors.New("variable type error")
+ }
+ var hostNameMaxLen int
+ for k := range vv.value.Hosts {
+ hostNameMaxLen = max(len(k), hostNameMaxLen)
+ }
+
+ return hostNameMaxLen, nil
+ }
+}
+
+// ***************************** MergeFunc ***************************** //
+
+// MergeRemoteVariable merges variables gathered from the remote host.
+var MergeRemoteVariable = func(data map[string]any, hostname string) MergeFunc {
+ return func(v Variable) error {
+ vv, ok := v.(*variable)
+ if !ok {
+ return errors.New("variable type error")
+ }
+
+ if hostname == "" {
+			return errors.New("hostname cannot be empty when merging remote variables")
+ }
+ if _, ok := vv.value.Hosts[hostname]; !ok {
+			return fmt.Errorf("host %s does not exist when merging remote variables", hostname)
+ }
+
+		// RemoteVars are write-once: only set them while they are still empty
+ if hv := vv.value.Hosts[hostname]; len(hv.RemoteVars) == 0 {
+ hv.RemoteVars = data
+ vv.value.Hosts[hostname] = hv
+ }
+
+ return nil
+ }
+}
+
+// MergeRuntimeVariable parses the variables against each given host and merges them into that host.
+var MergeRuntimeVariable = func(data map[string]any, hosts ...string) MergeFunc {
+ if len(data) == 0 || len(hosts) == 0 {
+ // skip
+ return emptyMergeFunc
+ }
+
+ return func(v Variable) error {
+ for _, hostName := range hosts {
+ vv, ok := v.(*variable)
+ if !ok {
+ return errors.New("variable type error")
+ }
+ // merge to specify host
+ curVariable, err := v.Get(GetAllVariable(hostName))
+ if err != nil {
+ return err
+ }
+ // parse variable
+ if err := parseVariable(data, func(s string) (string, error) {
+			// parse against the full variable set; the resulting task variable should not contain template syntax.
+ cv, ok := curVariable.(map[string]any)
+ if !ok {
+ return "", errors.New("variable type error")
+ }
+
+ return tmpl.ParseString(combineVariables(data, cv), s)
+ }); err != nil {
+ return err
+ }
+
+ hv := vv.value.Hosts[hostName]
+ hv.RuntimeVars = combineVariables(hv.RuntimeVars, data)
+ vv.value.Hosts[hostName] = hv
+ }
+
+ return nil
+ }
+}
+
+// MergeAllRuntimeVariable parses the variables against the given host and merges them into all hosts.
+var MergeAllRuntimeVariable = func(data map[string]any, hostName string) MergeFunc {
+ return func(v Variable) error {
+ vv, ok := v.(*variable)
+ if !ok {
+ return errors.New("variable type error")
+ }
+ // merge to specify host
+ curVariable, err := v.Get(GetAllVariable(hostName))
+ if err != nil {
+ return err
+ }
+ // parse variable
+ if err := parseVariable(data, func(s string) (string, error) {
+			// parse against the full variable set; the resulting task variable should not contain template syntax.
+ cv, ok := curVariable.(map[string]any)
+ if !ok {
+ return "", errors.New("variable type error")
+ }
+
+ return tmpl.ParseString(combineVariables(data, cv), s)
+ }); err != nil {
+ return err
+ }
+
+ for h := range vv.value.Hosts {
+ hv := vv.value.Hosts[h]
+ hv.RuntimeVars = combineVariables(hv.RuntimeVars, data)
+ vv.value.Hosts[h] = hv
+ }
+
+ return nil
+ }
+}
diff --git a/feature/pkg/variable/internal_test.go b/feature/pkg/variable/internal_test.go
new file mode 100644
index 000000000..07dcd16b9
--- /dev/null
+++ b/feature/pkg/variable/internal_test.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package variable
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+func TestGetAllVariable(t *testing.T) {
+ testcases := []struct {
+ name string
+ value *value
+		expected map[string]any
+ }{
+ {
+ name: "global override runtime variable",
+ value: &value{
+ Config: kkcorev1.Config{
+ Spec: runtime.RawExtension{
+ Raw: []byte(`{
+"artifact": {
+ "images": [ "abc" ]
+}
+}`)},
+ },
+ Inventory: kkcorev1.Inventory{
+ Spec: kkcorev1.InventorySpec{
+ Hosts: map[string]runtime.RawExtension{
+ "localhost": {Raw: []byte(`{
+"internal_ipv4": "127.0.0.1",
+"internal_ipv6": "::1"
+}`)},
+ },
+ },
+ },
+ Hosts: map[string]host{
+ "localhost": {},
+ },
+ },
+			expected: map[string]any{
+ "internal_ipv4": "127.0.0.1",
+ "internal_ipv6": "::1",
+ "artifact": map[string]any{
+ "images": []any{"abc"},
+ },
+ "groups": map[string]any{"all": []string{"localhost"}},
+ "inventory_hosts": map[string]any{
+ "localhost": map[string]any{
+ "internal_ipv4": "127.0.0.1",
+ "internal_ipv6": "::1",
+ "artifact": map[string]any{
+ "images": []any{"abc"},
+ },
+ "inventory_name": "localhost",
+ "hostname": "localhost",
+ },
+ },
+ "inventory_name": "localhost",
+ "hostname": "localhost",
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ v := variable{value: tc.value}
+
+ result, err := v.Get(GetAllVariable("localhost"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+			assert.Equal(t, tc.expected, result)
+ })
+ }
+}
diff --git a/feature/pkg/variable/source/file.go b/feature/pkg/variable/source/file.go
new file mode 100644
index 000000000..953908e80
--- /dev/null
+++ b/feature/pkg/variable/source/file.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package source
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+
+ "k8s.io/klog/v2"
+)
+
+var _ Source = &fileSource{}
+
+// NewFileSource returns a new fileSource.
+func NewFileSource(path string) (Source, error) {
+ if _, err := os.Stat(path); err != nil {
+ if err := os.MkdirAll(path, os.ModePerm); err != nil {
+ klog.V(4).ErrorS(err, "create source path error", "path", path)
+
+ return nil, err
+ }
+ }
+
+ return &fileSource{path: path}, nil
+}
+
+type fileSource struct {
+ path string
+}
+
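+// Read returns the contents of every .json file directly under f.path,
+// keyed by filename.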
+func (f *fileSource) Read() (map[string][]byte, error) {
+ de, err := os.ReadDir(f.path)
+ if err != nil {
+ klog.V(4).ErrorS(err, "read dir error", "path", f.path)
+
+ return nil, err
+ }
+
+ var result map[string][]byte
+ for _, entry := range de {
+ if entry.IsDir() {
+ continue
+ }
+
+ if result == nil {
+ result = make(map[string][]byte)
+ }
+ // only read json data
+ if strings.HasSuffix(entry.Name(), ".json") {
+ data, err := os.ReadFile(filepath.Join(f.path, entry.Name()))
+ if err != nil {
+ klog.V(4).ErrorS(err, "read file error", "filename", entry.Name())
+
+ return nil, err
+ }
+
+ result[entry.Name()] = data
+ }
+ }
+
+ return result, nil
+}
+
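+// Write persists data under f.path as filename. The variable layer stores one
+// <hostname>.json per host, which Read later returns keyed by that filename.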
+func (f *fileSource) Write(data []byte, filename string) error {
+ file, err := os.Create(filepath.Join(f.path, filename))
+ if err != nil {
+ klog.V(4).ErrorS(err, "create file error", "filename", filename)
+
+ return err
+ }
+ defer file.Close()
+
+ if _, err := file.Write(data); err != nil {
+ klog.V(4).ErrorS(err, "write file error", "filename", filename)
+
+ return err
+ }
+
+ return nil
+}
diff --git a/feature/pkg/variable/source/memory.go b/feature/pkg/variable/source/memory.go
new file mode 100644
index 000000000..0ad72e71e
--- /dev/null
+++ b/feature/pkg/variable/source/memory.go
@@ -0,0 +1,25 @@
+package source
+
+var _ Source = &memorySource{}
+
+type memorySource struct {
+ data map[string][]byte
+}
+
+// NewMemorySource returns a new memorySource.
+func NewMemorySource() Source {
+ return &memorySource{
+ data: make(map[string][]byte),
+ }
+}
+
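+// Read returns the internal map directly; callers should treat it as read-only.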
+func (m *memorySource) Read() (map[string][]byte, error) {
+ return m.data, nil
+}
+
+func (m *memorySource) Write(data []byte, filename string) error {
+ m.data[filename] = data
+
+ return nil
+}
diff --git a/feature/pkg/variable/source/source.go b/feature/pkg/variable/source/source.go
new file mode 100644
index 000000000..600b4dd07
--- /dev/null
+++ b/feature/pkg/variable/source/source.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package source
+
+// SourceType defines how variables are stored.
+type SourceType int
+
+const (
+	// MemorySource stores variables in memory
+	MemorySource SourceType = iota
+	// FileSource stores variables in files
+	FileSource
+)
+
+// Source is the backing store from which variables are read and to which they are written.
+type Source interface {
+ Read() (map[string][]byte, error)
+ Write(data []byte, filename string) error
+}
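+
+// Example (illustrative sketch): sources are interchangeable behind this interface.
+//
+//	src := NewMemorySource()
+//	_ = src.Write([]byte(`{"hostname":"node1"}`), "node1.json")
+//	data, _ := src.Read() // map["node1.json"] -> raw JSON bytes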
diff --git a/feature/pkg/variable/variable.go b/feature/pkg/variable/variable.go
new file mode 100644
index 000000000..67dfd96e8
--- /dev/null
+++ b/feature/pkg/variable/variable.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package variable
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/klog/v2"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/variable/source"
+)
+
+var (
+ emptyGetFunc GetFunc = func(Variable) (any, error) {
+ return nil, errors.New("nil value returned")
+ }
+ emptyMergeFunc MergeFunc = func(Variable) error {
+ return nil
+ }
+)
+
+// GetFunc get data from variable
+type GetFunc func(Variable) (any, error)
+
+// MergeFunc merge data to variable
+type MergeFunc func(Variable) error
+
+// Variable stores all the vars used by a pipeline.
+type Variable interface {
+ Get(getFunc GetFunc) (any, error)
+ Merge(mergeFunc MergeFunc) error
+}
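+
+// A typical flow (illustrative): read with a GetFunc, mutate with a MergeFunc.
+//
+//	hosts, _ := v.Get(GetHostnames([]string{"etcd"}))
+//	_ = v.Merge(MergeRuntimeVariable(map[string]any{"foo": "bar"}, "node1"))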
+
+// New creates a Variable for the pipeline: it loads the config and inventory, builds the value, and syncs it with the source.
+func New(ctx context.Context, client ctrlclient.Client, pipeline kkcorev1.Pipeline, st source.SourceType) (Variable, error) {
+ var err error
+ // new source
+ var s source.Source
+
+ switch st {
+ case source.MemorySource:
+ s = source.NewMemorySource()
+ case source.FileSource:
+ s, err = source.NewFileSource(filepath.Join(_const.RuntimeDirFromPipeline(pipeline), _const.RuntimePipelineVariableDir))
+ if err != nil {
+ klog.V(4).ErrorS(err, "create file source failed", "path", filepath.Join(_const.RuntimeDirFromPipeline(pipeline), _const.RuntimePipelineVariableDir), "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
+
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unsupported source type: %v", st)
+ }
+
+ // get config
+ var config = &kkcorev1.Config{}
+ if pipeline.Spec.ConfigRef != nil {
+ if err := client.Get(ctx, types.NamespacedName{Namespace: pipeline.Spec.ConfigRef.Namespace, Name: pipeline.Spec.ConfigRef.Name}, config); err != nil {
+ klog.V(4).ErrorS(err, "get config from pipeline error", "config", pipeline.Spec.ConfigRef, "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
+
+ return nil, err
+ }
+ }
+
+ // get inventory
+ var inventory = &kkcorev1.Inventory{}
+ if pipeline.Spec.InventoryRef != nil {
+ if err := client.Get(ctx, types.NamespacedName{Namespace: pipeline.Spec.InventoryRef.Namespace, Name: pipeline.Spec.InventoryRef.Name}, inventory); err != nil {
+ klog.V(4).ErrorS(err, "get inventory from pipeline error", "inventory", pipeline.Spec.InventoryRef, "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
+
+ return nil, err
+ }
+ }
+
+ v := &variable{
+ key: string(pipeline.UID),
+ source: s,
+ value: &value{
+ Config: *config,
+ Inventory: *inventory,
+ Hosts: make(map[string]host),
+ },
+ }
+
+ if gd, ok := convertGroup(*inventory)["all"].([]string); ok {
+ for _, hostname := range gd {
+ v.value.Hosts[hostname] = host{
+ RemoteVars: make(map[string]any),
+ RuntimeVars: make(map[string]any),
+ }
+ }
+ }
+
+ // read data from source
+ data, err := v.source.Read()
+ if err != nil {
+ klog.V(4).ErrorS(err, "read data from source error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
+
+ return nil, err
+ }
+
+ for k, d := range data {
+ // set hosts
+ h := host{}
+ if err := json.Unmarshal(d, &h); err != nil {
+ klog.V(4).ErrorS(err, "unmarshal host error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
+
+ return nil, err
+ }
+
+ v.value.Hosts[strings.TrimSuffix(k, ".json")] = h
+ }
+
+ return v, nil
+}
diff --git a/feature/plugins/playbooks/backup.yaml b/feature/plugins/playbooks/backup.yaml
new file mode 100644
index 000000000..04f6fe586
--- /dev/null
+++ b/feature/plugins/playbooks/backup.yaml
@@ -0,0 +1,7 @@
+---
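+# "etcd|random" resolves to one randomly chosen host from the etcd group
+# (see GetHostnames in the variable package).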
+- hosts:
+ - etcd|random
+ roles:
+ - etcd/backup
diff --git a/feature/plugins/playbooks/restore.yaml b/feature/plugins/playbooks/restore.yaml
new file mode 100644
index 000000000..842adf5a1
--- /dev/null
+++ b/feature/plugins/playbooks/restore.yaml
@@ -0,0 +1,15 @@
+---
+- hosts:
+ - kube_control_plane
+ roles:
+ - kubernetes/stop
+
+- hosts:
+ - etcd
+ roles:
+ - etcd/restore
+
+- hosts:
+ - kube_control_plane
+ roles:
+ - kubernetes/start
diff --git a/feature/plugins/playbooks/sonobuoy.yaml b/feature/plugins/playbooks/sonobuoy.yaml
new file mode 100644
index 000000000..dc27d1afb
--- /dev/null
+++ b/feature/plugins/playbooks/sonobuoy.yaml
@@ -0,0 +1,6 @@
+---
+- hosts:
+ - cluster
+ roles:
+ - sonobuoy
+
diff --git a/feature/plugins/playbooks/upgrade_kernel.yaml b/feature/plugins/playbooks/upgrade_kernel.yaml
new file mode 100644
index 000000000..f1288d401
--- /dev/null
+++ b/feature/plugins/playbooks/upgrade_kernel.yaml
@@ -0,0 +1,16 @@
+---
+- hosts:
+ - localhost
+ vars_files:
+ - vars/upgrade_kernel.yaml
+ tags: ["always"]
+ roles:
+ - os/init-kernel
+
+- hosts:
+ - os
+ vars_files:
+ - vars/upgrade_kernel.yaml
+ tags: ["always"]
+ roles:
+ - os/upgrade-kernel
diff --git a/feature/plugins/playbooks/vars/upgrade_kernel.yaml b/feature/plugins/playbooks/vars/upgrade_kernel.yaml
new file mode 100644
index 000000000..13eff7e0c
--- /dev/null
+++ b/feature/plugins/playbooks/vars/upgrade_kernel.yaml
@@ -0,0 +1,2 @@
+kernel_version: 5.4.278-1.el7
+arch: amd64
diff --git a/feature/plugins/roles/etcd/backup/defaults/main.yaml b/feature/plugins/roles/etcd/backup/defaults/main.yaml
new file mode 100644
index 000000000..7ec090cee
--- /dev/null
+++ b/feature/plugins/roles/etcd/backup/defaults/main.yaml
@@ -0,0 +1,5 @@
+work_dir: /kubekey
+etcd:
+ env:
+ data_dir: /var/lib/etcd
+ token: k8s_etcd
diff --git a/feature/plugins/roles/etcd/backup/tasks/main.yaml b/feature/plugins/roles/etcd/backup/tasks/main.yaml
new file mode 100644
index 000000000..a5a3bd714
--- /dev/null
+++ b/feature/plugins/roles/etcd/backup/tasks/main.yaml
@@ -0,0 +1,15 @@
+---
+- name: Generate backup from etcd
+ command: |
+ if [ ! -d /tmp/kubekey/etcd/ ]; then
+ mkdir -p /tmp/kubekey/etcd/
+ fi
+ export $(cat /etc/etcd.env | grep ETCDCTL_CACERT)
+ export $(cat /etc/etcd.env | grep ETCDCTL_CERT)
+ export $(cat /etc/etcd.env | grep ETCDCTL_KEY)
+ ETCDCTL_API=3 etcdctl --endpoints=https://{{ .internal_ipv4 }}:2379 snapshot save /tmp/kubekey/etcd/snapshot.db
+
+- name: Fetch backup to local
+ fetch:
+ src: /tmp/kubekey/etcd/snapshot.db
+ dest: "{{ .work_dir }}/kubekey/etcd/snapshot.db"
diff --git a/feature/plugins/roles/etcd/restore/defaults/main.yaml b/feature/plugins/roles/etcd/restore/defaults/main.yaml
new file mode 100644
index 000000000..7ec090cee
--- /dev/null
+++ b/feature/plugins/roles/etcd/restore/defaults/main.yaml
@@ -0,0 +1,5 @@
+work_dir: /kubekey
+etcd:
+ env:
+ data_dir: /var/lib/etcd
+ token: k8s_etcd
diff --git a/feature/plugins/roles/etcd/restore/tasks/main.yaml b/feature/plugins/roles/etcd/restore/tasks/main.yaml
new file mode 100644
index 000000000..654b114c6
--- /dev/null
+++ b/feature/plugins/roles/etcd/restore/tasks/main.yaml
@@ -0,0 +1,31 @@
+---
+- name: Sync etcd snapshot to remote
+ copy:
+ src: "{{ .work_dir }}/kubekey/etcd/snapshot.db"
+ dest: /tmp/kubekey/etcd/snapshot.db
+
+- name: Stop etcd
+ command: systemctl stop etcd
+
+- name: Remove etcd data dir
+ command: |
+ rm -rf /var/lib/etcd/*
+
+- name: Restore etcd by snapshot
+ command: |
+ export $(cat /etc/etcd.env | grep ETCDCTL_CACERT)
+ export $(cat /etc/etcd.env | grep ETCDCTL_CERT)
+ export $(cat /etc/etcd.env | grep ETCDCTL_KEY)
+ etcdctl snapshot restore /tmp/kubekey/etcd/snapshot.db \
+ --name={{ .inventory_name }} --endpoints=https://{{ .internal_ipv4 }}:2379 \
+ {{- $ips := list -}}
+ {{- range $element := .groups.etcd -}}
+      {{- $ips = append $ips (printf "%s=https://%s:2380" (index $.inventory_hosts $element "inventory_name") (index $.inventory_hosts $element "internal_ipv4")) -}}
+ {{- end -}}
+ --initial-cluster={{ $ips | join "," }} \
+      --initial-advertise-peer-urls=https://{{ .internal_ipv4 }}:2380 \
+ --initial-cluster-token={{ .etcd.env.token }} \
+ --data-dir={{ .etcd.env.data_dir }}
+
+- name: Start etcd
+ command: systemctl start etcd
diff --git a/feature/plugins/roles/kubernetes/start/defaults/main.yaml b/feature/plugins/roles/kubernetes/start/defaults/main.yaml
new file mode 100644
index 000000000..4c9d1966e
--- /dev/null
+++ b/feature/plugins/roles/kubernetes/start/defaults/main.yaml
@@ -0,0 +1,2 @@
+cri:
+ container_manager: docker
diff --git a/feature/plugins/roles/kubernetes/start/tasks/main.yaml b/feature/plugins/roles/kubernetes/start/tasks/main.yaml
new file mode 100644
index 000000000..498142585
--- /dev/null
+++ b/feature/plugins/roles/kubernetes/start/tasks/main.yaml
@@ -0,0 +1,14 @@
+---
+- name: Start cri in kubernetes
+ block:
+    - name: Start docker in kubernetes
+ command: |
+ systemctl start docker
+ when: .cri.container_manager | eq "docker"
+ - name: Start containerd in kubernetes
+ command: |
+ systemctl start containerd
+ when: .cri.container_manager | eq "containerd"
+
+- name: Start kubelet in kubernetes
+ command: systemctl start kubelet
diff --git a/feature/plugins/roles/kubernetes/stop/defaults/main.yaml b/feature/plugins/roles/kubernetes/stop/defaults/main.yaml
new file mode 100644
index 000000000..4c9d1966e
--- /dev/null
+++ b/feature/plugins/roles/kubernetes/stop/defaults/main.yaml
@@ -0,0 +1,2 @@
+cri:
+ container_manager: docker
diff --git a/feature/plugins/roles/kubernetes/stop/tasks/main.yaml b/feature/plugins/roles/kubernetes/stop/tasks/main.yaml
new file mode 100644
index 000000000..91a06755b
--- /dev/null
+++ b/feature/plugins/roles/kubernetes/stop/tasks/main.yaml
@@ -0,0 +1,14 @@
+---
+- name: Stop kubelet in kubernetes
+ command: systemctl stop kubelet
+
+- name: Stop cri in kubernetes
+ block:
+ - name: Stop docker in kubernetes
+ command: |
+ systemctl stop docker
+ when: .cri.container_manager | eq "docker"
+ - name: Stop containerd in kubernetes
+ command: |
+ systemctl stop containerd
+ when: .cri.container_manager | eq "containerd"
diff --git a/feature/plugins/roles/os/init-kernel/defaults/main.yaml b/feature/plugins/roles/os/init-kernel/defaults/main.yaml
new file mode 100644
index 000000000..c78b7217d
--- /dev/null
+++ b/feature/plugins/roles/os/init-kernel/defaults/main.yaml
@@ -0,0 +1,5 @@
+rpm_url:
+ kernel_lt:
+ amd64: http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/kernel-lt-{{ .kernel_version }}.elrepo.x86_64.rpm
+ kernel_lt_devel:
+ amd64: http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/kernel-lt-devel-{{ .kernel_version }}.elrepo.x86_64.rpm
diff --git a/feature/plugins/roles/os/init-kernel/tasks/centos.yaml b/feature/plugins/roles/os/init-kernel/tasks/centos.yaml
new file mode 100644
index 000000000..8faa937a9
--- /dev/null
+++ b/feature/plugins/roles/os/init-kernel/tasks/centos.yaml
@@ -0,0 +1,30 @@
+---
+- name: Download kernel-lt rpm
+ command: |
+ artifact_name={{ get .rpm_url.kernel_lt .arch | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/kernel/{{ .kernel_version }}/{{ .arch }}
+ if [ ! -f $artifact_path/$artifact_name ]; then
+ mkdir -p $artifact_path
+ # download online
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .rpm_url.kernel_lt .arch }})
+ if [ $http_code != 200 ]; then
+ echo "http code is $http_code"
+ exit 1
+ fi
+ curl -L -o $artifact_path/$artifact_name {{ get .rpm_url.kernel_lt .arch }}
+ fi
+
+- name: Download kernel-lt-devel rpm
+ command: |
+ artifact_name={{ get .rpm_url.kernel_lt_devel .arch | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/kernel/{{ .kernel_version }}/{{ .arch }}
+ if [ ! -f $artifact_path/$artifact_name ]; then
+ mkdir -p $artifact_path
+ # download online
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .rpm_url.kernel_lt_devel .arch }})
+ if [ $http_code != 200 ]; then
+ echo "http code is $http_code"
+ exit 1
+ fi
+ curl -L -o $artifact_path/$artifact_name {{ get .rpm_url.kernel_lt_devel .arch }}
+ fi
diff --git a/feature/plugins/roles/os/init-kernel/tasks/main.yaml b/feature/plugins/roles/os/init-kernel/tasks/main.yaml
new file mode 100644
index 000000000..c8f997e3d
--- /dev/null
+++ b/feature/plugins/roles/os/init-kernel/tasks/main.yaml
@@ -0,0 +1,3 @@
+---
+- include_tasks: centos.yaml
+ tags: ["centos"]
diff --git a/feature/plugins/roles/os/upgrade-kernel/tasks/centos.yaml b/feature/plugins/roles/os/upgrade-kernel/tasks/centos.yaml
new file mode 100644
index 000000000..5780a1e9b
--- /dev/null
+++ b/feature/plugins/roles/os/upgrade-kernel/tasks/centos.yaml
@@ -0,0 +1,31 @@
+---
+- name: Add Aliyun repo
+ command: |
+    now=$(date +"%Y-%m-%d_%H-%M-%S")
+    cp /etc/yum.repos.d/CentOS-Base.repo "/etc/yum.repos.d/CentOS-Base.repo.bak-$now"
+ sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
+ -e 's|^#baseurl=http://mirror.centos.org|baseurl=https://mirrors.aliyun.com|g' \
+ -i.bak \
+ /etc/yum.repos.d/CentOS-Base.repo
+ sudo yum clean all
+ sudo yum makecache
+
+- name: Install necessary dependency rpms
+ command: |
+ sudo yum install linux-firmware perl -y
+
+- name: Copy rpms to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/kernel/{{ .kernel_version }}/{{ .arch }}/
+ dest: /tmp/kubekey/kernel/
+
+- name: Install rpms
+ command: |
+ rpm -ivh /tmp/kubekey/kernel/kernel-lt*
+
+- name: Set default kernel
+ command: |
+ grub2-set-default 0
+ grub2-mkconfig -o /boot/grub2/grub.cfg
+# reboot -h now
diff --git a/feature/plugins/roles/os/upgrade-kernel/tasks/main.yaml b/feature/plugins/roles/os/upgrade-kernel/tasks/main.yaml
new file mode 100644
index 000000000..c8f997e3d
--- /dev/null
+++ b/feature/plugins/roles/os/upgrade-kernel/tasks/main.yaml
@@ -0,0 +1,3 @@
+---
+- include_tasks: centos.yaml
+ tags: ["centos"]
diff --git a/feature/plugins/roles/sonobuoy/defaults/main.yaml b/feature/plugins/roles/sonobuoy/defaults/main.yaml
new file mode 100644
index 000000000..d3cf5d0ff
--- /dev/null
+++ b/feature/plugins/roles/sonobuoy/defaults/main.yaml
@@ -0,0 +1,28 @@
+sonobuoy_version: v0.57.1
+work_dir: /kubekey
+sonobuoy:
+ amd64: |
+ {{- if .kkzone | eq "cn" }}
+ https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .sonobuoy_version }}/sonobuoy_{{ .sonobuoy_version | trimPrefix "v" }}_linux_amd64.tar.gz
+ {{- else }}
+ https://github.com/vmware-tanzu/sonobuoy/releases/download/{{ .sonobuoy_version }}/sonobuoy_{{ .sonobuoy_version | trimPrefix "v" }}_linux_amd64.tar.gz
+ {{- end }}
+ arm64: |
+ {{- if .kkzone | eq "cn" }}
+ https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .sonobuoy_version }}/sonobuoy_{{ .sonobuoy_version | trimPrefix "v" }}_linux_arm64.tar.gz
+ {{- else }}
+ https://github.com/vmware-tanzu/sonobuoy/releases/download/{{ .sonobuoy_version }}/sonobuoy_{{ .sonobuoy_version | trimPrefix "v" }}_linux_arm64.tar.gz
+ {{- end }}
+plugins:
+ systemd_logs:
+ enabled: false
+ e2e:
+ enabled: false
+ e2e_ks:
+ enabled: false
+ image: registry.cn-beijing.aliyuncs.com/kubesphereio/conformance:{{ .kube_version }}
+ kube_bench:
+ enabled: false
+ image: sonobuoy/kube-bench:v0.6.17
diff --git a/feature/plugins/roles/sonobuoy/tasks/main.yaml b/feature/plugins/roles/sonobuoy/tasks/main.yaml
new file mode 100644
index 000000000..1164fe550
--- /dev/null
+++ b/feature/plugins/roles/sonobuoy/tasks/main.yaml
@@ -0,0 +1,23 @@
+---
+- name: Generate sonobuoy plugins
+ template:
+ src: plugins/
+ dest: sonobuoy/plugins/
+
+- name: Run sonobuoy
+ command: |
+ # run and waiting
+ sonobuoy run --wait \
+ {{ if .plugins.systemd_logs.enabled }}-p systemd-logs {{ end }}\
+ {{ if .plugins.e2e.enabled }}-p e2e {{ end }}\
+ {{ if .plugins.e2e_ks.enabled }}-p sonobuoy/plugins/e2e-ks.yaml {{ end }}\
+    {{ if .plugins.kube_bench.enabled }}-p sonobuoy/plugins/kube-bench.yaml -p sonobuoy/plugins/kube-bench-master.yaml {{ end }}
+
+- name: Retrieve result
+ command: |
+ cd sonobuoy/ && sonobuoy retrieve
+
+- name: Clean sonobuoy
+ command: |
+ sonobuoy delete
+
diff --git a/feature/plugins/roles/sonobuoy/templates/plugins/e2e-ks.yaml b/feature/plugins/roles/sonobuoy/templates/plugins/e2e-ks.yaml
new file mode 100644
index 000000000..27be18ec3
--- /dev/null
+++ b/feature/plugins/roles/sonobuoy/templates/plugins/e2e-ks.yaml
@@ -0,0 +1,47 @@
+podSpec:
+ containers: [ ]
+ nodeSelector:
+ kubernetes.io/os: linux
+ restartPolicy: Never
+ serviceAccountName: sonobuoy-serviceaccount
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Exists
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - key: kubernetes.io/e2e-evict-taint-key
+ operator: Exists
+ sonobuoy-config:
+ driver: Job
+ plugin-name: e2e-ks
+ result-format: junit
+ spec:
+ command:
+ - /run_e2e.sh
+ env:
+ - name: E2E_EXTRA_ARGS
+ value: --progress-report-url=http://localhost:8099/progress
+ - name: E2E_FOCUS
+ value: \[Conformance\]
+ - name: E2E_PARALLEL
+ value: "false"
+ - name: E2E_USE_GO_RUNNER
+ value: "true"
+ - name: RESULTS_DIR
+ value: /tmp/sonobuoy/results
+ - name: SONOBUOY_K8S_VERSION
+ value: {{ .kube_version }}
+ - name: SONOBUOY_PROGRESS_PORT
+ value: "8099"
+ - name: SONOBUOY
+ value: "true"
+ - name: SONOBUOY_CONFIG_DIR
+ value: /tmp/sonobuoy/config
+ - name: SONOBUOY_RESULTS_DIR
+ value: /tmp/sonobuoy/results
+ image: {{ .plugins.e2e_ks.image }}
+ name: e2e-ks
+ volumeMounts:
+ - mountPath: /tmp/sonobuoy/results
+ name: results
diff --git a/feature/plugins/roles/sonobuoy/templates/plugins/kube-bench-master.yaml b/feature/plugins/roles/sonobuoy/templates/plugins/kube-bench-master.yaml
new file mode 100644
index 000000000..e454464c6
--- /dev/null
+++ b/feature/plugins/roles/sonobuoy/templates/plugins/kube-bench-master.yaml
@@ -0,0 +1,86 @@
+podSpec:
+ containers: []
+ dnsPolicy: ClusterFirstWithHostNet
+ hostIPC: true
+ hostNetwork: true
+ hostPID: true
+ serviceAccountName: sonobuoy-serviceaccount
+ tolerations:
+ - operator: Exists
+ volumes:
+ - name: var-lib-etcd
+ hostPath:
+ path: "/var/lib/etcd"
+ - name: var-lib-kubelet
+ hostPath:
+ path: "/var/lib/kubelet"
+ - name: lib-systemd
+ hostPath:
+ path: "/lib/systemd"
+ - name: etc-systemd
+ hostPath:
+ path: "/etc/systemd"
+ - name: etc-kubernetes
+ hostPath:
+ path: "/etc/kubernetes"
+ # Uncomment this volume definition if you wish to use Kubernetes version auto-detection in kube-bench.
+ # - name: usr-bin
+ # hostPath:
+ # path: "/usr/bin"
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+sonobuoy-config:
+ driver: DaemonSet
+ plugin-name: kube-bench-master
+ result-format: junit
+spec:
+ command:
+ - /bin/sh
+ args:
+ - -c
+ - /run-kube-bench.sh; while true; do echo "Sleeping for 1h to avoid daemonset restart"; sleep 3600; done
+ env:
+ - name: KUBERNETES_VERSION
+ value: {{ .kube_version }}
+ - name: TARGET_MASTER
+ value: "true"
+ - name: TARGET_NODE
+ value: "false"
+ - name: TARGET_CONTROLPLANE
+ value: "false"
+ - name: TARGET_ETCD
+ value: "false"
+ - name: TARGET_POLICIES
+ value: "false"
+ image: {{ .plugins.kube_bench.image }}
+ name: plugin
+ resources: {}
+ volumeMounts:
+ - mountPath: /tmp/sonobuoy/results
+ name: results
+ - name: var-lib-etcd
+ mountPath: /var/lib/etcd
+ readOnly: true
+ - name: var-lib-kubelet
+ mountPath: /var/lib/kubelet
+ readOnly: true
+ - name: etc-systemd
+ mountPath: /etc/systemd
+ readOnly: true
+ - name: lib-systemd
+ mountPath: /lib/systemd
+ readOnly: true
+ - name: etc-kubernetes
+ mountPath: /etc/kubernetes
+ readOnly: true
+ # /usr/bin from the host is mounted to access kubectl / kubelet, used by kube-bench for auto-detecting the Kubernetes version.
+ # It is mounted at the path /usr/local/mount-from-host/bin to avoid overwriting /usr/bin within the container.
+ # You can omit this mount if you provide the version using the KUBERNETES_VERSION environment variable.
+ # - name: usr-bin
+ # mountPath: /usr/local/mount-from-host/bin
+ # readOnly: true
diff --git a/feature/plugins/roles/sonobuoy/templates/plugins/kube-bench.yaml b/feature/plugins/roles/sonobuoy/templates/plugins/kube-bench.yaml
new file mode 100644
index 000000000..9f232538f
--- /dev/null
+++ b/feature/plugins/roles/sonobuoy/templates/plugins/kube-bench.yaml
@@ -0,0 +1,86 @@
+podSpec:
+ containers: []
+ dnsPolicy: ClusterFirstWithHostNet
+ hostIPC: true
+ hostNetwork: true
+ hostPID: true
+ serviceAccountName: sonobuoy-serviceaccount
+ tolerations:
+ - operator: Exists
+ volumes:
+ - name: var-lib-etcd
+ hostPath:
+ path: "/var/lib/etcd"
+ - name: var-lib-kubelet
+ hostPath:
+ path: "/var/lib/kubelet"
+ - name: lib-systemd
+ hostPath:
+ path: "/lib/systemd"
+ - name: etc-systemd
+ hostPath:
+ path: "/etc/systemd"
+ - name: etc-kubernetes
+ hostPath:
+ path: "/etc/kubernetes"
+ # Uncomment this volume definition if you wish to use Kubernetes version auto-detection in kube-bench.
+ # - name: usr-bin
+ # hostPath:
+ # path: "/usr/bin"
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: DoesNotExist
+sonobuoy-config:
+ driver: DaemonSet
+ plugin-name: kube-bench-node
+ result-format: junit
+spec:
+ command:
+ - /bin/sh
+ args:
+ - -c
+ - /run-kube-bench.sh; while true; do echo "Sleeping for 1h to avoid daemonset restart"; /bin/sleep 3600; done
+ env:
+ - name: KUBERNETES_VERSION
+ value: {{ .kube_version }}
+ - name: TARGET_MASTER
+ value: "false"
+ - name: TARGET_NODE
+ value: "true"
+ - name: TARGET_CONTROLPLANE
+ value: "false"
+ - name: TARGET_ETCD
+ value: "false"
+ - name: TARGET_POLICIES
+ value: "false"
+ image: {{ .plugins.kube_bench.image }}
+ name: plugin
+ resources: {}
+ volumeMounts:
+ - mountPath: /tmp/sonobuoy/results
+ name: results
+ - name: var-lib-etcd
+ mountPath: /var/lib/etcd
+ readOnly: true
+ - name: var-lib-kubelet
+ mountPath: /var/lib/kubelet
+ readOnly: true
+ - name: lib-systemd
+ mountPath: /lib/systemd
+ readOnly: true
+ - name: etc-systemd
+ mountPath: /etc/systemd
+ readOnly: true
+ - name: etc-kubernetes
+ mountPath: /etc/kubernetes
+ readOnly: true
+ # /usr/bin is mounted to access kubectl / kubelet, used by kube-bench for auto-detecting the Kubernetes version.
+ # It is mounted at the path /usr/local/mount-from-host/bin to avoid overwriting /usr/bin within the container.
+ # You can omit this mount if you provide the version using the KUBERNETES_VERSION environment variable.
+ # - name: usr-bin
+ # mountPath: /usr/local/mount-from-host/bin
+ # readOnly: true
diff --git a/feature/scripts/ci-lint-dockerfiles.sh b/feature/scripts/ci-lint-dockerfiles.sh
new file mode 100755
index 000000000..c62fb3bd7
--- /dev/null
+++ b/feature/scripts/ci-lint-dockerfiles.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Copyright 2022 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+HADOLINT_VER=${1:-latest}
+HADOLINT_FAILURE_THRESHOLD=${2:-warning}
+
+FILES=$(find -- * -name Dockerfile)
+while read -r file; do
+ echo "Linting: ${file}"
+ # Configure the linter to fail for warnings and errors. Can be set to: error | warning | info | style | ignore | none
+ docker run --rm -i ghcr.io/hadolint/hadolint:"${HADOLINT_VER}" hadolint --failure-threshold "${HADOLINT_FAILURE_THRESHOLD}" - < "${file}"
+done <<< "${FILES}"
diff --git a/feature/scripts/docker-install.sh b/feature/scripts/docker-install.sh
new file mode 100755
index 000000000..9bb704105
--- /dev/null
+++ b/feature/scripts/docker-install.sh
@@ -0,0 +1,526 @@
+#!/bin/sh
+set -e
+# Docker CE for Linux installation script
+#
+# See https://docs.docker.com/engine/install/ for the installation steps.
+#
+# This script is meant for quick & easy install via:
+# $ curl -fsSL https://get.docker.com -o get-docker.sh
+# $ sh get-docker.sh
+#
+# For test builds (ie. release candidates):
+# $ curl -fsSL https://test.docker.com -o test-docker.sh
+# $ sh test-docker.sh
+#
+# NOTE: Make sure to verify the contents of the script
+# you downloaded matches the contents of install.sh
+# located at https://github.com/docker/docker-install
+# before executing.
+#
+# Git commit from https://github.com/docker/docker-install when
+# the script was uploaded (Should only be modified by upload job):
+SCRIPT_COMMIT_SHA="7cae5f8b0decc17d6571f9f52eb840fbc13b2737"
+
+
+# The channel to install from:
+# * nightly
+# * test
+# * stable
+# * edge (deprecated)
+DEFAULT_CHANNEL_VALUE="stable"
+if [ -z "$CHANNEL" ]; then
+ CHANNEL=$DEFAULT_CHANNEL_VALUE
+fi
+
+#DEFAULT_DOWNLOAD_URL="https://download.docker.com"
+DEFAULT_DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce"
+if [ -z "$DOWNLOAD_URL" ]; then
+ DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL
+fi
+
+DEFAULT_REPO_FILE="docker-ce.repo"
+if [ -z "$REPO_FILE" ]; then
+ REPO_FILE="$DEFAULT_REPO_FILE"
+fi
+
+mirror=''
+DRY_RUN=${DRY_RUN:-}
+while [ $# -gt 0 ]; do
+ case "$1" in
+ --mirror)
+ mirror="$2"
+ shift
+ ;;
+ --dry-run)
+ DRY_RUN=1
+ ;;
+ --*)
+ echo "Illegal option $1"
+ ;;
+ esac
+ shift $(( $# > 0 ? 1 : 0 ))
+done
+
+case "$mirror" in
+ Aliyun)
+ DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce"
+ ;;
+ AzureChinaCloud)
+ DOWNLOAD_URL="https://mirror.azure.cn/docker-ce"
+ ;;
+esac
+
+# docker-ce-rootless-extras is packaged since Docker 20.10.0
+has_rootless_extras="1"
+if echo "$VERSION" | grep -q '^1'; then
+ has_rootless_extras=
+fi
+
+command_exists() {
+ command -v "$@" > /dev/null 2>&1
+}
+
+is_dry_run() {
+ if [ -z "$DRY_RUN" ]; then
+ return 1
+ else
+ return 0
+ fi
+}
+
+is_wsl() {
+ case "$(uname -r)" in
+ *microsoft* ) true ;; # WSL 2
+ *Microsoft* ) true ;; # WSL 1
+ * ) false;;
+ esac
+}
+
+is_darwin() {
+ case "$(uname -s)" in
+ *darwin* ) true ;;
+ *Darwin* ) true ;;
+ * ) false;;
+ esac
+}
+
+deprecation_notice() {
+ distro=$1
+ date=$2
+ echo
+ echo "DEPRECATION WARNING:"
+ echo " The distribution, $distro, will no longer be supported in this script as of $date."
+ echo " If you feel this is a mistake please submit an issue at https://github.com/docker/docker-install/issues/new"
+ echo
+ sleep 10
+}
+
+get_distribution() {
+ lsb_dist=""
+ # Every system that we officially support has /etc/os-release
+ if [ -r /etc/os-release ]; then
+ lsb_dist="$(. /etc/os-release && echo "$ID")"
+ fi
+ # Returning an empty string here should be alright since the
+ # case statements don't act unless you provide an actual value
+ echo "$lsb_dist"
+}
+
+add_debian_backport_repo() {
+ debian_version="$1"
+ backports="deb http://ftp.debian.org/debian $debian_version-backports main"
+ if ! grep -Fxq "$backports" /etc/apt/sources.list; then
+ (set -x; $sh_c "echo \"$backports\" >> /etc/apt/sources.list")
+ fi
+}
+
+echo_docker_as_nonroot() {
+ if is_dry_run; then
+ return
+ fi
+ if command_exists docker && [ -e /var/run/docker.sock ]; then
+ (
+ set -x
+ $sh_c 'docker version'
+ ) || true
+ fi
+
+ # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
+ echo
+ echo "================================================================================"
+ echo
+ if [ -n "$has_rootless_extras" ]; then
+ echo "To run Docker as a non-privileged user, consider setting up the"
+ echo "Docker daemon in rootless mode for your user:"
+ echo
+ echo " dockerd-rootless-setuptool.sh install"
+ echo
+ echo "Visit https://docs.docker.com/go/rootless/ to learn about rootless mode."
+ echo
+ fi
+ echo
+ echo "To run the Docker daemon as a fully privileged service, but granting non-root"
+ echo "users access, refer to https://docs.docker.com/go/daemon-access/"
+ echo
+ echo "WARNING: Access to the remote API on a privileged Docker daemon is equivalent"
+ echo " to root access on the host. Refer to the 'Docker daemon attack surface'"
+ echo " documentation for details: https://docs.docker.com/go/attack-surface/"
+ echo
+ echo "================================================================================"
+ echo
+}
+
+# Check if this is a forked Linux distro
+check_forked() {
+
+ # Check for lsb_release command existence, it usually exists in forked distros
+ if command_exists lsb_release; then
+ # Check if the `-u` option is supported
+ set +e
+ lsb_release -a -u > /dev/null 2>&1
+ lsb_release_exit_code=$?
+ set -e
+
+ # Check if the command has exited successfully, it means we're in a forked distro
+ if [ "$lsb_release_exit_code" = "0" ]; then
+ # Print info about current distro
+ cat <<-EOF
+ You're using '$lsb_dist' version '$dist_version'.
+ EOF
+
+ # Get the upstream release info
+ lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]')
+ dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]')
+
+ # Print info about upstream distro
+ cat <<-EOF
+ Upstream release is '$lsb_dist' version '$dist_version'.
+ EOF
+ else
+ if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
+ if [ "$lsb_dist" = "osmc" ]; then
+ # OSMC runs Raspbian
+ lsb_dist=raspbian
+ else
+ # We're Debian and don't even know it!
+ lsb_dist=debian
+ fi
+ dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
+ case "$dist_version" in
+ 10)
+ dist_version="buster"
+ ;;
+ 9)
+ dist_version="stretch"
+ ;;
+ 8|'Kali Linux 2')
+ dist_version="jessie"
+ ;;
+ esac
+ fi
+ fi
+ fi
+}
+
+semverParse() {
+ major="${1%%.*}"
+ minor="${1#$major.}"
+ minor="${minor%%.*}"
+ patch="${1#$major.$minor.}"
+ patch="${patch%%[-.]*}"
+}
+
+do_install() {
+ echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA"
+
+ if command_exists docker; then
+ docker_version="$(docker -v | cut -d ' ' -f3 | cut -d ',' -f1)"
+ MAJOR_W=1
+ MINOR_W=10
+
+ semverParse "$docker_version"
+
+ shouldWarn=0
+ if [ "$major" -lt "$MAJOR_W" ]; then
+ shouldWarn=1
+ fi
+
+ if [ "$major" -le "$MAJOR_W" ] && [ "$minor" -lt "$MINOR_W" ]; then
+ shouldWarn=1
+ fi
+
+ cat >&2 <<-'EOF'
+ Warning: the "docker" command appears to already exist on this system.
+
+ If you already have Docker installed, this script can cause trouble, which is
+ why we're displaying this warning and provide the opportunity to cancel the
+ installation.
+
+ If you installed the current Docker package using this script and are using it
+ EOF
+
+ if [ $shouldWarn -eq 1 ]; then
+ cat >&2 <<-'EOF'
+ again to update Docker, we urge you to migrate your image store before upgrading
+ to v1.10+.
+
+ You can find instructions for this here:
+ https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
+ EOF
+ else
+ cat >&2 <<-'EOF'
+ again to update Docker, you can safely ignore this message.
+ EOF
+ fi
+
+ cat >&2 <<-'EOF'
+
+ You may press Ctrl+C now to abort this script.
+ EOF
+ ( set -x; sleep 20 )
+ fi
+
+ user="$(id -un 2>/dev/null || true)"
+
+ sh_c='sh -c'
+ if [ "$user" != 'root' ]; then
+ if command_exists sudo; then
+ sh_c='sudo -E sh -c'
+ elif command_exists su; then
+ sh_c='su -c'
+ else
+ cat >&2 <<-'EOF'
+ Error: this installer needs the ability to run commands as root.
+ We are unable to find either "sudo" or "su" available to make this happen.
+ EOF
+ exit 1
+ fi
+ fi
+
+ if is_dry_run; then
+ sh_c="echo"
+ fi
+
+ # perform some very rudimentary platform detection
+ lsb_dist=$( get_distribution )
+ lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
+
+ if is_wsl; then
+ echo
+ echo "WSL DETECTED: We recommend using Docker Desktop for Windows."
+ echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
+ echo
+ cat >&2 <<-'EOF'
+
+ You may press Ctrl+C now to abort this script.
+ EOF
+ ( set -x; sleep 20 )
+ fi
+
+ case "$lsb_dist" in
+
+ ubuntu)
+ if command_exists lsb_release; then
+ dist_version="$(lsb_release --codename | cut -f2)"
+ fi
+ if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
+ dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
+ fi
+ ;;
+
+ debian|raspbian)
+ dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
+ case "$dist_version" in
+ 10)
+ dist_version="buster"
+ ;;
+ 9)
+ dist_version="stretch"
+ ;;
+ 8)
+ dist_version="jessie"
+ ;;
+ esac
+ ;;
+
+ centos|rhel)
+ if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
+ dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
+ fi
+ ;;
+
+ *)
+ if command_exists lsb_release; then
+ dist_version="$(lsb_release --release | cut -f2)"
+ fi
+ if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
+ dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
+ fi
+ ;;
+
+ esac
+
+ # Check if this is a forked Linux distro
+ check_forked
+
+ # Run setup for each distro accordingly
+ case "$lsb_dist" in
+ ubuntu|debian|raspbian)
+ pre_reqs="apt-transport-https ca-certificates curl"
+ if [ "$lsb_dist" = "debian" ]; then
+ # libseccomp2 does not exist for debian jessie main repos for aarch64
+ if [ "$(uname -m)" = "aarch64" ] && [ "$dist_version" = "jessie" ]; then
+ add_debian_backport_repo "$dist_version"
+ fi
+ fi
+
+ if ! command -v gpg > /dev/null; then
+ pre_reqs="$pre_reqs gnupg"
+ fi
+ apt_repo="deb [arch=$(dpkg --print-architecture)] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL"
+ (
+ if ! is_dry_run; then
+ set -x
+ fi
+ $sh_c 'apt-get update -qq >/dev/null'
+ $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pre_reqs >/dev/null"
+ $sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" | apt-key add -qq - >/dev/null"
+ $sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list"
+ $sh_c 'apt-get update -qq >/dev/null'
+ )
+ pkg_version=""
+ if [ -n "$VERSION" ]; then
+ if is_dry_run; then
+ echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
+ else
+ # Will work for incomplete versions IE (17.12), but may not actually grab the "latest" if in the test channel
+ pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/~ce~.*/g" | sed "s/-/.*/g").*-0~$lsb_dist"
+ search_command="apt-cache madison 'docker-ce' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
+ pkg_version="$($sh_c "$search_command")"
+ echo "INFO: Searching repository for VERSION '$VERSION'"
+ echo "INFO: $search_command"
+ if [ -z "$pkg_version" ]; then
+ echo
+ echo "ERROR: '$VERSION' not found amongst apt-cache madison results"
+ echo
+ exit 1
+ fi
+ search_command="apt-cache madison 'docker-ce-cli' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
+ # Don't insert an = for cli_pkg_version, we'll just include it later
+ cli_pkg_version="$($sh_c "$search_command")"
+ pkg_version="=$pkg_version"
+ fi
+ fi
+ (
+ if ! is_dry_run; then
+ set -x
+ fi
+ if [ -n "$cli_pkg_version" ]; then
+ $sh_c "apt-get install -y -qq --no-install-recommends docker-ce-cli=$cli_pkg_version >/dev/null"
+ fi
+ $sh_c "apt-get install -y -qq --no-install-recommends docker-ce$pkg_version >/dev/null"
+ # shellcheck disable=SC2030
+ if [ -n "$has_rootless_extras" ]; then
+ # Install docker-ce-rootless-extras without "--no-install-recommends", so as to install slirp4netns when available
+ $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq docker-ce-rootless-extras$pkg_version >/dev/null"
+ fi
+ )
+ echo_docker_as_nonroot
+ exit 0
+ ;;
+ centos|fedora|rhel)
+ yum_repo="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE"
+ if ! curl -Ifs "$yum_repo" > /dev/null; then
+ echo "Error: Unable to curl repository file $yum_repo, is it valid?"
+ exit 1
+ fi
+ if [ "$lsb_dist" = "fedora" ]; then
+ pkg_manager="dnf"
+ config_manager="dnf config-manager"
+ enable_channel_flag="--set-enabled"
+ disable_channel_flag="--set-disabled"
+ pre_reqs="dnf-plugins-core"
+ pkg_suffix="fc$dist_version"
+ else
+ pkg_manager="yum"
+ config_manager="yum-config-manager"
+ enable_channel_flag="--enable"
+ disable_channel_flag="--disable"
+ pre_reqs="yum-utils"
+ pkg_suffix="el"
+ fi
+ (
+ if ! is_dry_run; then
+ set -x
+ fi
+ $sh_c "$pkg_manager install -y -q $pre_reqs"
+ $sh_c "$config_manager --add-repo $yum_repo"
+
+ if [ "$CHANNEL" != "stable" ]; then
+ $sh_c "$config_manager $disable_channel_flag docker-ce-*"
+ $sh_c "$config_manager $enable_channel_flag docker-ce-$CHANNEL"
+ fi
+ $sh_c "$pkg_manager makecache"
+ )
+ pkg_version=""
+ if [ -n "$VERSION" ]; then
+ if is_dry_run; then
+ echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
+ else
+ pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g").*$pkg_suffix"
+ search_command="$pkg_manager list --showduplicates 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
+ pkg_version="$($sh_c "$search_command")"
+ echo "INFO: Searching repository for VERSION '$VERSION'"
+ echo "INFO: $search_command"
+ if [ -z "$pkg_version" ]; then
+ echo
+ echo "ERROR: '$VERSION' not found amongst $pkg_manager list results"
+ echo
+ exit 1
+ fi
+ search_command="$pkg_manager list --showduplicates 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
+ # It's okay for cli_pkg_version to be blank, since older versions don't support a cli package
+ cli_pkg_version="$($sh_c "$search_command" | cut -d':' -f 2)"
+ # Cut out the epoch and prefix with a '-'
+ pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)"
+ fi
+ fi
+ (
+ if ! is_dry_run; then
+ set -x
+ fi
+ # install the correct cli version first
+ if [ -n "$cli_pkg_version" ]; then
+ $sh_c "$pkg_manager install -y -q docker-ce-cli-$cli_pkg_version"
+ fi
+ $sh_c "$pkg_manager install -y -q docker-ce$pkg_version"
+ # shellcheck disable=SC2031
+ if [ -n "$has_rootless_extras" ]; then
+ $sh_c "$pkg_manager install -y -q docker-ce-rootless-extras$pkg_version"
+ fi
+ )
+ exit 0
+ ;;
+ *)
+ if [ -z "$lsb_dist" ]; then
+ if is_darwin; then
+ echo
+ echo "ERROR: Unsupported operating system 'macOS'"
+ echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
+ echo
+ exit 1
+ fi
+ fi
+ echo
+ echo "ERROR: Unsupported distribution '$lsb_dist'"
+ echo
+ exit 1
+ ;;
+ esac
+
+ exit 1
+}
+
+# wrapped up in a function so that we have some protection against only getting
+# half the file during "curl | sh"
+do_install
diff --git a/feature/scripts/downloadKubekey.sh b/feature/scripts/downloadKubekey.sh
new file mode 100755
index 000000000..8fd9b9471
--- /dev/null
+++ b/feature/scripts/downloadKubekey.sh
@@ -0,0 +1,96 @@
+#!/bin/sh
+
+# Copyright 2020 The KubeSphere Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ISLINUX=true
+OSTYPE="linux"
+
+if [ "x$(uname)" != "xLinux" ]; then
+ echo ""
+  echo 'Warning: Non-Linux operating systems are not supported! After downloading, please copy the tar.gz file to a Linux machine.'
+ ISLINUX=false
+fi
+
+# Fetch latest version
+if [ "x${VERSION}" = "x" ]; then
+ VERSION="$(curl -sL https://api.github.com/repos/kubesphere/kubekey/releases |
+ grep -o 'download/v[0-9]*.[0-9]*.[0-9]*/' |
+ sort --version-sort |
+ tail -1 | awk -F'/' '{ print $2}')"
+ VERSION="${VERSION##*/}"
+fi
+
+if [ -z "${ARCH}" ]; then
+ case "$(uname -m)" in
+ x86_64)
+ ARCH=amd64
+ ;;
+ armv8*)
+ ARCH=arm64
+ ;;
+ aarch64*)
+ ARCH=arm64
+ ;;
+ *)
+    echo "$(uname -m) isn't supported"
+ exit 1
+ ;;
+ esac
+fi
+
+if [ "x${VERSION}" = "x" ]; then
+ echo "Unable to get latest Kubekey version. Set VERSION env var and re-run. For example: export VERSION=v1.0.0"
+ echo ""
+  exit 1
+fi
+
+DOWNLOAD_URL="https://github.com/kubesphere/kubekey/releases/download/${VERSION}/kubekey-${VERSION}-${OSTYPE}-${ARCH}.tar.gz"
+if [ "x${KKZONE}" = "xcn" ]; then
+ DOWNLOAD_URL="https://kubernetes.pek3b.qingstor.com/kubekey/releases/download/${VERSION}/kubekey-${VERSION}-${OSTYPE}-${ARCH}.tar.gz"
+fi
+
+echo ""
+echo "Downloading kubekey ${VERSION} from ${DOWNLOAD_URL} ..."
+echo ""
+
+curl -fsLO "$DOWNLOAD_URL"
+if [ $? -ne 0 ]; then
+ echo ""
+ echo "Failed to download Kubekey ${VERSION} !"
+ echo ""
+ echo "Please verify the version you are trying to download."
+ echo ""
+  exit 1
+fi
+
+if [ ${ISLINUX} = true ]; then
+ filename="kubekey-${VERSION}-${OSTYPE}-${ARCH}.tar.gz"
+ ret='0'
+ command -v tar >/dev/null 2>&1 || { ret='1'; }
+ if [ "$ret" -eq 0 ]; then
+ tar -xzf "${filename}"
+ else
+ echo "Kubekey ${VERSION} Download Complete!"
+ echo ""
+ echo "Try to unpack the ${filename} failed."
+ echo "tar: command not found, please unpack the ${filename} manually."
+    exit 1
+ fi
+fi
+
+echo ""
+echo "Kubekey ${VERSION} Download Complete!"
+echo ""
+
diff --git a/feature/scripts/harborCreateRegistriesAndReplications.sh b/feature/scripts/harborCreateRegistriesAndReplications.sh
new file mode 100644
index 000000000..86375c486
--- /dev/null
+++ b/feature/scripts/harborCreateRegistriesAndReplications.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+
+function createRegistries() {
+
+ # create registry
+ curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/registries" -d "{\"name\": \"master1_2_master2\", \"type\": \"harbor\", \"url\":\"https://${master2_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
+ # create registry
+ curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/registries" -d "{\"name\": \"master1_2_master3\", \"type\": \"harbor\", \"url\":\"https://${master3_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
+
+ # create registry
+ curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/registries" -d "{\"name\": \"master2_2_master1\", \"type\": \"harbor\", \"url\":\"https://${master1_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
+ # create registry
+ curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/registries" -d "{\"name\": \"master2_2_master3\", \"type\": \"harbor\", \"url\":\"https://${master3_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
+
+ # create registry
+ curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/registries" -d "{\"name\": \"master3_2_master1\", \"type\": \"harbor\", \"url\":\"https://${master1_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
+ # create registry
+ curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/registries" -d "{\"name\": \"master3_2_master2\", \"type\": \"harbor\", \"url\":\"https://${master2_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
+
+}
+
+function listRegistries() {
+ curl -k -u $Harbor_UserPwd -X GET -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/registries"
+ curl -k -u $Harbor_UserPwd -X GET -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/registries"
+ curl -k -u $Harbor_UserPwd -X GET -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/registries"
+
+}
+
+function createReplication() {
+
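+ # Note: dest_registry ids 1 and 2 assume the two registries created above are
+ # the only ones on a fresh Harbor instance; adjust the ids if others already exist.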
+ curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master1_2_master2\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 1, \"name\": \"master1_2_master2\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
+ curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master1_2_master3\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 2, \"name\": \"master1_2_master3\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
+
+ curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master2_2_master1\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 1, \"name\": \"master2_2_master1\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
+ curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master2_2_master3\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 2, \"name\": \"master2_2_master3\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
+
+ curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master3_2_master1\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 1, \"name\": \"master3_2_master1\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
+ curl -k -u $Harbor_UserPwd -X POST -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master3_2_master2\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 2, \"name\": \"master3_2_master2\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
+}
+
+function listReplications() {
+
+ curl -k -u $Harbor_UserPwd -X GET -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/replication/policies"
+ curl -k -u $Harbor_UserPwd -X GET -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/replication/policies"
+ curl -k -u $Harbor_UserPwd -X GET -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/replication/policies"
+}
+
+#### main ####
+Harbor_master1_Address=master1:7443
+master1_Address=192.168.122.61
+Harbor_master2_Address=master2:7443
+master2_Address=192.168.122.62
+Harbor_master3_Address=master3:7443
+master3_Address=192.168.122.63
+Harbor_User=admin # Harbor login user
+Harbor_Passwd="Harbor12345" # Harbor login password
+Harbor_UserPwd="$Harbor_User:$Harbor_Passwd"
+
+
+createRegistries
+listRegistries
+createReplication
+listReplications
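+
+# Verification sketch (assumes the Harbor v2.0 replication API; adjust to your
+# Harbor version):
+#   curl -k -u $Harbor_UserPwd "https://${Harbor_master1_Address}/api/v2.0/replication/executions"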
diff --git a/feature/scripts/harbor_keepalived/check_harbor.sh b/feature/scripts/harbor_keepalived/check_harbor.sh
new file mode 100644
index 000000000..0a3fb0bda
--- /dev/null
+++ b/feature/scripts/harbor_keepalived/check_harbor.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+#count=$(docker-compose -f /opt/harbor/docker-compose.yml ps -a|grep healthy|wc -l)
+# Avoid calling docker-compose frequently here; each invocation creates temporary directories under /tmp/_MEI*.
+count=$(docker ps |grep goharbor|grep healthy|wc -l)
+status=$(ss -tlnp|grep -w 443|wc -l)
+if [ "$count" -ne 11 ];then
+ exit 8
+elif [ "$status" -lt 2 ];then
+ exit 9
+else
+ exit 0
+fi
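+
+# Manual test sketch: run the check and inspect the exit code
+# (0 = healthy, 8 = unhealthy harbor containers, 9 = port 443 not listening):
+#   bash check_harbor.sh; echo $?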
diff --git a/feature/scripts/harbor_keepalived/docker-compose-keepalived-backup.yaml b/feature/scripts/harbor_keepalived/docker-compose-keepalived-backup.yaml
new file mode 100644
index 000000000..7328f96a8
--- /dev/null
+++ b/feature/scripts/harbor_keepalived/docker-compose-keepalived-backup.yaml
@@ -0,0 +1,14 @@
+version: '3.8'
+
+# Reference docker-compose file for running keepalived as a single container.
+# See the image's README.md for additional configuration parameters.
+services:
+ keepalived:
+ image: 'registry.cn-shenzhen.aliyuncs.com/colovu/keepalived:2.1'
+ privileged: true
+ network_mode: host
+ volumes:
+ - ./keepalived-backup.conf:/srv/conf/keepalived/keepalived.conf
+ - ./check_harbor.sh:/srv/conf/keepalived/check_harbor.sh
+ container_name: keepalived
+ restart: on-failure
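+
+# Start sketch (assumes the Docker Compose v2 CLI):
+#   docker compose -f docker-compose-keepalived-backup.yaml up -d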
diff --git a/feature/scripts/harbor_keepalived/docker-compose-keepalived-master.yaml b/feature/scripts/harbor_keepalived/docker-compose-keepalived-master.yaml
new file mode 100644
index 000000000..64f35aee4
--- /dev/null
+++ b/feature/scripts/harbor_keepalived/docker-compose-keepalived-master.yaml
@@ -0,0 +1,14 @@
+version: '3.8'
+
+# Reference docker-compose file for running keepalived as a single container.
+# See the image's README.md for additional configuration parameters.
+services:
+ keepalived:
+ image: 'registry.cn-shenzhen.aliyuncs.com/colovu/keepalived:2.1'
+ privileged: true
+ network_mode: host
+ volumes:
+ - ./keepalived-master.conf:/srv/conf/keepalived/keepalived.conf
+ - ./check_harbor.sh:/srv/conf/keepalived/check_harbor.sh
+ container_name: keepalived
+ restart: on-failure
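+
+# Start sketch (assumes the Docker Compose v2 CLI):
+#   docker compose -f docker-compose-keepalived-master.yaml up -d
+# Once the MASTER node wins the VRRP election, the VIP from
+# keepalived-master.conf (192.168.122.59) should appear on ens3:
+#   ip addr show ens3 | grep 192.168.122.59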
diff --git a/feature/scripts/harbor_keepalived/keepalived-backup.conf b/feature/scripts/harbor_keepalived/keepalived-backup.conf
new file mode 100644
index 000000000..be916c90f
--- /dev/null
+++ b/feature/scripts/harbor_keepalived/keepalived-backup.conf
@@ -0,0 +1,31 @@
+vrrp_script check_harbor {
+ script "/srv/conf/keepalived/check_harbor.sh"
+ interval 10 # check interval in seconds (default: 1)
+ fall 2 # consecutive failures before the check is marked as failed
+ rise 2 # consecutive successes before a failed check is marked healthy again
+ timeout 5
+ init_fail
+}
+global_defs {
+ script_user root
+ router_id harbor-ha
+ enable_script_security
+ lvs_sync_daemon ens3 VI_1
+}
+vrrp_instance VI_1 {
+ state BACKUP
+ interface ens3
+ virtual_router_id 31 # must be unique per keepalived cluster on the same LAN
+ priority 50
+ advert_int 1
+ authentication {
+ auth_type PASS
+ auth_pass k8s-test
+ }
+ virtual_ipaddress {
+ 192.168.122.59
+ }
+ track_script {
+ check_harbor
+ }
+}
diff --git a/feature/scripts/harbor_keepalived/keepalived-master.conf b/feature/scripts/harbor_keepalived/keepalived-master.conf
new file mode 100644
index 000000000..de3566e48
--- /dev/null
+++ b/feature/scripts/harbor_keepalived/keepalived-master.conf
@@ -0,0 +1,31 @@
+vrrp_script check_harbor {
+ script "/srv/conf/keepalived/check_harbor.sh"
+ interval 10 # check interval in seconds (default: 1)
+ fall 2 # consecutive failures before the check is marked as failed
+ rise 2 # consecutive successes before a failed check is marked healthy again
+ timeout 5
+ init_fail
+}
+global_defs {
+ script_user root
+ router_id harbor-ha
+ enable_script_security
+ lvs_sync_daemon ens3 VI_1
+}
+vrrp_instance VI_1 {
+ state MASTER
+ interface ens3
+ virtual_router_id 31 # must be unique per keepalived cluster on the same LAN
+ priority 100
+ advert_int 1
+ authentication {
+ auth_type PASS
+ auth_pass k8s-test
+ }
+ virtual_ipaddress {
+ 192.168.122.59
+ }
+ track_script {
+ check_harbor
+ }
+}
diff --git a/feature/version/version.go b/feature/version/version.go
new file mode 100644
index 000000000..c1ca6b279
--- /dev/null
+++ b/feature/version/version.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package version implements version handling code.
+package version
+
+import (
+ _ "embed"
+ "encoding/json"
+ "fmt"
+ "runtime"
+)
+
+var (
+ gitMajor string // major version, always numeric
+ gitMinor string // minor version, numeric possibly followed by "+"
+ gitVersion string // semantic version, derived by build scripts
+ gitCommit string // sha1 from git, output of $(git rev-parse HEAD)
+ gitTreeState string // state of git tree, either "clean" or "dirty"
+ buildDate string // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
+)
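+
+// These values are blank by default and are expected to be stamped at build
+// time via -ldflags; a sketch (the module path is an assumption, check this
+// repository's go.mod):
+//
+//	go build -ldflags "-X <module>/version.gitVersion=$(git describe --tags) \
+//	  -X <module>/version.gitCommit=$(git rev-parse HEAD)"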
+
+// Info exposes information about the version used for the current running code.
+type Info struct {
+ Major string `json:"major,omitempty"`
+ Minor string `json:"minor,omitempty"`
+ GitVersion string `json:"gitVersion,omitempty"`
+ GitCommit string `json:"gitCommit,omitempty"`
+ GitTreeState string `json:"gitTreeState,omitempty"`
+ BuildDate string `json:"buildDate,omitempty"`
+ GoVersion string `json:"goVersion,omitempty"`
+ Compiler string `json:"compiler,omitempty"`
+ Platform string `json:"platform,omitempty"`
+}
+
+// Get returns an Info object with all the information about the current running code.
+func Get() Info {
+ return Info{
+ Major: gitMajor,
+ Minor: gitMinor,
+ GitVersion: gitVersion,
+ GitCommit: gitCommit,
+ GitTreeState: gitTreeState,
+ BuildDate: buildDate,
+ GoVersion: runtime.Version(),
+ Compiler: runtime.Compiler,
+ Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
+ }
+}
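+
+// A minimal usage sketch from a calling package, e.g. for a JSON version flag
+// (the caller and flag are illustrative):
+//
+//	if data, err := json.Marshal(version.Get()); err == nil {
+//		fmt.Println(string(data))
+//	}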
+
+// String returns info as a human-friendly version string.
+func (info Info) String() string {
+ return info.GitVersion
+}
+
+// ParseFilesSha256 loads files' sha256 checksums from the components.json payload.
+func ParseFilesSha256(componentsJSON []byte) (map[string]map[string]map[string]string, error) {
+ m := make(map[string]map[string]map[string]string)
+ if err := json.Unmarshal(componentsJSON, &m); err != nil {
+  return nil, err
+ }
+
+ return m, nil
+}