diff --git a/builtin/roles/addons/cni/defaults/main.yaml b/builtin/roles/addons/cni/defaults/main.yaml
index 21497f1db..722bc1561 100644
--- a/builtin/roles/addons/cni/defaults/main.yaml
+++ b/builtin/roles/addons/cni/defaults/main.yaml
@@ -1,33 +1,33 @@
 cni:
   kube_proxy: |
-    {{- .kubernetes.kube_proxy.enabled | default true -}}
+    {{ .kubernetes.kube_proxy.enabled | default true }}
   # apiVersion for policy may be changed for difference kubernetes version. https://kube-api.ninja
   api_version_policy: |
-    {{- if .kube_version | semverCompare "
[…]
" 50. it default true.
   typha: |
-    {{- if gt (.groups.k8s_cluster | default list | len) 50 -}}
+    {{- if gt (.groups.k8s_cluster | default list | len) 50 }}
     true
-    {{- else -}}
+    {{- else }}
     false
-    {{- end -}}
+    {{- end }}
   veth_mtu: 0
   ipip_mode: Always
   vxlan_mode: Never
@@ -94,19 +94,19 @@ cni:
   spire_server_repository: |
     {{ .ghcrio_registry }}/spiffe/spire-server
   k8s_endpoint: |
-    {{- if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") -}}
-    {{- .kubernetes.control_plane_endpoint -}}
-    {{- else -}}
-    {{- .groups.kube_control_plane | default list | first -}}
-    {{- end -}}
+    {{- if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}
+    {{ .kubernetes.control_plane_endpoint }}
+    {{- else }}
+    {{ .groups.kube_control_plane | default list | first }}
+    {{- end }}
   k8s_port: |
-    {{- .kubernetes.apiserver.port | default 6443 -}}
+    {{ .kubernetes.apiserver.port | default 6443 }}
  kubeovn:
    replica: 1
    registry: |
      {{ .dockerio_registry }}/kubeovn
  hybridnet:
    registry: |
-      {{- .dockerio_registry -}}
+      {{ .dockerio_registry }}
 #    hybridnet_image: hybridnetdev/hybridnet
 #    hybridnet_tag: v0.8.8
diff --git a/builtin/roles/addons/cni/tasks/cilium.yaml b/builtin/roles/addons/cni/tasks/cilium.yaml
index dd0ce3e0a..c1235d2b7 100644
--- a/builtin/roles/addons/cni/tasks/cilium.yaml
+++ b/builtin/roles/addons/cni/tasks/cilium.yaml
@@ -27,9 +27,9 @@
       --set operator.replicas={{ .cni.cilium.operator_replicas }} \
       --set ipv6.enabled={{ .cni.ipv6_support }} \
       --set ipv4NativeRoutingCIDR: {{ .cni.kube_pods_v4_cidr }} \
-      {{- if .cni.ipv6_support -}}
+      {{- if .cni.ipv6_support }}
       --set ipv6NativeRoutingCIDR: {{ .cni.kube_pods_v6_cidr }} \
-      {{- end -}}
-      {{- if .cni.kube_proxy -}}
+      {{- end }}
+      {{- if .cni.kube_proxy }}
       --set kubeProxyReplacement=strict --set k8sServiceHost={{ .cni.cilium.k8s_endpoint }} --set k8sServicePort={{ .cni.cilium.k8s_port }}
-      {{- end -}}
+      {{- end }}
diff --git a/builtin/roles/addons/cni/tasks/hybridnet.yaml b/builtin/roles/addons/cni/tasks/hybridnet.yaml
index 9eba898dc..21fcf402d 100644
--- a/builtin/roles/addons/cni/tasks/hybridnet.yaml
+++ b/builtin/roles/addons/cni/tasks/hybridnet.yaml
@@ -10,10 +10,10 @@
 - name: Install hybridnet
   command: |
     helm install hybridnet /etc/kubernetes/cni/hybridnet-{{ .hybridnet_version }}.tgz --namespace kube-system \
-    {{- if ne .cni.hybridnet.hybridnet_image "" -}}
+    {{- if ne .cni.hybridnet.hybridnet_image "" }}
     --set images.hybridnet.image={{ .cni.hybridnet.hybridnet_image }} \
-    {{- end -}}
-    {{- if ne .cni.hybridnet.hybridnet_tag "" -}}
+    {{- end }}
+    {{- if ne .cni.hybridnet.hybridnet_tag "" }}
     --set images.hybridnet.tag={{ .cni.hybridnet.hybridnet_tag }} \
-    {{- end -}}
+    {{- end }}
     --set image.registryURL={{ .cni.hybridnet.registry }} \
diff --git a/builtin/roles/addons/cni/tasks/kubeovn.yaml b/builtin/roles/addons/cni/tasks/kubeovn.yaml
index deac54ca9..5c892188c 100644
--- a/builtin/roles/addons/cni/tasks/kubeovn.yaml
+++ b/builtin/roles/addons/cni/tasks/kubeovn.yaml
@@ -16,15 +16,15 @@
 - name: Install kubeovn
   command: |
     helm install kubeovn /etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz --set replicaCount={{ .cni.kubeovn.replica }} \
-    {{ $ips := list }}
-    {{- range .groups.kube_control_plane | default list -}}
-    {{- $ips = append $ips (index $.inventory_hosts . "internal_ipv4") -}}
-    {{- end -}}
-    --set MASTER_NODES={{ $ips |join "," }} \
+    {{- $ips := list }}
+    {{- range .groups.kube_control_plane | default list }}
+    {{- $ips = append $ips (index $.inventory_hosts . "internal_ipv4") }}
+    {{- end }}
+    --set MASTER_NODES={{ $ips | join "," }} \
     --set global.registry.address={{ .cni.kubeovn.registry }} \
     --set ipv4.POD_CIDR={{ .cni.kubeovn.kube_pods_v4_cidr }} --set ipv4.SVC_CIDR={{ .cni.kubeovn.kube_svc_cidr }} \
-    {{- if .cni.ipv6_support -}}
+    {{- if .cni.ipv6_support }}
     --set networking.NET_STACK=dual_stack \
     --set dual_stack.POD_CIDR={{ .cni.kubeovn.kube_pods_v4_cidr }},{{ .cni.kubeovn.kube_pods_v6_cidr }} \
     --set dual_stack.SVC_CIDR={{ .cni.kubeovn.kube_svc_cidr }} \
-    {{- end -}}
+    {{- end }}
diff --git a/builtin/roles/addons/cni/templates/calico/v3.27.yaml b/builtin/roles/addons/cni/templates/calico/v3.27.yaml
index e6709565e..2f66392f4 100644
--- a/builtin/roles/addons/cni/templates/calico/v3.27.yaml
+++ b/builtin/roles/addons/cni/templates/calico/v3.27.yaml
@@ -51,7 +51,7 @@ data:
       "log_file_path": "/var/log/calico/cni/cni.log",
       "datastore_type": "kubernetes",
       "nodename": "__KUBERNETES_NODE_NAME__",
-      "mtu": __CNI_MTU__,
+      "mtu": "__CNI_MTU__",
       "ipam": {
           "type": "calico-ipam"
       },
@@ -5149,7 +5149,9 @@ spec:
     spec:
       nodeSelector:
         kubernetes.io/os: linux
-{{ .cni.calico.node_selector|to_yaml:8|safe }}
+{{- if .cni.calico.node_selector }}
+{{ .cni.calico.node_selector | toYaml | indent 8 }}
+{{- end }}
       tolerations:
         # Mark the pod as a critical add-on for rescheduling.
         - key: CriticalAddonsOnly
@@ -5244,7 +5246,9 @@ spec:
     spec:
       nodeSelector:
         kubernetes.io/os: linux
-{{ .cni.calico.node_selector|to_yaml:8|safe }}
+{{- if .cni.calico.node_selector }}
+{{ .cni.calico.node_selector | toYaml | indent 8 }}
+{{- end }}
       hostNetwork: true
       # Typha supports graceful shut down, disconnecting clients slowly during the grace period.
      # The TYPHA_SHUTDOWNTIMEOUTSECS env var should be kept in sync with this value.
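The recurring `-}}` → `}}` change in the helm `command` templates above comes down to newline handling: in Go's text/template, `-}}` also trims the newline that follows the action, so a conditional `--set` flag ends up glued to the previous line's trailing `\`. A minimal sketch of the difference, using toy flags rather than KubeKey's real variables:

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// "-}}" trims the newline after each action: the flags collapse onto the
	// line that already ends with the shell continuation "\".
	const trimmed = "helm install x \\\n{{- if . -}}\n--set a=1 \\\n{{- end -}}\n--set b=2"
	// "}}" keeps the newline: every flag stays on its own line.
	const kept = "helm install x \\\n{{- if . }}\n--set a=1 \\\n{{- end }}\n--set b=2"

	for _, src := range []string{trimmed, kept} {
		template.Must(template.New("cmd").Parse(src)).Execute(os.Stdout, true)
		os.Stdout.WriteString("\n----\n")
	}
}
```

The first template renders `helm install x \--set a=1 \--set b=2` on a single line, which the shell cannot parse; the second keeps each flag after its line continuation.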
diff --git a/builtin/roles/addons/sc/defaults/main.yaml b/builtin/roles/addons/sc/defaults/main.yaml
index 8c9a3eb28..a1234abd6 100644
--- a/builtin/roles/addons/sc/defaults/main.yaml
+++ b/builtin/roles/addons/sc/defaults/main.yaml
@@ -11,5 +11,5 @@ sc:
     enabled: false
     default: false
     server: |
-      {{ groups.nfs | first }}
+      {{ .groups.nfs | default list | first }}
     path: /share/kubernetes
diff --git a/builtin/roles/certs/renew-kubernetes/tasks/kube.yaml b/builtin/roles/certs/renew-kubernetes/tasks/kube.yaml
index fa0e3b35c..564a8b029 100644
--- a/builtin/roles/certs/renew-kubernetes/tasks/kube.yaml
+++ b/builtin/roles/certs/renew-kubernetes/tasks/kube.yaml
@@ -9,31 +9,31 @@
   tags: ["certs"]
   run_once: true
   command: |
-    {{- if .kubeadm_install_version.stdout | semverCompare "
[…]
> $chronyConfigFile
     # add server
-    {{- range $server := .ntp_servers -}}
-    {{- range $.inventory_hosts -}}
-    {{- if eq .inventory_name $server -}}
-    {{- $server = .internal_ipv4 -}}
-    {{- end -}}
-    {{- end -}}
+    {{- range $server := .ntp_servers }}
+    {{- range $.inventory_hosts }}
+    {{- if eq .hostname $server }}
+    {{- $server = .internal_ipv4 }}
+    {{- end }}
+    {{- end }}
     grep -q '^server {{ $server }} iburst' $chronyConfigFile || sed '1a server {{ $server }} iburst' -i $chronyConfigFile
-    {{- end -}}
+    {{- end }}

 - name: Set timezone
   command: |
diff --git a/builtin/roles/init/init-os/tasks/main.yaml b/builtin/roles/init/init-os/tasks/main.yaml
index 7a8637d2c..58bdf1087 100644
--- a/builtin/roles/init/init-os/tasks/main.yaml
+++ b/builtin/roles/init/init-os/tasks/main.yaml
@@ -14,6 +14,7 @@
   command: |
     hostnamectl set-hostname {{ .inventory_name }} \
       && sed -i '/^127.0.1.1/s/.*/127.0.1.1 {{ .inventory_name }}/g' /etc/hosts
+  when: .inventory_name | ne "localhost"

 - name: Sync init os to remote
   template:
diff --git a/builtin/roles/init/init-os/templates/init-os.sh b/builtin/roles/init/init-os/templates/init-os.sh
index c530ba930..8a3d43d13 100644
--- a/builtin/roles/init/init-os/templates/init-os.sh
+++ b/builtin/roles/init/init-os/templates/init-os.sh
@@ -176,10 +176,10 @@
 cat >>/etc/hosts<
[…]
=v1.24.0" -}}
+      {{- if .kube_version | semverCompare ">=v1.24.0" }}
       kubeadm/kubeadm-init.v1beta3
-      {{- else -}}
+      {{- else }}
       kubeadm/kubeadm-init.v1beta2
-      {{- end -}}
+      {{- end }}
     dest: /etc/kubernetes/kubeadm-config.yaml

 - name: Init kubernetes cluster
@@ -78,8 +78,8 @@
 - name: Remote master taint
   ignore_errors: true
   command: |
-    /usr/local/bin/kubectl taint nodes {{ .inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
-    /usr/local/bin/kubectl taint nodes {{ .inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
+    /usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/master=:NoSchedule-
+    /usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/control-plane=:NoSchedule-

 - name: Add work label
   command: |
-    /usr/local/bin/kubectl label --overwrite node {{ .inventory_name }} node-role.kubernetes.io/worker=
+    /usr/local/bin/kubectl label --overwrite node {{ .hostname }} node-role.kubernetes.io/worker=
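The task above picks the kubeadm config flavor with `semverCompare ">=v1.24.0"`. Assuming the template function behaves like sprig's `semverCompare` (a wrapper around github.com/Masterminds/semver, taking the constraint first and the piped version last), the same gate in plain Go looks like this — a sketch, with the two template paths taken from the diff:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

// kubeadmInitTemplate mirrors the template's version gate.
func kubeadmInitTemplate(kubeVersion string) (string, error) {
	c, err := semver.NewConstraint(">=v1.24.0")
	if err != nil {
		return "", err
	}
	v, err := semver.NewVersion(kubeVersion)
	if err != nil {
		return "", err
	}
	if c.Check(v) {
		return "kubeadm/kubeadm-init.v1beta3", nil
	}
	return "kubeadm/kubeadm-init.v1beta2", nil
}

func main() {
	for _, ver := range []string{"v1.23.10", "v1.26.3"} {
		tpl, _ := kubeadmInitTemplate(ver)
		fmt.Println(ver, "=>", tpl) // v1beta2 for the first, v1beta3 for the second
	}
}
```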
diff --git a/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml b/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml
index 827f6c875..20b38f005 100644
--- a/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml
+++ b/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml
@@ -2,11 +2,11 @@
 - name: Generate kubeadm join config
   template:
     src: |
-      {{- if .kube_version | semverCompare ">=v1.24.0" -}}
+      {{- if .kube_version | semverCompare ">=v1.24.0" }}
       kubeadm/kubeadm-join.v1beta3
-      {{- else -}}
+      {{- else }}
       kubeadm/kubeadm-join.v1beta2
-      {{- end -}}
+      {{- end }}
     dest: /etc/kubernetes/kubeadm-config.yaml

 - name: Sync audit policy file to remote
@@ -36,8 +36,8 @@
 - name: Remote master taint
   ignore_errors: true
   command: |
-    /usr/local/bin/kubectl taint nodes {{ .inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
-    /usr/local/bin/kubectl taint nodes {{ .inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
+    /usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/master=:NoSchedule-
+    /usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/control-plane=:NoSchedule-

 - name: Add work label
   command: |
-    /usr/local/bin/kubectl label --overwrite node {{ .inventory_name }} node-role.kubernetes.io/worker=
+    /usr/local/bin/kubectl label --overwrite node {{ .hostname }} node-role.kubernetes.io/worker=
diff --git a/builtin/roles/install/kubernetes/tasks/main.yaml b/builtin/roles/install/kubernetes/tasks/main.yaml
index 7c1c348ad..13ebfdf1c 100644
--- a/builtin/roles/install/kubernetes/tasks/main.yaml
+++ b/builtin/roles/install/kubernetes/tasks/main.yaml
@@ -1,7 +1,7 @@
 ---
 - name: Check kubernetes if installed
   ignore_errors: true
-  command: kubectl get node --field-selector metadata.name={{ .inventory_name }}
+  command: kubectl get node --field-selector metadata.name={{ .hostname }}
   register: kube_node_info_important

 - include_tasks: install_binaries.yaml
@@ -15,10 +15,10 @@
   run_once: true
   set_fact:
     init_kubernetes_node: |
-      {{ .groups.kube_control_plane | default list | first }}
+      {{ index .inventory_hosts (.groups.kube_control_plane | default list | first) "hostname" }}

 - name: Init kubernetes
-  when: eq .inventory_name .init_kubernetes_node
+  when: eq .hostname .init_kubernetes_node
   block:
     - include_tasks: init_kubernetes.yaml
       when: .kube_node_info_important.stderr | ne ""
@@ -54,7 +54,7 @@
 - include_tasks: join_kubernetes.yaml
   when:
     - .kube_node_info_important.stderr | ne ""
-    - ne .inventory_name .init_kubernetes_node
+    - ne .hostname .init_kubernetes_node

 - include_tasks: deploy_haproxy.yaml
   when:
@@ -63,7 +63,7 @@
 - name: Add custom label to cluster
   command: |
-    {{- range $k, $v := .kubernetes.custom_label -}}
-    /usr/local/bin/kubectl label --overwrite node {{ $.inventory_name }} {{ $k }}={{ $v }}
-    {{- end -}}
+    {{- range $k, $v := .kubernetes.custom_label }}
+    /usr/local/bin/kubectl label --overwrite node {{ $.hostname }} {{ $k }}={{ $v }}
+    {{- end }}
   when: .kubernetes.custom_label | len | lt 0
diff --git a/builtin/roles/install/kubernetes/templates/dns/coredns.deployment b/builtin/roles/install/kubernetes/templates/dns/coredns.deployment
index 990e41b91..13dca952f 100644
--- a/builtin/roles/install/kubernetes/templates/dns/coredns.deployment
+++ b/builtin/roles/install/kubernetes/templates/dns/coredns.deployment
@@ -246,7 +246,7 @@ data:
         }
     {{- end }}
-    {{- if $.kubernetes.coredns.dns_etc_hosts | len | lt 0) }}
+    {{- if $.kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
         hosts /etc/coredns/hosts {
           fallthrough
         }
@@ -254,9 +254,9 @@ data:
     }
 {{- end }}
-{{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0) }}
+{{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
   hosts: |
 {{- range .kubernetes.coredns.dns_etc_hosts }}
-    {{ $. }}
+    {{ . }}
 {{- end }}
{{- end }}
diff --git a/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset b/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset
index 59fc01ca6..c205438a7 100644
--- a/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset
+++ b/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset
@@ -117,7 +117,7 @@ data:
         log errors
         loadbalance
-        cache {{ index $ez "cache" }}
+        cache {{ .cache }}
         reload
         loop
         bind 169.254.25.10
@@ -144,7 +144,7 @@ data:
             max_fails {{ .max_fails | default 2 }}
             expire {{ .expire | default "10s" }}
     {{- if .tls }}
-            tls {{ .tls.cert_file }} {{ i.tls.key_file }} {{ .tls.ca_file }}
+            tls {{ .tls.cert_file }} {{ .tls.key_file }} {{ .tls.ca_file }}
     {{- end }}
     {{- if .tls_servername }}
             tls_servername {{ .tls_servername }}
@@ -221,7 +221,7 @@ data:
     {{- end }}
     }
-{{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0) }}
+{{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
   hosts: |
 {{- range .kubernetes.coredns.dns_etc_hosts }}
     {{ . }}
diff --git a/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg b/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg
index 13a982d99..8f095ffe8 100644
--- a/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg
+++ b/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg
@@ -37,5 +37,5 @@ backend kube_api_backend
   option httpchk GET /healthz
   http-check expect status 200
 {{- range .groups.kube_control_plane | default list }}
-  server {{ index $.inventory_hosts . "inventory_name" }} {{ index $.inventory_hosts . "internal_ipv4" }}:{{ $.kubernetes.apiserver.port }} check check-ssl verify none
+  server {{ index $.inventory_hosts . "hostname" }} {{ index $.inventory_hosts . "internal_ipv4" }}:{{ $.kubernetes.apiserver.port }} check check-ssl verify none
 {{- end }}
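Both DNS templates above rely on the guard `.kubernetes.coredns.dns_etc_hosts | len | lt 0`, and the fix removes a stray `)` that made the action unparsable. The pipeline reads backwards at first glance: a template pipeline passes its value as the last argument, so the expression is `lt 0 (len ...)`, i.e. "length greater than zero". A runnable check:

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// lt 0 (len .hosts): true exactly when .hosts is non-empty.
	t := template.Must(template.New("guard").Parse(
		`{{ if .hosts | len | lt 0 }}has hosts{{ else }}no hosts{{ end }}`))
	t.Execute(os.Stdout, map[string]any{"hosts": []string{"1.2.3.4 node1"}})
	// Output: has hosts
}
```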
"internal_ipv6" }} @@ -81,7 +81,7 @@ apiServer: {{- end }} controllerManager: extraArgs: -{{- if and .internal_ipv6 (ne .internal_ipv6 "") }} +{{- if gt ( .kubernetes.networking.pod_cidr | splitList "," | len) 1 }} node-cidr-mask-size-ipv4: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}" node-cidr-mask-size-ipv6: "64" {{- else }} diff --git a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3 b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3 index d9aa64397..ec02d2ff9 100644 --- a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3 +++ b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3 @@ -62,7 +62,7 @@ apiServer: - kubernetes.default.svc.{{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }} - kubernetes.default.svc.{{ .kubernetes.networking.dns_domain }} {{- range .groups.k8s_cluster | default list }} - - {{ . }}.{{ .kubernetes.networking.dns_domain }} + - {{ index $.inventory_hosts . "hostname" }}.{{ .kubernetes.networking.dns_domain }} - {{ index $.inventory_hosts . "internal_ipv4" }} {{- if index $.inventory_hosts . "internal_ipv6" }} - {{ index $.inventory_hosts . "internal_ipv6" }} @@ -80,7 +80,7 @@ apiServer: {{- end }} controllerManager: extraArgs: -{{- if and .internal_ipv6 (ne .internal_ipv6 "") }} +{{- if gt ( .kubernetes.networking.pod_cidr | splitList "," | len) 1 }} node-cidr-mask-size-ipv4: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}" node-cidr-mask-size-ipv6: "64" {{- else }} diff --git a/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env b/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env index 66089b19b..1d3fd5ed3 100644 --- a/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env +++ b/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env @@ -7,7 +7,7 @@ EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env # This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use # the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. EnvironmentFile=-/etc/default/kubelet -Environment="KUBELET_EXTRA_ARGS=--node-ip={{ .internal_ipv4 }} --hostname-override={{ .inventory_name }} {{ range $k,$v := .kubernetes.kubelet.extra_args }}--{{ $k }} {{ $v }} {{ end }}" +Environment="KUBELET_EXTRA_ARGS=--node-ip={{ .internal_ipv4 }} --hostname-override={{ .hostname }} {{ range $k,$v := .kubernetes.kubelet.extra_args }}--{{ $k }} {{ $v }} {{ end }}" ExecStart= ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS diff --git a/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP b/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP index dcbf83fd8..1578f35c2 100644 --- a/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP +++ b/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP @@ -30,10 +30,10 @@ spec: value: "true" - name: bgp_routerid value: | - {{ $ips := list }} - {{- range .groups.kube_control_plane | default list -}} - {{- $ips = append $ips (index $.inventory_hosts . "internal_ipv4") -}} - {{- end -}} + {{- $ips := list }} + {{- range .groups.kube_control_plane | default list }} + {{- $ips = append $ips (index $.inventory_hosts . 
"internal_ipv4") }} + {{- end }} {{ $ips | join "," }} - name: bgp_as value: "65000" @@ -43,10 +43,10 @@ spec: value: "65000" - name: bgp_peers value: | - {{ $ips := list }} - {{- range .groups.kube_control_plane | default list -}} - {{- $ips = append $ips (printf "%s:65000::false" (index $.inventory_hosts . "internal_ipv4")) -}} - {{- end -}} + {{- $ips := list }} + {{- range .groups.kube_control_plane | default list }} + {{- $ips = append $ips (printf "%s:65000::false" (index $.inventory_hosts . "internal_ipv4")) }} + {{- end }} {{ $ips | join "," }} - name: lb_enable value: "true" diff --git a/builtin/roles/precheck/env_check/tasks/main.yaml b/builtin/roles/precheck/env_check/tasks/main.yaml index 2c8f595b3..ebc767f81 100644 --- a/builtin/roles/precheck/env_check/tasks/main.yaml +++ b/builtin/roles/precheck/env_check/tasks/main.yaml @@ -11,12 +11,12 @@ - and .kubernetes.kube_vip.address (ne .kubernetes.kube_vip.address "") - .kubernetes.kube_vip.address | regexMatch "^((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])|(([0-9a-fA-F]{1,4}:){7}([0-9a-fA-F]{1,4}|:)|(([0-9a-fA-F]{1,4}:){1,6}|:):([0-9a-fA-F]{1,4}|:){1,6}([0-9a-fA-F]{1,4}|:)))$" - | - {{- $existIP := false -}} - {{- range .groups.all | default list -}} - {{- if eq $.kubernetes.kube_vip.address (index $.inventory_hosts . "internal_ipv4") -}} - {{ $existIP = true }} - {{- end -}} - {{- end -}} + {{- $existIP := false }} + {{- range .groups.all | default list }} + {{- if eq $.kubernetes.kube_vip.address (index $.inventory_hosts . "internal_ipv4") }} + {{- $existIP = true }} + {{- end }} + {{- end }} {{ not $existIP }} fail_msg: | "kubernetes.control_plane_endpoint" should be a un-used ip address when "kubernetes.kube_vip.enabled" is true diff --git a/builtin/roles/precheck/env_check/tasks/os.yaml b/builtin/roles/precheck/env_check/tasks/os.yaml index a434d6b81..d5282b04e 100644 --- a/builtin/roles/precheck/env_check/tasks/os.yaml +++ b/builtin/roles/precheck/env_check/tasks/os.yaml @@ -1,7 +1,7 @@ --- - name: Stop if bad hostname assert: - that: .inventory_name | regexMatch "^[a-z0-9]([a-z0-9-]*[a-z0-9])?(\\.[a-z0-9]([a-z0-9-]*[a-z0-9])?)*$" + that: .hostname | regexMatch "^[a-z0-9]([a-z0-9-]*[a-z0-9])?(\\.[a-z0-9]([a-z0-9-]*[a-z0-9])?)*$" fail_msg: "Hostname must consist of lower case alphanumeric characters, '.' or '-', and must start and end with an alphanumeric character" - name: Stop if the os does not support @@ -13,11 +13,11 @@ assert: that: or (.cluster_require.supported_architectures.amd64 | has .os.architecture) (.cluster_require.supported_architectures.arm64 | has .os.architecture) success_msg: | - {{- if .cluster_require.supported_architectures.amd64 | has .os.architecture -}} + {{- if .cluster_require.supported_architectures.amd64 | has .os.architecture }} amd64 - {{- else -}} + {{- else }} arm64 - {{- end -}} + {{- end }} fail_msg: "{{ .os.architecture }} is not a known arch" register: binary_type diff --git a/pkg/const/common.go b/pkg/const/common.go index 1fbecb5ca..1a49b0f34 100644 --- a/pkg/const/common.go +++ b/pkg/const/common.go @@ -45,8 +45,11 @@ const ( // === From inventory === ) const ( // === From system generate === - // VariableHostName the value is host_name - VariableHostName = "inventory_name" + // VariableInventoryName the value which defined in inventory.spec.host. 
diff --git a/pkg/const/common.go b/pkg/const/common.go
index 1fbecb5ca..1a49b0f34 100644
--- a/pkg/const/common.go
+++ b/pkg/const/common.go
@@ -45,8 +45,11 @@ const ( // === From inventory ===
 )

 const ( // === From system generate ===
-	// VariableHostName the value is host_name
-	VariableHostName = "inventory_name"
+	// VariableInventoryName the host name defined in inventory.spec.hosts.
+	VariableInventoryName = "inventory_name"
+	// VariableHostName the node's actual hostname; defaults to VariableInventoryName.
+	// If VariableInventoryName is "localhost", try to set the actual name.
+	VariableHostName = "hostname"
 	// VariableGlobalHosts the value is host_var which defined in inventory.
 	VariableGlobalHosts = "inventory_hosts"
 	// VariableGroupsAll the value is a all host_name slice of VariableGroups.
diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go
index ccd034c7e..7e11b8b1c 100644
--- a/pkg/executor/executor.go
+++ b/pkg/executor/executor.go
@@ -54,7 +54,7 @@ func NewTaskExecutor(client ctrlclient.Client, pipeline *kubekeyv1.Pipeline, log
 	// get variable
 	v, err := variable.New(client, *pipeline)
 	if err != nil {
-		klog.V(4).ErrorS(nil, "convert playbook error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
+		klog.V(5).ErrorS(nil, "convert playbook error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
 		return nil
 	}

@@ -110,7 +110,7 @@ func (e executor) Exec(ctx context.Context) error {
 			hosts = ahn.([]string)
 		}
 		if len(hosts) == 0 { // if hosts is empty skip this playbook
-			klog.V(4).Info("Hosts is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+			klog.V(5).Info("Hosts is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
 			continue
 		}

@@ -123,7 +123,7 @@
 			}
 			// merge host information to runtime variable
 			if err := e.variable.Merge(variable.MergeRemoteVariable(h, gfv)); err != nil {
-				klog.V(4).ErrorS(err, "Merge gather fact error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "host", h)
+				klog.V(5).ErrorS(err, "Merge gather fact error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "host", h)
 				return fmt.Errorf("merge gather fact error: %w", err)
 			}
 		}
@@ -142,18 +142,17 @@
 			}
 		}

-		// generate task by each batch.
+		// generate and execute tasks.
 		for _, serials := range batchHosts {
 			// each batch hosts should not be empty.
 			if len(serials) == 0 {
-				klog.V(4).ErrorS(nil, "Host is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+				klog.V(5).ErrorS(nil, "Host is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
 				return fmt.Errorf("host is empty")
 			}

 			if err := e.mergeVariable(ctx, e.variable, play.Vars, serials...); err != nil {
 				return fmt.Errorf("merge variable error: %w", err)
 			}
-
-			// generate task from pre tasks
 			if err := e.execBlock(ctx, execBlockOptions{
 				hosts: serials,
@@ -163,7 +162,6 @@
 			}); err != nil {
 				return fmt.Errorf("execute pre-tasks from play error: %w", err)
 			}
-
 			// generate task from role
 			for _, role := range play.Roles {
 				if err := e.mergeVariable(ctx, e.variable, role.Vars, serials...); err != nil {
@@ -213,7 +211,7 @@ func (e executor) getGatherFact(ctx context.Context, hostname string, vars variable.Variable) (map[string]any, error) {
 	v, err := vars.Get(variable.GetParamVariable(hostname))
 	if err != nil {
-		klog.V(4).ErrorS(err, "Get host variable error", "hostname", hostname)
+		klog.V(5).ErrorS(err, "Get host variable error", "hostname", hostname)
 		return nil, err
 	}
 	connectorVars := make(map[string]any)
@@ -224,11 +222,11 @@ func (e executor) getGatherFact(ctx context.Context, hostname string, vars varia
 	}
 	conn, err := connector.NewConnector(hostname, connectorVars)
 	if err != nil {
-		klog.V(4).ErrorS(err, "New connector error", "hostname", hostname)
+		klog.V(5).ErrorS(err, "New connector error", "hostname", hostname)
 		return nil, err
 	}
 	if err := conn.Init(ctx); err != nil {
-		klog.V(4).ErrorS(err, "Init connection error", "hostname", hostname)
+		klog.V(5).ErrorS(err, "Init connection error", "hostname", hostname)
 		return nil, err
 	}
 	defer conn.Close(ctx)
@@ -236,7 +234,7 @@
 	if gf, ok := conn.(connector.GatherFacts); ok {
 		return gf.Info(ctx)
 	}
-	klog.V(4).ErrorS(nil, "gather fact is not defined in this connector", "hostname", hostname)
+	klog.V(5).ErrorS(nil, "gather fact is not defined in this connector", "hostname", hostname)
 	return nil, nil
 }

@@ -275,7 +273,7 @@ func (e executor) execBlock(ctx context.Context, options execBlockOptions) error
 				when: append(options.when, at.When.Data...),
 				tags: tags,
 			}); err != nil {
-				klog.V(4).ErrorS(err, "execute tasks from block error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
+				klog.V(5).ErrorS(err, "execute tasks from block error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
 				errs = errors.Join(errs, err)
 			}

@@ -289,7 +287,7 @@
 				when: append(options.when, at.When.Data...),
 				tags: tags,
 			}); err != nil {
-				klog.V(4).ErrorS(err, "execute tasks from rescue error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
+				klog.V(5).ErrorS(err, "execute tasks from rescue error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
 				errs = errors.Join(errs, err)
 			}
 		}
@@ -304,7 +302,7 @@
 				when: append(options.when, at.When.Data...),
 				tags: tags,
 			}); err != nil {
-				klog.V(4).ErrorS(err, "execute tasks from always error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
+				klog.V(5).ErrorS(err, "execute tasks from always error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
"block", at.Name) errs = errors.Join(errs, err) } } @@ -323,14 +321,14 @@ func (e executor) execBlock(ctx context.Context, options execBlockOptions) error task.GenerateName = e.pipeline.Name + "-" task.Namespace = e.pipeline.Namespace if err := controllerutil.SetControllerReference(e.pipeline, task, e.client.Scheme()); err != nil { - klog.V(4).ErrorS(err, "Set controller reference error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) + klog.V(5).ErrorS(err, "Set controller reference error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) return err } // complete module by unknown field for n, a := range at.UnknownFiled { data, err := json.Marshal(a) if err != nil { - klog.V(4).ErrorS(err, "Marshal unknown field error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name, "field", n) + klog.V(5).ErrorS(err, "Marshal unknown field error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name, "field", n) return err } if m := modules.FindModule(n); m != nil { @@ -340,20 +338,21 @@ func (e executor) execBlock(ctx context.Context, options execBlockOptions) error } } if task.Spec.Module.Name == "" { // action is necessary for a task - klog.V(4).ErrorS(nil, "No module/action detected in task", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) + klog.V(5).ErrorS(nil, "No module/action detected in task", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) return fmt.Errorf("no module/action detected in task: %s", task.Name) } // create task if err := e.client.Create(ctx, task); err != nil { - klog.V(4).ErrorS(err, "create task error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) + klog.V(5).ErrorS(err, "create task error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) return err } for { var roleLog string if task.Annotations[kubekeyv1alpha1.TaskAnnotationRole] != "" { - roleLog = "[" + task.Annotations[kubekeyv1alpha1.TaskAnnotationRole] + "]" + roleLog = "[" + task.Annotations[kubekeyv1alpha1.TaskAnnotationRole] + "] " } + klog.V(5).InfoS("begin run task", "task", ctrlclient.ObjectKeyFromObject(task)) fmt.Fprintf(e.logOutput, "%s %s%s\n", time.Now().Format(time.TimeOnly+" MST"), roleLog, task.Spec.Name) // exec task task.Status.Phase = kubekeyv1alpha1.TaskPhaseRunning @@ -361,7 +360,7 @@ func (e executor) execBlock(ctx context.Context, options execBlockOptions) error klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(task)) } if err := e.executeTask(ctx, task, options); err != nil { - klog.V(4).ErrorS(err, "exec task error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) + klog.V(5).ErrorS(err, "exec task error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) return err } if err := e.client.Status().Update(ctx, task); err != nil { @@ -435,7 +434,7 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, o } } if stderr != "" && task.Spec.IgnoreError != nil && *task.Spec.IgnoreError { - klog.V(4).ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(task)) + klog.V(5).ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(task)) } else if stderr != "" { klog.ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", 
 	}
@@ -455,7 +454,7 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, o
 	// progress bar for task
 	var bar = progressbar.NewOptions(-1,
 		progressbar.OptionSetWriter(e.logOutput),
-		progressbar.OptionSpinnerType(59),
+		progressbar.OptionSpinnerCustom([]string{" "}),
 		progressbar.OptionEnableColorCodes(true),
 		progressbar.OptionSetDescription(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[36mrunning\033[0m", h, placeholder)),
 		progressbar.OptionOnCompletion(func() {
@@ -463,8 +462,6 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, o
 				klog.ErrorS(err, "failed to write output", "host", h)
 			}
 		}),
-		progressbar.OptionShowElapsedTimeOnFinish(),
-		progressbar.OptionSetPredictTime(false),
 	)
 	go func() {
 		for !bar.IsFinished() {
@@ -497,12 +494,6 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, o
 			stderr = fmt.Sprintf("get variable error: %v", err)
 			return
 		}
-		// execute module with loop
-		loop, err := e.parseLoop(ctx, ha.(map[string]any), task)
-		if err != nil {
-			stderr = fmt.Sprintf("parse loop vars error: %v", err)
-			return
-		}
 		// check when condition
 		if len(task.Spec.When) > 0 {
 			ok, err := tmpl.ParseBool(ha.(map[string]any), task.Spec.When)
@@ -515,8 +506,9 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, o
 				return
 			}
 		}
+		// execute module with loop
 		// if loop is empty. execute once, and the item is null
-		for _, item := range loop {
+		for _, item := range e.parseLoop(ctx, ha.(map[string]any), task) {
 			// set item to runtime variable
 			if err := e.variable.Merge(variable.MergeRuntimeVariable(h, map[string]any{
 				_const.VariableItem: item,
@@ -562,13 +554,13 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, o
 // loop is json string. try convertor to string slice by json.
 // loop is normal string. set it to empty slice and return.
 // loop is string slice. return it.
-func (e executor) parseLoop(ctx context.Context, ha map[string]any, task *kubekeyv1alpha1.Task) ([]any, error) {
+func (e executor) parseLoop(ctx context.Context, ha map[string]any, task *kubekeyv1alpha1.Task) []any {
 	switch {
 	case task.Spec.Loop.Raw == nil:
 		// loop is not set. add one element to execute once module.
-		return []any{nil}, nil
+		return []any{nil}
 	default:
-		return variable.Extension2Slice(ha, task.Spec.Loop), nil
+		return variable.Extension2Slice(ha, task.Spec.Loop)
 	}
 }
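`parseLoop`'s doc comment spells out its contract: an unset loop yields a single nil item so the module still runs once, and anything else is handed to `variable.Extension2Slice`. Neither branch can fail, which is why the refactor drops the error return and inlines the call into the `range`. A reduced, hypothetical model of that contract (the real code delegates to Extension2Slice rather than parsing JSON itself):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// loopItems models parseLoop's behavior: nil raw means "no loop", so the
// caller iterates exactly once with a nil item; otherwise the raw value is
// converted to a slice (here via JSON, purely for illustration).
func loopItems(raw []byte) []any {
	if raw == nil {
		return []any{nil} // execute once; the item renders as null
	}
	var items []any
	if err := json.Unmarshal(raw, &items); err != nil {
		return nil // not a list: nothing to iterate
	}
	return items
}

func main() {
	fmt.Println(loopItems(nil))                 // [<nil>]
	fmt.Println(loopItems([]byte(`["a","b"]`))) // [a b]
}
```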
diff --git a/pkg/manager/command_manager.go b/pkg/manager/command_manager.go
index cfee50bdd..1c91246c9 100644
--- a/pkg/manager/command_manager.go
+++ b/pkg/manager/command_manager.go
@@ -42,13 +42,25 @@ type commandManager struct {
 }

 func (m *commandManager) Run(ctx context.Context) error {
-	fmt.Fprintf(m.logOutput, "%s [Pipeline %s] start\n", time.Now().Format(time.RFC822), ctrlclient.ObjectKeyFromObject(m.Pipeline))
+	fmt.Fprint(m.logOutput, `
+
+ _   __      _          _   __
+| | / /     | |        | | / /
+| |/ / _   _| |__   ___| |/ /  ___ _   _
+|    \| | | | '_ \ / _ \    \ / _ \ | | |
+| |\  \ |_| | |_) |  __/ |\  \  __/ |_| |
+\_| \_/\__,_|_.__/ \___\_| \_/\___|\__, |
+                                    __/ |
+                                   |___/
+
+`)
+	fmt.Fprintf(m.logOutput, "%s [Pipeline %s] start\n", time.Now().Format(time.TimeOnly+" MST"), ctrlclient.ObjectKeyFromObject(m.Pipeline))
 	cp := m.Pipeline.DeepCopy()
 	defer func() {
-		fmt.Fprintf(m.logOutput, "%s [Pipeline %s] finish. total: %v,success: %v,ignored: %v,failed: %v\n", time.Now().Format(time.RFC3339), ctrlclient.ObjectKeyFromObject(m.Pipeline),
+		fmt.Fprintf(m.logOutput, "%s [Pipeline %s] finish. total: %v,success: %v,ignored: %v,failed: %v\n", time.Now().Format(time.TimeOnly+" MST"), ctrlclient.ObjectKeyFromObject(m.Pipeline),
 			m.Pipeline.Status.TaskResult.Total, m.Pipeline.Status.TaskResult.Success, m.Pipeline.Status.TaskResult.Ignored, m.Pipeline.Status.TaskResult.Failed)
 		if !m.Pipeline.Spec.Debug && m.Pipeline.Status.Phase == kubekeyv1.PipelinePhaseSucceed {
-			fmt.Fprintf(m.logOutput, "%s [Pipeline %s] clean runtime directory\n", time.Now().Format(time.RFC822), ctrlclient.ObjectKeyFromObject(m.Pipeline))
+			fmt.Fprintf(m.logOutput, "%s [Pipeline %s] clean runtime directory\n", time.Now().Format(time.TimeOnly+" MST"), ctrlclient.ObjectKeyFromObject(m.Pipeline))
 			// clean runtime directory
 			if err := os.RemoveAll(_const.GetRuntimeDir()); err != nil {
 				klog.ErrorS(err, "clean runtime directory error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline), "runtime_dir", _const.GetRuntimeDir())
diff --git a/pkg/variable/helper.go b/pkg/variable/helper.go
index 56e18b42d..0147a9658 100644
--- a/pkg/variable/helper.go
+++ b/pkg/variable/helper.go
@@ -182,7 +182,7 @@ func getLocalIP(ipType string) string {
 		if ipType == _const.VariableIPv4 && ipNet.IP.To4() != nil {
 			return ipNet.IP.String()
 		}
-		if ipType == _const.VariableIPv6 && ipNet.IP.To16() != nil {
+		if ipType == _const.VariableIPv6 && ipNet.IP.To16() != nil && ipNet.IP.To4() == nil {
 			return ipNet.IP.String()
 		}
 	}
diff --git a/pkg/variable/helper_test.go b/pkg/variable/helper_test.go
index d0f2cc14e..57f807fe2 100644
--- a/pkg/variable/helper_test.go
+++ b/pkg/variable/helper_test.go
@@ -21,6 +21,7 @@ import (

 	"github.com/stretchr/testify/assert"

+	kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
 	"github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
 )

@@ -288,3 +289,69 @@ func TestParseVariable(t *testing.T) {
 		})
 	}
 }
+
+func TestHostsInGroup(t *testing.T) {
+	testcases := []struct {
+		name      string
+		inventory kubekeyv1.Inventory
+		groupName string
+		except    []string
+	}{
+		{
+			name: "single group",
+			inventory: kubekeyv1.Inventory{
+				Spec: kubekeyv1.InventorySpec{
+					Groups: map[string]kubekeyv1.InventoryGroup{
+						"g1": {
+							Hosts: []string{"h1", "h2", "h3"},
+						},
+					},
+				},
+			},
+			groupName: "g1",
+			except:    []string{"h1", "h2", "h3"},
+		},
+		{
+			name: "group in group",
+			inventory: kubekeyv1.Inventory{
+				Spec: kubekeyv1.InventorySpec{
+					Groups: map[string]kubekeyv1.InventoryGroup{
+						"g1": {
+							Hosts:  []string{"h1", "h2", "h3"},
+							Groups: []string{"g2"},
+						},
+						"g2": {
+							Hosts: []string{"h4"},
+						},
+					},
+				},
+			},
+			groupName: "g1",
+			except:    []string{"h1", "h2", "h3", "h4"},
+		},
+		{
+			name: "repeat hosts in group",
+			inventory: kubekeyv1.Inventory{
+				Spec: kubekeyv1.InventorySpec{
+					Groups: map[string]kubekeyv1.InventoryGroup{
+						"g1": {
+							Hosts:  []string{"h1", "h2", "h3"},
+							Groups: []string{"g2"},
+						},
+						"g2": {
+							Hosts: []string{"h3", "h4"},
+						},
+					},
+				},
+			},
+			groupName: "g1",
+			except:    []string{"h4", "h1", "h2", "h3"},
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			assert.ElementsMatch(t, tc.except, hostsInGroup(tc.inventory, tc.groupName))
+		})
+	}
+}
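The `getLocalIP` change above fixes a classic net.IP pitfall: `To16()` also succeeds for IPv4 addresses (every IPv4 address has a 16-byte mapped form), so the old condition could return an IPv4 address when asked for IPv6. The added `To4() == nil` guard admits only genuine IPv6 addresses:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	v4 := net.ParseIP("192.168.1.10")
	v6 := net.ParseIP("fd00::1")

	fmt.Println(v4.To16() != nil)                    // true: passes the old check
	fmt.Println(v4.To16() != nil && v4.To4() == nil) // false: rejected by the new check
	fmt.Println(v6.To16() != nil && v6.To4() == nil) // true: only real IPv6 passes
}
```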
diff --git a/pkg/variable/internal.go b/pkg/variable/internal.go
index be2e15f23..f3c5702d3 100644
--- a/pkg/variable/internal.go
+++ b/pkg/variable/internal.go
@@ -75,7 +75,10 @@ func (v value) getParameterVariable() map[string]any {
 		hostVars := Extension2Variables(v.Inventory.Spec.Hosts[hostname])
 		// set inventory_name to hostVars
 		// "inventory_name" is the hostname configured in the inventory file.
-		hostVars[_const.VariableHostName] = hostname
+		hostVars[_const.VariableInventoryName] = hostname
+		if _, ok := hostVars[_const.VariableHostName]; !ok {
+			hostVars[_const.VariableHostName] = hostname
+		}
 		// merge group vars to host vars
 		for _, gv := range v.Inventory.Spec.Groups {
 			if slices.Contains(gv.Hosts, hostname) {
@@ -84,6 +87,10 @@
 		}
 		// set default localhost
 		if hostname == _const.VariableLocalHost {
+			if os, ok := v.Hosts[hostname].RemoteVars[_const.VariableOS]; ok {
+				// try to set hostname to the node's actual hostname.
+				hostVars[_const.VariableHostName] = os.(map[string]any)[_const.VariableOSHostName]
+			}
 			if _, ok := hostVars[_const.VariableIPv4]; !ok {
 				hostVars[_const.VariableIPv4] = getLocalIP(_const.VariableIPv4)
 			}
diff --git a/pkg/variable/internal_test.go b/pkg/variable/internal_test.go
index 31bc909b1..fd4f19aed 100644
--- a/pkg/variable/internal_test.go
+++ b/pkg/variable/internal_test.go
@@ -71,9 +71,11 @@ func TestGetAllVariable(t *testing.T) {
 					"images": []interface{}{"abc"},
 				},
 				"inventory_name": "localhost",
+				"hostname":       "localhost",
 			},
 		},
 		"inventory_name": "localhost",
+		"hostname":       "localhost",
 	},
 },
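The internal.go change gives "localhost" entries a real hostname taken from the connector's gathered OS facts, with the inventory name as fallback. Outside KubeKey's fact machinery the same idea is just `os.Hostname` — a sketch, not the project's API:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Inventory says "localhost"; prefer the machine's actual hostname and
	// keep the inventory name only if the lookup fails.
	name := "localhost"
	if actual, err := os.Hostname(); err == nil && actual != "" {
		name = actual
	}
	fmt.Println("hostname:", name)
}
```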