Skip to content

Commit 6e6792b

Browse files
committed
调整addetcd/addnode/addmaster脚本
1 parent d2d164b commit 6e6792b

File tree

8 files changed

+22
-21
lines changed

8 files changed

+22
-21
lines changed

22.upgrade.yml

+2
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,7 @@
11
# WARNING: Upgrade the k8s cluster can be risky. Make sure you know what you are doing.
22
# Read the guide: 'op/upgrade.md' .
3+
# Usage: `ansible-playbook /etc/ansible/22.upgrade.yml -t upgrade_k8s`
4+
# or `easzctl upgrade`
35

46
# update masters
57
- hosts:

roles/kube-master/tasks/main.yml

+3-5
Original file line numberDiff line numberDiff line change
@@ -31,15 +31,13 @@
3131
# 创建aggregator proxy相关证书
3232
- name: 创建 aggregator proxy证书签名请求
3333
template: src=aggregator-proxy-csr.json.j2 dest={{ ca_dir }}/aggregator-proxy-csr.json
34-
tags: upgrade_k8s
3534

3635
- name: 创建 aggregator-proxy证书和私钥
3736
shell: "cd {{ ca_dir }} && {{ bin_dir }}/cfssl gencert \
3837
-ca={{ ca_dir }}/ca.pem \
3938
-ca-key={{ ca_dir }}/ca-key.pem \
4039
-config={{ ca_dir }}/ca-config.json \
4140
-profile=kubernetes aggregator-proxy-csr.json | {{ bin_dir }}/cfssljson -bare aggregator-proxy"
42-
tags: upgrade_k8s
4341

4442
- block:
4543
- name: 生成 basic-auth 随机密码
@@ -66,17 +64,17 @@
6664
- kube-apiserver.service
6765
- kube-controller-manager.service
6866
- kube-scheduler.service
69-
tags: upgrade_k8s, restart_master
67+
tags: restart_master
7068

7169
# 为兼容v1.8版本,配置不同 kube-apiserver的systemd unit文件
7270
- name: 获取 k8s 版本信息
7371
shell: "{{ bin_dir }}/kube-apiserver --version"
7472
register: k8s_ver
75-
tags: upgrade_k8s, restart_master
73+
tags: restart_master
7674

7775
- name: 创建kube-apiserver v1.8的systemd unit文件
7876
template: src=kube-apiserver-v1.8.service.j2 dest=/etc/systemd/system/kube-apiserver.service
79-
tags: upgrade_k8s, restart_master
77+
tags: restart_master
8078
when: "'v1.8' in k8s_ver.stdout"
8179

8280
- name: enable master 服务

roles/kube-node/tasks/main.yml

+5-2
Original file line numberDiff line numberDiff line change
@@ -80,13 +80,15 @@
8080
- name: 注册变量 DNS_SVC_IP
8181
shell: echo {{ SERVICE_CIDR }}|cut -d/ -f1|awk -F. '{print $1"."$2"."$3"."$4+2}'
8282
register: DNS_SVC_IP
83+
tags: restart_node
8384

8485
- name: 设置变量 CLUSTER_DNS_SVC_IP
8586
set_fact: CLUSTER_DNS_SVC_IP={{ DNS_SVC_IP.stdout }}
87+
tags: restart_node
8688

8789
- name: 创建kubelet的systemd unit文件
8890
template: src=kubelet.service.j2 dest=/etc/systemd/system/kubelet.service
89-
tags: upgrade_k8s, restart_node
91+
tags: restart_node
9092

9193
- name: 开机启用kubelet 服务
9294
shell: systemctl enable kubelet
@@ -113,8 +115,8 @@
113115
when: "inventory_hostname in groups['kube-master']"
114116

115117
- name: 创建kube-proxy 服务文件
116-
tags: reload-kube-proxy, upgrade_k8s, restart_node
117118
template: src=kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
119+
tags: reload-kube-proxy, restart_node
118120

119121
- name: 开机启用kube-proxy 服务
120122
shell: systemctl enable kube-proxy
@@ -131,6 +133,7 @@
131133
until: '"running" in kubelet_status.stdout'
132134
retries: 8
133135
delay: 2
136+
tags: reload-kube-proxy, upgrade_k8s, restart_node
134137

135138
- name: 轮询等待node达到Ready状态
136139
shell: "{{ bin_dir }}/kubectl get node {{ inventory_hostname }}|awk 'NR>1{print $2}'"

roles/kube-node/tasks/node_lb.yml

+1
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
- name: fail info1
33
fail: msg="an 'kube-node' node CAN NOT be a 'ex-lb' node at the same time"
44
when: "inventory_hostname in groups['ex-lb']"
5+
tags: restart_lb
56

67
- name: 安装 haproxy
78
package: name=haproxy state=present

tools/19.addetcd.yml

+2-1
Original file line numberDiff line numberDiff line change
@@ -6,14 +6,15 @@
66
tasks:
77
- name: add a new etcd member
88
shell: "ETCDCTL_API=3 {{ bin_dir }}/etcdctl member add {{ NODE_NAME }} --peer-urls=https://{{ NODE_TO_ADD }}:2380"
9+
# new etcd node will be groups.etcd[0]
910
delegate_to: "{{ groups.etcd[1] }}"
1011

1112
# start the new-etcd node
1213
- hosts: "{{ NODE_TO_ADD }}"
1314
vars:
1415
CLUSTER_STATE: existing
1516
roles:
16-
- { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
17+
- { role: chrony, when: "groups['chrony']|length > 0" }
1718
- prepare
1819
- etcd
1920

tools/20.addnode.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
1-
# Note: this playbook cann't run independently
1+
# Note: this playbook cannot run independently
22
# Usage: easzctl add-node 1.1.1.1
33

44
- hosts: "{{ NODE_TO_ADD }}"
55
roles:
6-
- { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
6+
- { role: chrony, when: "groups['chrony']|length > 0" }
77
- prepare
88
- { role: docker, when: "CONTAINER_RUNTIME == 'docker'" }
99
- { role: containerd, when: "CONTAINER_RUNTIME == 'containerd'" }

tools/21.addmaster.yml

+4-8
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33

44
- hosts: "{{ NODE_TO_ADD }}"
55
roles:
6-
- { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
6+
- { role: chrony, when: "groups['chrony']|length > 0" }
77
- prepare
88
- { role: docker, when: "CONTAINER_RUNTIME == 'docker'" }
99
- { role: containerd, when: "CONTAINER_RUNTIME == 'containerd'" }
@@ -18,16 +18,12 @@
1818
tasks:
1919
- name: Making master nodes SchedulingDisabled
2020
shell: "{{ bin_dir }}/kubectl cordon {{ NODE_TO_ADD }} "
21-
delegate_to: "{{ groups.deploy[0] }}"
22-
when: DEPLOY_MODE != "allinone"
21+
when: "inventory_hostname not in groups['kube-node']"
2322
ignore_errors: true
2423

2524
- name: Setting master role name
2625
shell: "{{ bin_dir }}/kubectl label node {{ NODE_TO_ADD }} kubernetes.io/role=master --overwrite"
2726
ignore_errors: true
28-
delegate_to: "{{ groups.deploy[0] }}"
2927

30-
# reconfigure and restart the haproxy service
31-
- hosts: lb
32-
roles:
33-
- lb
28+
# reconfigure and restart the haproxy service on 'kube-node' nodes
29+
# refer to the function 'add-node()' in 'tools/easzctl'

tools/easzctl

+3-3
Original file line numberDiff line numberDiff line change
@@ -89,9 +89,6 @@ function add-master() {
8989
# check new master's address regexp
9090
[[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }
9191

92-
# check if k8s with DPLOY_MODE='multi-master'
93-
grep '^DEPLOY_MODE=multi-master' $BASEPATH/hosts || { echo "[ERROR] only k8s with DPLOY_MODE='multi-master' can have master node added!"; return 2; }
94-
9592
# check if the new master already existed
9693
sed -n '/^\[kube-master/,/^\[kube-node/p' $BASEPATH/hosts|grep "^$1" && { echo "[ERROR] master $1 already existed!"; return 2; }
9794

@@ -101,6 +98,9 @@ function add-master() {
10198
# check if playbook runs successfully
10299
ansible-playbook $BASEPATH/tools/21.addmaster.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NEW_MASTER=yes/d" $BASEPATH/hosts; return 2; }
103100

101+
# reconfigure and restart the haproxy service on 'kube-node' nodes
102+
ansible-playbook $BASEPATH/05.kube-node.yml -t restart_lb || { echo "[ERROR] Failed to restart the haproxy service on 'kube-node' nodes!"; return 2; }
103+
104104
# save current cluster context if needed
105105
[ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
106106
return 0

0 commit comments

Comments
 (0)