
Installing a Highly Available Kubernetes Cluster on CentOS 7

Posted by wuyunan on 2024-07-05

Kubernetes High-Availability Cluster Installation


1. Cluster Architecture

1. Stacked etcd:

Each master node runs an apiserver and an etcd; each etcd talks only to the apiserver on its own node.

(Figure: stacked etcd topology)

2. External etcd:

The etcd cluster runs on separate hosts, and every apiserver talks to the etcd members.

(Figure: external etcd topology)

3. Deployment architecture used here: the topology below is what we run in production.

(Figure: production deployment architecture)


2. Host Environment Configuration

IP Address      Hostname      Notes
192.168.31.111  k8s-master01  master, etcd
192.168.31.112  k8s-master02  master, etcd
192.168.31.113  k8s-master03  master, etcd
192.168.31.120  k8s-node01    node
192.168.31.121  k8s-node02    node
192.168.31.122  k8s-node03    node
192.168.31.109  k8s-lb01      haproxy + keepalived
192.168.31.110  k8s-lb02      haproxy + keepalived
192.168.31.100  vip           virtual IP, load-balanced apiserver entry point

1. Set hostnames

# Run the matching command on its own host
hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-master02
hostnamectl set-hostname k8s-master03
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02
hostnamectl set-hostname k8s-node03
hostnamectl set-hostname k8s-lb01
hostnamectl set-hostname k8s-lb02

2. Update /etc/hosts

cat >>/etc/hosts<<EOF
192.168.31.109 k8s-lb01
192.168.31.110 k8s-lb02
192.168.31.111 k8s-master01
192.168.31.112 k8s-master02
192.168.31.113 k8s-master03
192.168.31.120 k8s-node01
192.168.31.121 k8s-node02
192.168.31.122 k8s-node03
EOF

3. SSH key authentication

yum install -y sshpass
ssh-keygen -f /root/.ssh/id_rsa -P ''

export HOSTS="k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03 k8s-lb01 k8s-lb02"
export SSHPASS=wuyunan

for HOST in $HOSTS; do sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $HOST ;done
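
With the keys in place, ad-hoc commands can be run across every host non-interactively. A minimal sketch, assuming the HOSTS variable exported above is still set in the current shell:

# Run a command on every host over the freshly distributed keys
for HOST in $HOSTS; do
  echo "== $HOST =="
  ssh -o StrictHostKeyChecking=no root@$HOST 'hostname; date'
done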

4. Configure time synchronization

Method 1: ntpdate (method 2, chrony, follows in the same block)

# Install ntpdate (requires a configured yum repo)
yum install ntpdate -y
# Run a one-off sync; point this at your own NTP server if you have one
ntpdate time1.aliyun.com
# Add a cron job
crontab -e
*/5 * * * * /usr/sbin/ntpdate pool.ntp.org > /dev/null 2>&1


# Method 2: chrony - server side (k8s-master01)

yum install chrony -y
cat > /etc/chrony.conf << EOF
pool ntp.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 192.168.31.0/24
local stratum 10
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF

systemctl restart chronyd ; systemctl enable chronyd
# chrony - client side (all other nodes)

yum install chrony -y
cat > /etc/chrony.conf << EOF
pool 192.168.31.111 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF

systemctl restart chronyd ; systemctl enable chronyd
# Verify from a client
chronyc sources -v

5. Disable SELinux

sed -ri 's/(^SELINUX=).*/\1disabled/' /etc/selinux/config
setenforce 0


#or

setenforce 0
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
sestatus

6. Disable swap

# Disable swap
sed -ri 's@(^.*swap *swap.*0 0$)@#\1@' /etc/fstab
swapoff -a

#or
swapoff -a && sysctl -w vm.swappiness=0
sed -ri 's/.*swap.*/#&/' /etc/fstab

swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
echo "vm.swappiness=0" >> /etc/sysctl.conf
sysctl -p
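
A quick verification sketch: with swap fully off, swapon prints nothing and free reports a zero-sized swap line.

# No output from swapon and 0B totals from free mean swap is off
swapon --show
free -h | grep -i swap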

7. Disable the firewall

systemctl stop firewalld
systemctl disable firewalld
firewall-cmd --state

8. Disable dnsmasq

systemctl disable --now dnsmasq

9. Disable postfix

systemctl disable --now postfix

10. Disable NetworkManager

systemctl disable --now NetworkManager

11. Raise resource limits on all nodes

ulimit -SHn 65535
cat > /etc/security/limits.conf <<EOF
* soft core unlimited
* hard core unlimited
* soft nproc 1000000
* hard nproc 1000000
* soft nofile 1000000
* hard nofile 1000000
* soft memlock 32000
* hard memlock 32000
* soft msgqueue 8192000
EOF

12. Upgrade the kernel

# List the current GRUB menu entries
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg

cat /boot/grub2/grub.cfg | grep menuentry

# Regenerate the GRUB config (run again after installing the new kernel)
grub2-mkconfig -o /boot/grub2/grub.cfg


# Check the current kernel version
uname -sr

# Update the system
yum update

# Import the ELRepo GPG key
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org

# Install the latest ELRepo release
yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm

# List the kernel packages available from elrepo-kernel
yum list available --disablerepo=* --enablerepo=elrepo-kernel

(Figure: kernel packages listed from elrepo-kernel)

# Install the chosen kernel version (use what the listing showed; lt = long-term support)
yum install -y kernel-lt-5.4.258-1.el7.elrepo --enablerepo=elrepo-kernel


# Boot from the new kernel by default
grub2-set-default "CentOS Linux (5.4.258-1.el7.elrepo.x86_64) 7 (Core)"

# Check the default boot entry
grub2-editenv list
saved_entry=CentOS Linux (5.4.258-1.el7.elrepo.x86_64) 7 (Core)

# Reboot so the new kernel takes effect:
reboot

# After the reboot, confirm the kernel version:
uname -sr
Linux 5.4.258-1.el7.elrepo.x86_64

13. Load IPVS modules

# Install base packages
yum install wget git jq psmisc net-tools yum-utils device-mapper-persistent-data curl conntrack ipvsadm ipset iptables sysstat libseccomp rsync telnet lvm2 -y

modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack

Make the IPVS modules load at boot

Edit /etc/modules-load.d/ipvs.conf:

vi /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
systemctl enable --now systemd-modules-load.service

14. Tune kernel parameters

# Kernel parameters (comments are on their own lines: sysctl.conf does not
# support trailing comments after a value)
cat >/etc/sysctl.conf<<EOF

# per-CPU backlog queue length for network devices
net.core.netdev_max_backlog=16384
# max socket read/write buffer sizes for all protocols
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
# accept queue (second backlog) length
net.core.somaxconn = 32768

net.ipv4.neigh.default.gc_stale_time=120

# default 1 strictly validates the reverse path and can drop packets
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_announce=2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2

net.ipv4.ip_local_port_range= 45001 65000
net.ipv4.ip_forward=1

net.ipv4.tcp_syncookies=1
net.ipv4.tcp_synack_retries=2
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_keepalive_intvl=30
net.ipv4.tcp_keepalive_probes=10
# SYN queue (first backlog) length
net.ipv4.tcp_max_syn_backlog = 8096
net.ipv4.tcp_max_tw_buckets=6000


net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1
net.ipv6.neigh.default.gc_thresh1=8192
net.ipv6.neigh.default.gc_thresh2=32768
net.ipv6.neigh.default.gc_thresh3=65536


net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1

net.netfilter.nf_conntrack_max=2310720

# max inotify instances per real user ID (default 128)
fs.inotify.max_user_instances=8192
# max inotify watches per user (default 8192)
fs.inotify.max_user_watches=524288
fs.file-max=52706963
fs.nr_open=52706963
kernel.pid_max = 4194303
net.bridge.bridge-nf-call-arptables=1
# avoid swap; use it only when the system is close to OOM
vm.swappiness=0
# do not check whether physical memory is sufficient on allocation
vm.overcommit_memory=1
# do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
vm.max_map_count = 262144
EOF

# Reboot to apply
reboot


# After the reboot, verify the modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack
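
To spot-check the values directly, query the keys this guide depends on (note the net.bridge keys only resolve once the br_netfilter module from the containerd step below is loaded):

sysctl net.ipv4.ip_forward vm.swappiness
sysctl net.bridge.bridge-nf-call-iptables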

15. Install containerd

# Add the Docker CE repo (provides containerd.io)
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Kernel modules required by containerd
cat << EOF > /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

modprobe overlay
modprobe br_netfilter

# Install containerd
yum install -y containerd.io

mkdir /etc/containerd/certs.d/docker.io -pv
containerd config default | tee /etc/containerd/config.toml

# Switch containerd to the systemd cgroup driver
sed -i "s#SystemdCgroup\ \=\ false#SystemdCgroup\ \=\ true#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep SystemdCgroup

# Point the sandbox (pause) image at the Aliyun mirror
sed -i "s#registry.k8s.io#registry.aliyuncs.com/google_containers#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep sandbox_image

# wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.24.2/crictl-v1.24.2-linux-amd64.tar.gz

# Distribute to all hosts (HOSTS was exported in the SSH step)
for HOST in $HOSTS; do scp /root/crictl-v1.28.0-linux-amd64.tar.gz root@$HOST:/root/ ;done

# Extract
tar xf crictl-v*-linux-amd64.tar.gz -C /usr/bin/
# Generate the crictl config
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF

# Test
systemctl restart containerd
crictl info
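
An optional smoke test: pull the sandbox image through containerd. The image path assumes the Aliyun mirror configured above; match the tag to whatever the sandbox_image grep printed.

# Pull and list the pause image via the CRI endpoint
crictl pull registry.aliyuncs.com/google_containers/pause:3.9
crictl images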

16. Download certificate tooling

# Download the cert-generation tools on the master01 node
wget -c https://github.com/cloudflare/cfssl/releases/download/v1.6.0/cfssl_1.6.0_linux_amd64 -O /usr/local/bin/cfssl
wget -c https://github.com/cloudflare/cfssl/releases/download/v1.6.0/cfssljson_1.6.0_linux_amd64 -O /usr/local/bin/cfssljson
wget -c https://github.com/cloudflare/cfssl/releases/download/v1.6.0/cfssl-certinfo_1.6.0_linux_amd64 -O /usr/local/bin/cfssl-certinfo

# (the offline bundle also ships these binaries)
cp cfssl /usr/local/bin/cfssl
cp cfssljson /usr/local/bin/cfssljson

chmod +x /usr/local/bin/cfssl*
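
Confirm the tools are executable and on PATH before generating any certificates:

# Should print the cfssl release version
cfssl version
ls -l /usr/local/bin/cfssl*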

3. Install the etcd HA Cluster

1. etcd certificates

1. etcd root CA CSR

# Create the etcd PKI directory
mkdir -p /etc/kubernetes/pki/etcd/

cd /etc/kubernetes/pki/

cat > etcd-ca-csr.json <<"EOF"
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "Shanghai",
    "L": "Shanghai",
    "O": "kubernetes",
    "OU": "RND"
  }],
  "ca": {
    "expiry": "87600h"
  }
}
EOF


2. Generate the root CA

cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/etcd/ca -

2. Issue the etcd certificate

1. Create the CSR

cat > etcd-hiapps-csr.json <<"EOF"
{
  "CN": "etcd-hiapps",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "k8s-master01",
    "k8s-master02",
    "k8s-master03",
    "192.168.31.111",
    "192.168.31.112",
    "192.168.31.113"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "HIAPPS",
      "OU": "RND"
    }
  ]
}
EOF

2. Sign the etcd certificate

# Note: /etc/kubernetes/pki/ca-config.json is created in section 5
# ("Certificate profiles"); make sure it exists before signing.
cfssl gencert \
-ca=/etc/kubernetes/pki/etcd/ca.pem \
-ca-key=/etc/kubernetes/pki/etcd/ca-key.pem \
-config=/etc/kubernetes/pki/ca-config.json \
-profile=etcd \
etcd-hiapps-csr.json | cfssljson -bare /etc/kubernetes/pki/etcd/etcd

3. Distribute the certificates

# Copy the etcd certificates to the other masters
Master='k8s-master02 k8s-master03'
for HOST in $Master; do echo $HOST; scp -r /etc/kubernetes/pki/etcd/ root@$HOST:/etc/kubernetes/pki/etcd; done

4. etcd configuration

1. On k8s-master01

mkdir -p /etc/etcd
cat > /etc/etcd/etcd.conf <<"EOF"
#[Member]
ETCD_NAME="etcd1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.31.111:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.31.111:2379,http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.111:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.111:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.31.111:2380,etcd2=https://192.168.31.112:2380,etcd3=https://192.168.31.113:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

2. On k8s-master02

mkdir -p /etc/etcd
cat > /etc/etcd/etcd.conf <<"EOF"
#[Member]
ETCD_NAME="etcd2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.31.112:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.31.112:2379,http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.112:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.112:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.31.111:2380,etcd2=https://192.168.31.112:2380,etcd3=https://192.168.31.113:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

3. On k8s-master03

mkdir -p /etc/etcd
cat > /etc/etcd/etcd.conf <<"EOF"
#[Member]
ETCD_NAME="etcd3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.31.113:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.31.113:2379,http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.113:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.113:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.31.111:2380,etcd2=https://192.168.31.112:2380,etcd3=https://192.168.31.113:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

5. Run on all etcd nodes

Download the etcd release

# Download the release
# wget https://github.com/etcd-io/etcd/releases/download/v3.5.6/etcd-v3.5.6-linux-amd64.tar.gz


# Extract the etcd binaries
tar -xf etcd*.tar.gz && mv etcd-*/etcd /usr/local/bin/ && mv etcd-*/etcdctl /usr/local/bin/

# Check the version
etcdctl version
etcdctl version: 3.5.8
API version: 3.5

1. Create the etcd data directory

mkdir -p /var/lib/etcd/default.etcd

2. Create the systemd service on every etcd node

cat > /usr/lib/systemd/system/etcd.service <<"EOF"
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=-/etc/etcd/etcd.conf
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
--cert-file=/etc/kubernetes/pki/etcd/etcd.pem \
--key-file=/etc/kubernetes/pki/etcd/etcd-key.pem \
--trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem \
--peer-cert-file=/etc/kubernetes/pki/etcd/etcd.pem \
--peer-key-file=/etc/kubernetes/pki/etcd/etcd-key.pem \
--peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem \
--peer-client-cert-auth \
--client-cert-auth
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

3. Enable at boot and start now

systemctl daemon-reload
systemctl enable etcd --now

6. Verify the etcd cluster

1. Visual inspection with etcdkeeper

./etcdkeeper  -cacert /etc/kubernetes/pki/etcd/ca.pem -cert /etc/kubernetes/pki/etcd/etcd.pem -key /etc/kubernetes/pki/etcd/etcd-key.pem

2. Check cluster membership

export ETCDCTL_API=3
ETCD_HOST_1=192.168.31.111
ETCD_HOST_2=192.168.31.112
ETCD_HOST_3=192.168.31.113
ENDPOINTS=$ETCD_HOST_1:2379,$ETCD_HOST_2:2379,$ETCD_HOST_3:2379

etcdctl --endpoints=$ENDPOINTS member list

# Export these so the TLS flags can be omitted from each command
export ETCDCTL_DIAL_TIMEOUT=3s
export ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/ca.pem
export ETCDCTL_CERT=/etc/kubernetes/pki/etcd/etcd.pem
export ETCDCTL_KEY=/etc/kubernetes/pki/etcd/etcd-key.pem
export ETCDCTL_ENDPOINTS=$ETCD_HOST_1:2379,$ETCD_HOST_2:2379,$ETCD_HOST_3:2379

etcdctl member list --write-out=table

Check cluster health and status


etcdctl --write-out=table --endpoints=https://192.168.31.111:2379,https://192.168.31.112:2379,https://192.168.31.113:2379 --cacert=/etc/kubernetes/pki/etcd/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health

etcdctl --write-out=table endpoint health

etcdctl --write-out=table --endpoints=https://192.168.31.111:2379,https://192.168.31.112:2379,https://192.168.31.113:2379 --cacert=/etc/kubernetes/pki/etcd/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status

etcdctl --write-out=table endpoint status

Check performance


etcdctl --write-out=table --endpoints=https://192.168.31.111:2379,https://192.168.31.112:2379,https://192.168.31.113:2379 --cacert=/etc/kubernetes/pki/etcd/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem check perf
etcdctl check perf

4. HAProxy + Keepalived

Hosts

IP Address      Hostname  Notes
192.168.31.109  k8s-lb01  haproxy + keepalived
192.168.31.110  k8s-lb02  haproxy + keepalived
192.168.31.100  vip       virtual IP, load-balanced apiserver entry point

HAProxy

Configure on both LB machines

cat > /etc/haproxy/haproxy.cfg <<"EOF"

global

log /dev/log local0 warning
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon

stats socket /var/lib/haproxy/stats


defaults

log global
option httplog
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000



frontend kube-apiserver
bind *:8443
mode tcp
option tcplog
default_backend kube-apiserver



backend kube-apiserver
mode tcp
option tcplog
option tcp-check
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server kube-apiserver-1 192.168.31.111:6443 check # Replace the IP address with your own.
server kube-apiserver-2 192.168.31.112:6443 check # Replace the IP address with your own.
server kube-apiserver-3 192.168.31.113:6443 check # Replace the IP address with your own.

EOF


Keepalived configuration

First machine (k8s-lb01)

cat > /etc/keepalived/keepalived.conf <<"EOF"

global_defs {

notification_email {

}

router_id LVS_DEVEL
vrrp_skip_check_adv_addr
vrrp_garp_interval 0
vrrp_gna_interval 0

}

vrrp_script chk_haproxy {
script "killall -0 haproxy"
interval 2
weight 2

}

vrrp_instance haproxy-vip {
state BACKUP
priority 100
interface eth0 # Network card
virtual_router_id 60
advert_int 1

authentication {
auth_type PASS
auth_pass 1111
}

unicast_src_ip 192.168.31.109 # The IP address of this machine

unicast_peer {
192.168.31.110 # The IP address of peer machines

}

virtual_ipaddress {
192.168.31.100/24 # The VIP address

}

track_script {
chk_haproxy
}

}


Second machine (k8s-lb02)

cat > /etc/keepalived/keepalived.conf <<"EOF"

global_defs {

notification_email {

}

router_id LVS_DEVEL
vrrp_skip_check_adv_addr
vrrp_garp_interval 0
vrrp_gna_interval 0

}

vrrp_script chk_haproxy {
script "killall -0 haproxy"
interval 2
weight 2

}

vrrp_instance haproxy-vip {
state BACKUP
priority 100
interface eth0 # Network card
virtual_router_id 60
advert_int 1

authentication {
auth_type PASS
auth_pass 1111
}

unicast_src_ip 192.168.31.110 # The IP address of this machine

unicast_peer {
192.168.31.109 # The IP address of peer machines

}

virtual_ipaddress {
192.168.31.100/24 # The VIP address

}

track_script {
chk_haproxy
}

}

EOF
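
The configs above still need the services installed and running. A minimal sketch for both LB machines, assuming the stock CentOS 7 packages:

# Install and start HAProxy and Keepalived (run on k8s-lb01 and k8s-lb02)
yum install -y haproxy keepalived
systemctl enable --now haproxy keepalived

# The VIP should appear on exactly one machine's interface
ip addr show eth0 | grep 192.168.31.100

# HAProxy should be listening on the frontend port
ss -lntp | grep 8443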

5. Install Kubernetes 1.27

Install the Kubernetes binaries

Option 1: install from a yum repo

# Configure the Kubernetes v1.27 yum repo

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF


yum install -y kubectl-1.27.5-0 kubelet-1.27.5-0 kubeadm-1.27.5-0

Option 2: install from release binaries

Download the packages


# Download the packages
# (the example output below was captured on v1.25.4; use the v1.27.x tarball
#  to match this guide's target version)
# wget https://dl.k8s.io/v1.25.4/kubernetes-server-linux-amd64.tar.gz
# wget https://github.com/etcd-io/etcd/releases/download/v3.5.6/etcd-v3.5.6-linux-amd64.tar.gz


# Extract the Kubernetes binaries
tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}

# Extract the etcd binaries
tar -xf etcd*.tar.gz && mv etcd-*/etcd /usr/local/bin/ && mv etcd-*/etcdctl /usr/local/bin/

# Check /usr/local/bin
ls /usr/local/bin/

containerd crictl etcdctl kube-proxy
containerd-shim critest kube-apiserver kube-scheduler
containerd-shim-runc-v1 ctd-decoder kube-controller-manager
containerd-shim-runc-v2 ctr kubectl
containerd-stress etcd kubelet


# Check versions

[root@k8s-master1 ~]# kubelet --version
Kubernetes v1.25.4
[root@k8s-master1 ~]# etcdctl version
etcdctl version: 3.5.8
API version: 3.5
[root@k8s-master1 ~]#

Distribute to the other nodes

Master='k8s-master02 k8s-master03'
Work='k8s-node01 k8s-node02'

# kubelet, kubectl, kube-apiserver, kube-controller-manager, kube-scheduler, kube-proxy
for NODE in $Master; do echo $NODE; scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; done

# etcd
for NODE in $Master; do echo $NODE; scp /usr/local/bin/etcd* $NODE:/usr/local/bin/; done

# kubelet and kube-proxy only for workers
for NODE in $Work; do echo $NODE; scp /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin/ ; done

To keep the kubelet's cgroup driver consistent with the container runtime's (systemd), it is recommended to set the following:

cat <<EOF >  /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
EOF

Enable kubelet at boot

systemctl enable kubelet

crictl config runtime-endpoint unix:///var/run/containerd/containerd.sock

Certificate issuance

Kubernetes certificate directory

Create the directory

mkdir -p /etc/kubernetes/pki 

cd /etc/kubernetes/pki

Kubernetes root CA

Root CA CSR

# Root CA signing request
cat > /etc/kubernetes/pki/ca-csr.json <<"EOF"
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "HIAPPS",
      "OU": "CN"
    }
  ],
  "ca": {
    "expiry": "87600h"
  }
}
EOF
Generate the root CA
# Generate the root CA
cfssl gencert -initca /etc/kubernetes/pki/ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca

Certificate profiles (ca-config.json)

# Generate a template, then edit it; the final content of
# /etc/kubernetes/pki/ca-config.json should be:
cfssl print-defaults config > ca-config.json

cat > /etc/kubernetes/pki/ca-config.json <<"EOF"
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "server": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth"
        ]
      },
      "client": {
        "expiry": "8760h",
        "usages": [
          "signing",
          "key encipherment",
          "client auth"
        ]
      },
      "peer": {
        "expiry": "8760h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      },
      "kubernetes": {
        "expiry": "8760h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      },
      "etcd": {
        "expiry": "8760h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF

front-proxy certificates

front-proxy root CA
# front-proxy root CA CSR
cat > /etc/kubernetes/pki/front-proxy-ca-csr.json <<"EOF"
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
EOF

# Generate the root CA
cfssl gencert -initca /etc/kubernetes/pki/front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
front-proxy client certificate
# front-proxy client CSR
cat > /etc/kubernetes/pki/front-proxy-client-csr.json <<"EOF"
{
  "CN": "front-proxy-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
EOF

# Issue the certificate
cfssl gencert -ca=/etc/kubernetes/pki/front-proxy-ca.pem \
-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem \
-config=/etc/kubernetes/pki/ca-config.json \
-profile=kubernetes \
/etc/kubernetes/pki/front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client

ServiceAccount key pair

openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub

kube-apiserver deployment and startup

1. apiserver certificate request and issuance

apiserver CSR

cat > /etc/kubernetes/pki/kube-apiserver-csr.json << "EOF"
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.31.100",
    "192.168.31.109",
    "192.168.31.110",
    "192.168.31.111",
    "192.168.31.112",
    "192.168.31.113",
    "192.168.31.114",
    "192.168.31.115",
    "192.168.31.120",
    "192.168.31.121",
    "192.168.31.122",
    "192.168.31.123",
    "192.168.31.124",
    "192.168.31.125",
    "10.96.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "HIAPPS",
      "OU": "RND"
    }
  ]
}
EOF

Notes:

If the hosts field is non-empty, it must list every IP (including the VIP, 192.168.31.100 here) and domain name authorized to use this certificate. Since the whole cluster uses it, include all node IPs; listing a few spare IPs up front makes later scaling easier.

Also include the first IP of the service network (the first address of the kube-apiserver service-cluster-ip-range, e.g. 10.96.0.1).

2. Generate the apiserver certificate and token file

cfssl gencert -ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=/etc/kubernetes/pki/ca-config.json \
-profile=kubernetes \
/etc/kubernetes/pki/kube-apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-apiserver
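
It is worth confirming the issued certificate actually carries all the SANs listed in the CSR before wiring it into the apiserver:

# All node IPs, the VIP and 10.96.0.1 should appear in the SAN list
openssl x509 -in /etc/kubernetes/pki/kube-apiserver.pem -noout -text | grep -A1 "Subject Alternative Name"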

Generate the token file

cat > /etc/kubernetes/token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

Note: this creates the token needed for TLS bootstrapping.

TLS bootstrapping: once the apiserver enables TLS authentication, every node's kubelet and kube-proxy must present valid CA-signed certificates to talk to kube-apiserver. With many nodes, issuing those client certificates by hand is a lot of work and complicates scaling the cluster. To simplify this, Kubernetes introduced TLS bootstrapping: the kubelet contacts the apiserver as a low-privilege user to request a certificate, and the apiserver signs the kubelet's certificate dynamically. This is the strongly recommended approach for kubelets; kube-proxy still uses a certificate we issue centrally.

apiserver service configuration

Create the apiserver config on k8s-master01

cat > /etc/kubernetes/kube-apiserver.conf << "EOF"
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--anonymous-auth=false \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-apiserver-audit.log \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=192.168.31.111 \
--authorization-mode=Node,RBAC \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth \
--event-ttl=1h \
--service-cluster-ip-range=10.96.0.0/16 \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-32767 \
--etcd-cafile=/etc/kubernetes/pki/etcd/ca.pem \
--etcd-certfile=/etc/kubernetes/pki/etcd/etcd.pem \
--etcd-keyfile=/etc/kubernetes/pki/etcd/etcd-key.pem \
--etcd-servers=https://192.168.31.111:2379,https://192.168.31.112:2379,https://192.168.31.113:2379 \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem \
--kubelet-client-certificate=/etc/kubernetes/pki/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/pki/kube-apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/pki/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--log-dir=/var/log/kubernetes \
--v=4"
EOF

Create the apiserver config on k8s-master02

cat > /etc/kubernetes/kube-apiserver.conf << "EOF"
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--anonymous-auth=false \
--allow-privileged=true \
--apiserver-count=3 \
--alsologtostderr=true \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-apiserver-audit.log \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=192.168.31.112 \
--authorization-mode=Node,RBAC \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth \
--event-ttl=1h \
--service-cluster-ip-range=10.96.0.0/16 \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-32767 \
--etcd-cafile=/etc/kubernetes/pki/etcd/ca.pem \
--etcd-certfile=/etc/kubernetes/pki/etcd/etcd.pem \
--etcd-keyfile=/etc/kubernetes/pki/etcd/etcd-key.pem \
--etcd-servers=https://192.168.31.111:2379,https://192.168.31.112:2379,https://192.168.31.113:2379 \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem \
--kubelet-client-certificate=/etc/kubernetes/pki/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/pki/kube-apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/pki/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=4"
EOF

Create the apiserver config on k8s-master03

cat > /etc/kubernetes/kube-apiserver.conf << "EOF"
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--anonymous-auth=false \
--allow-privileged=true \
--apiserver-count=3 \
--alsologtostderr=true \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-apiserver-audit.log \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=192.168.31.113 \
--authorization-mode=Node,RBAC \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth \
--event-ttl=1h \
--service-cluster-ip-range=10.96.0.0/16 \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-32767 \
--etcd-cafile=/etc/kubernetes/pki/etcd/ca.pem \
--etcd-certfile=/etc/kubernetes/pki/etcd/etcd.pem \
--etcd-keyfile=/etc/kubernetes/pki/etcd/etcd-key.pem \
--etcd-servers=https://192.168.31.111:2379,https://192.168.31.112:2379,https://192.168.31.113:2379 \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem \
--kubelet-client-certificate=/etc/kubernetes/pki/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/pki/kube-apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/pki/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=4"
EOF

Configure the kube-apiserver systemd service on all masters

cat > /etc/systemd/system/kube-apiserver.service << "EOF"
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service

[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF


Start the apiserver

systemctl daemon-reload
systemctl enable --now kube-apiserver
systemctl status kube-apiserver
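
A couple of quick liveness checks; since --anonymous-auth=false is set, an unauthenticated request being rejected still proves the TLS listener is up:

# The apiserver should be listening on 6443 on each master
ss -lntp | grep kube-apiserver

# Expect an authentication error here rather than a connection error
curl -k https://192.168.31.111:6443/healthz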

kube-controller-manager deployment and startup

kube-controller-manager certificate

cat > /etc/kubernetes/pki/kube-controller-manager-csr.json <<"EOF"
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:kube-controller-manager",
      "OU": "system"
    }
  ]
}
EOF

Issue the certificate

cfssl gencert -ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=/etc/kubernetes/pki/ca-config.json \
-profile=kubernetes \
/etc/kubernetes/pki/kube-controller-manager-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-controller-manager

Create the kube-controller-manager kubeconfig

kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.31.100:8443 \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

# Set a context

kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

# Set the user credentials

kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/pki/kube-controller-manager.pem \
--client-key=/etc/kubernetes/pki/kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

# Use the context by default

kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

Create the kube-controller-manager config file (TODO; several flags below, e.g. --port and --experimental-cluster-signing-duration, were removed in recent Kubernetes releases, so prefer the inline-flag unit file further down)

cat > /etc/kubernetes/kube-controller-manager.conf << "EOF"
KUBE_CONTROLLER_MANAGER_OPTS="--port=10252 \
--secure-port=10257 \
--bind-address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
--service-cluster-ip-range=10.96.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
--allocate-node-cidrs=true \
--cluster-cidr=172.16.0.0/16 \
--experimental-cluster-signing-duration=87600h \
--root-ca-file=/etc/kubernetes/pki/ca.pem \
--service-account-private-key-file=/etc/kubernetes/pki/ca-key.pem \
--leader-elect=true \
--feature-gates=RotateKubeletServerCertificate=true \
--controllers=*,bootstrapsigner,tokencleaner \
--horizontal-pod-autoscaler-use-rest-clients=true \
--horizontal-pod-autoscaler-sync-period=10s \
--tls-cert-file=/etc/kubernetes/pki/kube-controller-manager.pem \
--tls-private-key-file=/etc/kubernetes/pki/kube-controller-manager-key.pem \
--use-service-account-credentials=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2"
EOF

Create the systemd unit (run on every master)

cat > /usr/lib/systemd/system/kube-controller-manager.service << 'EOF'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS

Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Alternative systemd unit with flags inline (run on every master)

cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager --v=2 \
--bind-address=0.0.0.0 \
--root-ca-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
--tls-cert-file=/etc/kubernetes/pki/kube-controller-manager.pem \
--tls-private-key-file=/etc/kubernetes/pki/kube-controller-manager-key.pem \
--leader-elect=true \
--use-service-account-credentials=true \
--node-monitor-grace-period=40s \
--node-monitor-period=5s \
--controllers=*,bootstrapsigner,tokencleaner \
--allocate-node-cidrs=true \
--service-cluster-ip-range=10.96.0.0/16 \
--cluster-cidr=172.16.0.0/16 \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--service-account-private-key-file=/etc/kubernetes/pki/sa.key \
--logging-format=text

Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Start the service

systemctl daemon-reload
systemctl enable --now kube-controller-manager
systemctl stop kube-controller-manager
systemctl restart kube-controller-manager
systemctl status kube-controller-manager

kube-scheduler deployment and startup

Generate the kube-scheduler certificate

cat > /etc/kubernetes/pki/kube-scheduler-csr.json <<"EOF"
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:kube-scheduler",
      "OU": "system"
    }
  ]
}
EOF

Issue the certificate

cfssl gencert -ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=/etc/kubernetes/pki/ca-config.json \
-profile=kubernetes \
/etc/kubernetes/pki/kube-scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-scheduler

Create the kube-scheduler kubeconfig

kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.31.100:8443 \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig


kubectl config set-credentials system:kube-scheduler \
--client-certificate=/etc/kubernetes/pki/kube-scheduler.pem \
--client-key=/etc/kubernetes/pki/kube-scheduler-key.pem \
--embed-certs=true --kubeconfig=/etc/kubernetes/scheduler.kubeconfig


kubectl config set-context system:kube-scheduler \
--cluster=kubernetes --user=system:kube-scheduler \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig


kubectl config use-context system:kube-scheduler \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig

Create the systemd unit

cat > /usr/lib/systemd/system/kube-scheduler.service << EOF

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
--v=2 \
--bind-address=0.0.0.0 \
--leader-elect=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

EOF

Start the service

systemctl daemon-reload
systemctl enable --now kube-scheduler
systemctl restart kube-scheduler
systemctl status kube-scheduler

kube-proxy deployment and startup

Generate the kube-proxy certificate

Create the CSR

cat > /etc/kubernetes/pki/kube-proxy-csr.json <<"EOF"
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:kube-proxy",
      "OU": "RND"
    }
  ]
}
EOF

Issue the certificate

cfssl gencert -ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=/etc/kubernetes/pki/ca-config.json \
-profile=kubernetes \
/etc/kubernetes/pki/kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy

Create the kube-proxy kubeconfig

kubectl config set-cluster kubernetes     \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.31.100:8443 \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
--client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
--client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config set-context kube-proxy@kubernetes \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config use-context kube-proxy@kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
# Create the kube-proxy ServiceAccount
kubectl -n kube-system create serviceaccount kube-proxy

kubectl create clusterrolebinding system:kube-proxy \
--clusterrole system:node-proxier \
--serviceaccount kube-system:kube-proxy

Create the kube-proxy config file

cat > /etc/kubernetes/kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 172.16.0.0/12,fc00:2222::/112
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms

EOF

Create the systemd unit

cat >  /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \
--config=/etc/kubernetes/kube-proxy.yaml \
--v=2

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

EOF

Start the service

systemctl daemon-reload
systemctl enable --now kube-proxy
systemctl restart kube-proxy
systemctl status kube-proxy
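
In ipvs mode the proxier programs kernel IPVS tables, so its effect is directly observable. A quick sketch (ipvsadm was installed in the IPVS step):

# Virtual servers appear here once Services exist (kubernetes/10.96.0.1 at minimum)
ipvsadm -Ln

# The metrics endpoint reports the active proxy mode
curl -s 127.0.0.1:10249/proxyMode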

kubelet deployment and startup

Create the kubelet bootstrap kubeconfig

BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)

kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.31.100:8443 \
--kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig

kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig

kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig

kubectl config use-context default \
--kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig


# ===== Alternative: write the bootstrap kubeconfig with a fixed token =====


kubectl config set-cluster kubernetes \
--server=https://192.168.31.100:8443 \
--certificate-authority='/etc/kubernetes/pki/ca.pem' \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig


kubectl config set-credentials kubelet-bootstrap \
--token=c8ad9c.2e4d610cf3e7426e \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig


kubectl config set-context kubernetes \
--user=kubelet-bootstrap \
--cluster=kubernetes \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

kubectl config use-context kubernetes \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig


# ===== Alternative: bootstrap kubeconfig using a tls-bootstrap-token-user identity =====


kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.31.100:8443 \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

kubectl config set-credentials tls-bootstrap-token-user \
--token=c8ad9c.2e4d610cf3e7426e \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes \
--user=tls-bootstrap-token-user \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

kubectl config use-context tls-bootstrap-token-user@kubernetes \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

Note: replace the IP with your own. Normally the virtual IP is used, as above (192.168.31.100:8443 through HAProxy); if you do not have a VIP yet, use one node's IP instead, e.g. k8s-master01's.

RBAC file: bootstrap.secret.yaml

apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8ad9c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8ad9c
  token-secret: 2e4d610cf3e7426e
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
- apiGroups:
  - ""
  resources:
  - nodes/proxy
  - nodes/stats
  - nodes/log
  - nodes/spec
  - nodes/metrics
  verbs:
  - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: kube-apiserver

Apply it

kubectl apply -f bootstrap.secret.yaml
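
To confirm the token and bindings landed, check the Secret referenced by the bootstrap kubeconfig:

# The token id must match the one embedded in bootstrap-kubelet.kubeconfig
kubectl -n kube-system get secret bootstrap-token-c8ad9c
kubectl get clusterrolebinding kubelet-bootstrap node-autoapprove-bootstrap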

Supplementary: bootstrap-related RBAC commands

kubectl describe clusterrolebinding cluster-system-anonymous

kubectl describe clusterrolebinding kubelet-bootstrap
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubelet-bootstrap

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=/etc/kubernetes/kubelet.kubeconfig

kubelet configuration file

/etc/kubernetes/kubelet-conf.yml


apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s

Create the systemd unit

cat > /usr/lib/systemd/system/kubelet.service << EOF

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
ExecStart=/usr/local/bin/kubelet
Restart=always
StartLimitInternval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF





cat > /usr/lib/systemd/system/kubelet.service << EOF

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
ExecStart=/usr/local/bin/kubelet \
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--config=/etc/kubernetes/kubelet-conf.yml \
--container-runtime-endpoint=unix:///run/containerd/containerd.sock
# --container-runtime=remote   (flag removed in v1.27; add it back only on older releases)
# --feature-gates=IPv6DualStack=true
# --runtime-request-timeout=15m
# --cgroup-driver=systemd

[Install]
WantedBy=multi-user.target
EOF

Alternative: systemd drop-in configuration

mkdir -p /etc/systemd/system/kubelet.service.d

cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf << 'EOF'
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--container-runtime-endpoint=unix:///run/containerd/containerd.sock"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_EXTRA_ARGS
EOF

Start the service

systemctl daemon-reload
systemctl enable --now kubelet
systemctl restart kubelet
systemctl status kubelet
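
Once kubelets start bootstrapping, their CSRs should be auto-approved by the bindings created earlier and the nodes register (they stay NotReady until a CNI plugin is deployed):

# Run on a master with the admin kubeconfig
kubectl get csr
kubectl get nodes -o wide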

kubectl setup

Generate the admin certificate

Create the CSR

cat > /etc/kubernetes/pki/admin-csr.json <<"EOF"
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:masters",
      "OU": "system"
    }
  ]
}
EOF

Issue the certificate


cfssl gencert -ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=/etc/kubernetes/pki/ca-config.json \
-profile=kubernetes \
/etc/kubernetes/pki/admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin

Create the kubectl kubeconfig

kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.31.100:8443 \
--kubeconfig=/etc/kubernetes/kube.config

kubectl config set-credentials admin \
--client-certificate=/etc/kubernetes/pki/admin.pem \
--client-key=/etc/kubernetes/pki/admin-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/kube.config

kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin \
--kubeconfig=/etc/kubernetes/kube.config

kubectl config use-context kubernetes \
--kubeconfig=/etc/kubernetes/kube.config

Note: replace the IP with your own. Normally the virtual IP is used, as above (192.168.31.100:8443 through HAProxy); if you do not have a VIP yet, use one node's IP instead, e.g. k8s-master01's.

Set up the admin config

Prepare the kubectl config file and bind roles

mkdir -p /root/.kube

cp /etc/kubernetes/kube.config /root/.kube/config

kubectl create clusterrolebinding kube-apiserver:kubelet-apis \
--clusterrole=system:kubelet-api-admin \
--user kubernetes \
--kubeconfig=/root/.kube/config

# Copy the admin config to the other masters
scp /etc/kubernetes/kube.config k8s-master02:~/.kube/config

scp /etc/kubernetes/kube.config k8s-master03:~/.kube/config
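
A final sanity check that the admin kubeconfig reaches the apiserver through the VIP:

kubectl cluster-info
kubectl get nodes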

6. Deploying Third-Party Services on Kubernetes

Network plugin (CNI)


coredns


Storage


Dashboard

Install the dashboard with Helm

helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --namespace kube-system

Create a user

dashboard-user.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system


Apply it

kubectl apply -f dashboard-user.yaml

Create a token

kubectl -n kube-system create token admin-user
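
Without an Ingress, port-forwarding is the simplest way in; the service name below assumes the Helm chart defaults used above (kubernetes-dashboard in kube-system):

# Then open https://localhost:8443 and sign in with the token printed above
kubectl -n kube-system port-forward svc/kubernetes-dashboard 8443:443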

Install ingress-nginx


Monitoring components


7. Common Kubernetes Commands

Node operations

Add a taint

kubectl taint node k8s-master01 node-role.kubernetes.io/master="":NoSchedule

Remove a taint

kubectl taint node k8s-master01 node-role.kubernetes.io/master-

Add labels

kubectl label node k8s-master01 node-role.kubernetes.io/master=''
kubectl label node k8s-master02 node-role.kubernetes.io/master=''
kubectl label node k8s-master03 node-role.kubernetes.io/master=''

kubectl label node k8s-node01 node-role.kubernetes.io/worker=''
kubectl label node k8s-node02 node-role.kubernetes.io/worker=''
kubectl label node k8s-node03 node-role.kubernetes.io/worker=''



kubectl label node k8s-node01 node-role.kubernetes.io/ingress=''
kubectl label node k8s-node02 node-role.kubernetes.io/ingress=''
kubectl label node k8s-node03 node-role.kubernetes.io/ingress=''

kubectl label node k8s-node{01,02,03} ingress=true

If a kubectl exec/logs call later fails with:

error: unable to upgrade connection: Forbidden (user=kubernetes, verb=create, resource=nodes, subresource=proxy)

grant the kubelet API role:

kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes

Shell completion

yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

Create a Docker registry secret

kubectl create secret docker-registry registry-docker-io \
--docker-server=docker.io \
--docker-username=wuyunan \
--docker-password=wuyunan+ \
--docker-email=[email protected]




# Disable all cluster services (only for teardown / rollback)
systemctl disable kubelet.service
systemctl disable kube-proxy.service
systemctl disable kube-scheduler
systemctl disable kube-controller-manager
systemctl disable kube-apiserver

If you like this blog or find it useful, feel free to leave a comment. You are also welcome to share it so that more people can benefit. If any image used here infringes your copyright, please contact the author to have it removed. Thank you!