- For production, use a Kubernetes release whose patch version is greater than 5, e.g. 1.24.6
- OS: CentOS 7.5
- K8s: 1.24.6
- containerd: v1.6.4
- calico: v3.24
Cluster environment
hostname | IP/VIP | software |
---|---|---|
k8s-master01 | 192.168.1.171 | etcd、apiserver、scheduler、controller-manager、kube-proxy、kubelet |
k8s-master02 | 192.168.1.172 | etcd、apiserver、scheduler、controller-manager、kube-proxy、kubelet |
k8s-master03 | 192.168.1.173 | etcd、apiserver、scheduler、controller-manager、kube-proxy、kubelet |
k8s-node01 | 192.168.1.174 | kube-proxy、kubelet |
k8s-node02 | 192.168.1.175 | kube-proxy、kubelet |
haproxy01 | 192.168.1.161 | haproxy、keepalived |
haproxy02 | 192.168.1.162 | haproxy、keepalived |
VIP | 192.168.1.111 | IPVS |
Basic environment configuration
Configure hosts
Add hosts entries on all machines
cat >> /etc/hosts <<EOF
192.168.1.111 apiserver.sundayhk.com
192.168.1.171 k8s-master01
192.168.1.172 k8s-master02
192.168.1.173 k8s-master03
192.168.1.174 k8s-node01
192.168.1.175 k8s-node02
192.168.1.161 haproxy01
192.168.1.162 haproxy02
EOF
Set hostnames
Set the hostname on each machine, e.g. on master01:
hostnamectl set-hostname k8s-master01
bash # start a new shell so the change takes effect
name=(k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02)
for i in ${name[@]};do ssh root@$i hostnamectl set-hostname $i;done
Disable firewalld, SELinux, and swap
On all nodes
# CentOS 7 needs NetworkManager disabled; CentOS 8 does not
systemctl disable --now firewalld NetworkManager dnsmasq
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
iptables -P FORWARD ACCEPT
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
# kubelet fails to start when swap is enabled (this can be overridden with --fail-swap-on=false, but that is not recommended)
Aliyun repo mirrors
On all nodes
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
Install basic tools
yum install wget curl ntpdate jq psmisc vim net-tools yum-utils device-mapper-persistent-data lvm2 git -y
Time synchronization
On the haproxy nodes
ntpdate
timedatectl set-timezone Asia/Shanghai
timedatectl set-local-rtc 0
ntpdate ntp.aliyun.com
echo "*/30 * * * * ntpdate ntp.aliyun.com >/dev/null 2>&1" >> /var/spool/cron/root
systemctl restart crond
chronyd
yum install chrony -y
cat > /etc/chrony.conf << EOF
pool ntp.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 192.168.1.0/24
local stratum 10
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF
systemctl enable chronyd --now
On all master and node machines (excluding haproxy)
yum install chrony -y
cat > /etc/chrony.conf << EOF
pool 192.168.1.111 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF
systemctl enable chronyd --now
# verify from a client
chronyc sources -v
Configure limits
On all nodes
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
echo "ulimit -SHn 65535" >> /etc/profile
source /etc/profile
Set up passwordless SSH
Configure passwordless login from master01 to the other nodes
yum install -y sshpass
ssh-keygen -f /root/.ssh/id_rsa -P ''
export IP="k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02"
export SSHPASS=sunday # the root password
for HOST in $IP;do
sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $HOST
done
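A quick optional sanity check that passwordless login now works (reusing the $IP list from above):
for HOST in $IP;do ssh -o BatchMode=yes root@$HOST hostname;done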
Kernel parameters
Kernel parameters required by Kubernetes; configure on all nodes
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system
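Note: the net.bridge.* keys only exist once the br_netfilter module is loaded (it is configured persistently in the containerd section below). If sysctl --system reports them as unknown, load the module and re-apply:
modprobe br_netfilter
sysctl --system
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables # spot-check the applied values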
Upgrade the kernel
Docker's overlay2 storage driver needs a 4.x kernel, and the stock CentOS 7.x 3.10 kernel has known bugs that make Docker and Kubernetes unstable, so upgrade the kernel.
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# installs the latest mainline kernel by default
yum --enablerepo=elrepo-kernel install kernel-ml
# make the new kernel the default boot entry
grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
# confirm the default kernel now points to the newly installed one
grubby --default-kernel
# the output should show the upgraded kernel
reboot
# the reboot can be deferred until all initialization steps are finished
Base components
This part installs the components the cluster relies on: IPVS, the containerd runtime, the Kubernetes binaries, and so on.
Install IPVS
Install ipvsadm and related packages on all nodes
yum install -y ipvsadm ipset sysstat conntrack libseccomp
Configure the IPVS modules on all nodes. On kernel 4.19+, nf_conntrack_ipv4 was renamed to nf_conntrack; on 4.18 and below, use nf_conntrack_ipv4.
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
systemctl enable --now systemd-modules-load.service
Check that the modules are loaded
lsmod | grep -E "ip_vs|nf_conntrack"
Install containerd
Use containerd as the container runtime (recommended)
Download containerd together with the CNI plugins
wget https://github.com/containerd/containerd/releases/download/v1.6.4/cri-containerd-cni-1.6.4-linux-amd64.tar.gz
mkdir -p /etc/cni/net.d /opt/cni/bin
tar -C / -xvzf cri-containerd-cni-1.6.4-linux-amd64.tar.gz
Configure the kernel modules containerd needs
cat > /etc/modules-load.d/containerd.conf <<EOF
overlay
br_netfilter
EOF
systemctl restart systemd-modules-load.service
Configure the sysctl parameters containerd needs
cat > /etc/sysctl.d/99-kubernetes-cri.conf << EOF
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl --system
Configure containerd
mkdir /etc/containerd -p
containerd config default > /etc/containerd/config.toml
sed -i 's/k8s.gcr.io/registry.aliyuncs.com\/google_containers/' /etc/containerd/config.toml
sed -i 's/SystemdCgroup \= false/SystemdCgroup \= true/' /etc/containerd/config.toml
sed -i 's/snapshotter = "overlayfs"/snapshotter = "native"/' /etc/containerd/config.toml
Containerd service
cat > /etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable --now containerd
systemctl status containerd
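As an optional check beyond the service status (crictl ships in the cri-containerd-cni bundle extracted above):
ctr version
crictl --runtime-endpoint unix:///run/containerd/containerd.sock version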
Distribute the binaries
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md
Download the etcd and Kubernetes binaries
wget https://github.com/etcd-io/etcd/releases/download/v3.5.4/etcd-v3.5.4-linux-amd64.tar.gz
wget https://storage.googleapis.com/kubernetes-release/release/v1.24.6/kubernetes-server-linux-amd64.tar.gz
Extract into /usr/local/bin/
tar -xf etcd-v3.5.4-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.5.4-linux-amd64/etcd{,ctl}
tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
Distribute the components
Masters='k8s-master02 k8s-master03'
Nodes='k8s-node01 k8s-node02'
for i in $Masters; do
echo $i;
scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $i:/usr/local/bin/;
scp /usr/local/bin/etcd* $i:/usr/local/bin/;
done
for i in $Nodes; do scp /usr/local/bin/kube{let,-proxy} $i:/usr/local/bin/; done
Create /opt/cni/bin on all nodes
mkdir -p /opt/cni/bin
Check the versions and that the binaries run
ls -l /usr/local/bin
kubelet --version
etcdctl version
cfssl tools
wget "https://pkg.cfssl.org/R1.2/cfssl_linux-amd64" -O /usr/local/bin/cfssl
wget "https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64" -O /usr/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl*
Generate certificates
Create the etcd certificate directory on all master nodes
mkdir /etc/etcd/ssl -p
Create the Kubernetes directories on master01
mkdir -p /etc/kubernetes/pki
mkdir /opt/pki
CA certificate
This CA signs all subsequent Kubernetes certificates; etcd gets its own CA, created in the next step.
cd /opt/pki
cat > ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "876000h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "876000h"
}
}
}
}
EOF
cat > ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "Kubernetes",
"OU": "Kubernetes-manual"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
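Optionally inspect the resulting CA with openssl to confirm the subject and the long validity requested above:
openssl x509 -in /etc/kubernetes/pki/ca.pem -noout -subject -issuer -dates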
etcd certificates
Generate the etcd certificates on master01
The CSR (certificate signing request) files carry the domain names, organization, and unit for the certificates
cat > etcd-ca-csr.json << EOF
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "etcd",
"OU": "Etcd Security"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
cat > etcd-csr.json << EOF
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "etcd",
"OU": "Etcd Security"
}
]
}
EOF
cd /opt/pki
# generate the etcd CA and the etcd certificate/key
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
cfssl gencert \
-ca=/etc/etcd/ssl/etcd-ca.pem \
-ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
-config=ca-config.json \
-hostname=k8s-master01,k8s-master02,k8s-master03,192.168.1.171,192.168.1.172,192.168.1.173 \
-profile=kubernetes etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
Sync the certificates to the other master nodes
for i in k8s-master02 k8s-master03;do
ssh $i "mkdir -p /etc/etcd/ssl"
scp /etc/etcd/ssl/*.pem $i:/etc/etcd/ssl/
done
apiserver certificate
cat > apiserver-csr.json << EOF
{
"CN": "kube-apiserver",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "Kubernetes",
"OU": "Kubernetes-manual"
}
]
}
EOF
# Generate the apiserver certificate; extra IPs are included as reserved addresses for adding nodes later
# 10.96.0.1 is the first address of the service CIDR (see the check below); 192.168.1.111 is the HA VIP
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-hostname=10.96.0.1,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,apiserver.sundayhk.com,k8s-master01,k8s-master02,k8s-master03,k8s-node01,k8s-node02,192.168.1.111,192.168.1.171,192.168.1.172,192.168.1.173,192.168.1.174,192.168.1.175 \
-profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
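If python3 is available, the first service IP can be double-checked with a one-liner (a sketch; 10.96.0.0/12 is this guide's service CIDR):
python3 -c 'import ipaddress; print(next(ipaddress.ip_network("10.96.0.0/12").hosts()))'
# -> 10.96.0.1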
apiserver aggregation (front-proxy) certificates
cat > front-proxy-ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"ca": {
"expiry": "876000h"
}
}
EOF
cat > front-proxy-client-csr.json << EOF
{
"CN": "front-proxy-client",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
cfssl gencert \
-ca=/etc/kubernetes/pki/front-proxy-ca.pem \
-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem \
-config=ca-config.json \
-profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
controller-manager certificate
cat > controller-manager-csr.json << EOF
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-controller-manager",
"OU": "Kubernetes-manual"
}
]
}
EOF
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
controller-manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
# set the cluster entry
# Note: for a non-HA cluster, replace 192.168.1.111 with master01's address or domain
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.111:8443 \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# set a context entry
kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# set-credentials: set a user entry
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/pki/controller-manager.pem \
--client-key=/etc/kubernetes/pki/controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# set the default context
kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
scheduler certificate
cat > scheduler-csr.json << EOF
{
"CN": "system:kube-scheduler",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-scheduler",
"OU": "Kubernetes-manual"
}
]
}
EOF
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
# Note: for a non-HA cluster, replace 192.168.1.111 with master01's address or domain
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.111:8443 \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
--client-certificate=/etc/kubernetes/pki/scheduler.pem \
--client-key=/etc/kubernetes/pki/scheduler-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config set-context system:kube-scheduler@kubernetes \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config use-context system:kube-scheduler@kubernetes \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kube-proxy certificate
cat > kube-proxy-csr.json << EOF
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-proxy",
"OU": "Kubernetes-manual"
}
]
}
EOF
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.111:8443 \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
--client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-context kube-proxy@kubernetes \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config use-context kube-proxy@kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
ServiceAccount key pair
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
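A quick consistency check on the pair (the public key derived from sa.key must equal sa.pub):
openssl rsa -in /etc/kubernetes/pki/sa.key -noout -check
diff <(openssl rsa -in /etc/kubernetes/pki/sa.key -pubout 2>/dev/null) /etc/kubernetes/pki/sa.pub && echo match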
admin certificate
cat > admin-csr.json << EOF
{
"CN": "admin",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:masters",
"OU": "Kubernetes-manual"
}
]
}
EOF
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
# Note: for a non-HA cluster, replace 192.168.1.111 with master01's address or domain
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.111:8443 \
--kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config set-credentials kubernetes-admin \
--client-certificate=/etc/kubernetes/pki/admin.pem \
--client-key=/etc/kubernetes/pki/admin-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config set-context kubernetes-admin@kubernetes \
--cluster=kubernetes \
--user=kubernetes-admin \
--kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config use-context kubernetes-admin@kubernetes \
--kubeconfig=/etc/kubernetes/admin.kubeconfig
Sync to the other masters
for i in k8s-master02 k8s-master03;do
scp -r /etc/kubernetes/pki $i:/etc/kubernetes/
scp /etc/kubernetes/*.kubeconfig $i:/etc/kubernetes/
done
[root@k8s-master01 ~]# ls -l /etc/kubernetes/
total 36
-rw------- 1 root root 6456 Oct 18 03:06 admin.kubeconfig
-rw------- 1 root root 6594 Oct 18 20:45 controller-manager.kubeconfig
-rw------- 1 root root 6458 Oct 18 03:17 kube-proxy.kubeconfig
drwxr-xr-x 2 root root 4096 Oct 18 20:57 pki
-rw------- 1 root root 6522 Oct 18 20:45 scheduler.kubeconfig
[root@k8s-master01 ~]# ls -l /etc/kubernetes/pki/
total 92
-rw-r--r-- 1 root root 1025 Oct 18 03:06 admin.csr
-rw------- 1 root root 1679 Oct 18 03:06 admin-key.pem
-rw-r--r-- 1 root root 1444 Oct 18 03:06 admin.pem
-rw-r--r-- 1 root root 1029 Oct 18 02:23 apiserver.csr
-rw------- 1 root root 1679 Oct 18 02:23 apiserver-key.pem
-rw-r--r-- 1 root root 1834 Oct 18 02:23 apiserver.pem
-rw-r--r-- 1 root root 1025 Oct 18 02:23 ca.csr
-rw------- 1 root root 1675 Oct 18 02:23 ca-key.pem
-rw-r--r-- 1 root root 1411 Oct 18 02:23 ca.pem
-rw-r--r-- 1 root root 1082 Oct 18 02:38 controller-manager.csr
-rw------- 1 root root 1679 Oct 18 02:38 controller-manager-key.pem
-rw-r--r-- 1 root root 1501 Oct 18 02:38 controller-manager.pem
-rw-r--r-- 1 root root 891 Oct 18 03:17 front-proxy-ca.csr
-rw------- 1 root root 1675 Oct 18 03:17 front-proxy-ca-key.pem
-rw-r--r-- 1 root root 1143 Oct 18 03:17 front-proxy-ca.pem
-rw-r--r-- 1 root root 903 Oct 18 03:17 front-proxy-client.csr
-rw------- 1 root root 1679 Oct 18 03:17 front-proxy-client-key.pem
-rw-r--r-- 1 root root 1188 Oct 18 03:17 front-proxy-client.pem
-rw-r--r-- 1 root root 1675 Oct 18 03:14 sa.key
-rw-r--r-- 1 root root 451 Oct 18 03:14 sa.pub
-rw-r--r-- 1 root root 1058 Oct 18 03:02 scheduler.csr
-rw------- 1 root root 1679 Oct 18 03:02 scheduler-key.pem
-rw-r--r-- 1 root root 1476 Oct 18 03:02 scheduler.pem
[root@k8s-master01 ~]# ls -l /etc/kubernetes/pki/ | wc -l
24
etcd configuration
The etcd configs are nearly identical; adjust the name and the IP addresses in each master node's config
master01
cat > /etc/etcd/etcd.config.yaml << EOF
name: 'k8s-master01'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.1.171:2380'
listen-client-urls: 'https://192.168.1.171:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.1.171:2380'
advertise-client-urls: 'https://192.168.1.171:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.1.171:2380,k8s-master02=https://192.168.1.172:2380,k8s-master03=https://192.168.1.173:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/etcd/ssl/etcd.pem'
key-file: '/etc/etcd/ssl/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/etcd/ssl/etcd.pem'
key-file: '/etc/etcd/ssl/etcd-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
master02
cat > /etc/etcd/etcd.config.yaml << EOF
name: 'k8s-master02'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.1.172:2380'
listen-client-urls: 'https://192.168.1.172:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.1.172:2380'
advertise-client-urls: 'https://192.168.1.172:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.1.171:2380,k8s-master02=https://192.168.1.172:2380,k8s-master03=https://192.168.1.173:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/etcd/ssl/etcd.pem'
key-file: '/etc/etcd/ssl/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/etcd/ssl/etcd.pem'
key-file: '/etc/etcd/ssl/etcd-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
master03
cat > /etc/etcd/etcd.config.yaml << EOF
name: 'k8s-master03'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.1.173:2380'
listen-client-urls: 'https://192.168.1.173:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.1.173:2380'
advertise-client-urls: 'https://192.168.1.173:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.1.171:2380,k8s-master02=https://192.168.1.172:2380,k8s-master03=https://192.168.1.173:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/etcd/ssl/etcd.pem'
key-file: '/etc/etcd/ssl/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/etcd/ssl/etcd.pem'
key-file: '/etc/etcd/ssl/etcd-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
Start etcd
Create the etcd service on all master nodes and start it
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yaml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF
Start etcd on all master nodes
systemctl daemon-reload
systemctl enable --now etcd
Check etcd status
export ETCDCTL_API=3
etcdctl --endpoints="192.168.1.171:2379,192.168.1.172:2379,192.168.1.173:2379" --cacert=/etc/etcd/ssl/etcd-ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint status --write-out=table
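The same flags also work for a health check of every member:
etcdctl --endpoints="192.168.1.171:2379,192.168.1.172:2379,192.168.1.173:2379" --cacert=/etc/etcd/ssl/etcd-ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health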
High-availability configuration
haproxy
Install keepalived and haproxy on all haproxy nodes
yum install keepalived haproxy -y
Configure HAProxy on all haproxy nodes; the configuration is identical on both
cat > /etc/haproxy/haproxy.cfg <<EOF
global
maxconn 2000
ulimit-n 16384
log 127.0.0.1 local0 err
stats timeout 30s
defaults
log global
mode http
option httplog
timeout connect 5000
timeout client 50000
timeout server 50000
timeout http-request 15s
timeout http-keep-alive 15s
frontend k8s-master
bind 0.0.0.0:8443
mode tcp
option tcplog
tcp-request inspect-delay 5s
default_backend k8s-master
backend k8s-master
mode tcp
option tcplog
option tcp-check
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server k8s-master01 192.168.1.171:6443 check
server k8s-master02 192.168.1.172:6443 check
server k8s-master03 192.168.1.173:6443 check
EOF
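haproxy can validate the file before (re)starting, which is worth doing after any edit:
haproxy -c -f /etc/haproxy/haproxy.cfg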
keepalived
keepalived01
Configure keepalived on all haproxy nodes; the configs differ per node, so mind each node's IP and interface
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state MASTER
interface eth0
mcast_src_ip 192.168.1.161
virtual_router_id 61
priority 100
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.1.111
}
track_script {
chk_apiserver
}
}
EOF
keepalived02
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
mcast_src_ip 192.168.1.162
virtual_router_id 61
priority 90
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.1.111
}
track_script {
chk_apiserver
}
}
EOF
Health-check configuration
On all haproxy nodes
cat > /etc/keepalived/check_apiserver.sh << \EOF
#!/bin/bash
err=0
for k in $(seq 1 3)
do
check_code=$(pgrep haproxy)
if [[ $check_code == "" ]]; then
err=$(expr $err + 1)
sleep 1
continue
else
err=0
break
fi
done
if [[ $err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
EOF
chmod +x /etc/keepalived/check_apiserver.sh
Start haproxy and keepalived on all haproxy nodes
systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
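Optionally confirm the VIP landed on the MASTER node (haproxy01) and that port 8443 answers; until the apiservers are up, haproxy accepting and then dropping the connection is expected:
ip addr show eth0 | grep 192.168.1.111
curl -k https://192.168.1.111:8443 # an empty reply / reset is normal at this stage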
Kubernetes component configuration
Create the required directories on all nodes
mkdir -p /etc/kubernetes/manifests/ /var/lib/kubelet /var/log/kubernetes
apiserver
- Note: the Kubernetes service CIDR is 10.96.0.0/12; it must not overlap with the host network or the Pod CIDR
- Adjust as needed
Configure the kube-apiserver service on all master nodes (this guide sets --advertise-address to the VIP; some setups advertise each master's own IP instead)
cat > /usr/lib/systemd/system/kube-apiserver.service << \EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--v=2 \
--logtostderr=true \
--allow-privileged=true \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=192.168.1.111 \
--service-cluster-ip-range=10.96.0.0/12 \
--service-node-port-range=30000-32767 \
--etcd-servers=https://192.168.1.171:2379,https://192.168.1.172:2379,https://192.168.1.173:2379 \
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/pki/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
--authorization-mode=Node,RBAC \
--enable-bootstrap-token-auth=true \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
--requestheader-allowed-names=aggregator \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-username-headers=X-Remote-User \
--enable-aggregator-routing=true
# --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
systemctl status kube-apiserver
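With the admin client certificate generated earlier, the healthz endpoint can be probed directly (expected output: ok):
curl --cacert /etc/kubernetes/pki/ca.pem --cert /etc/kubernetes/pki/admin.pem --key /etc/kubernetes/pki/admin-key.pem https://127.0.0.1:6443/healthz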
controller-manager
Configure the kube-controller-manager service on all master nodes
cat > /usr/lib/systemd/system/kube-controller-manager.service << \EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
--v=2 \
--logtostderr=true \
--bind-address=127.0.0.1 \
--root-ca-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
--service-account-private-key-file=/etc/kubernetes/pki/sa.key \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
--leader-elect=true \
--use-service-account-credentials=true \
--node-monitor-grace-period=40s \
--node-monitor-period=5s \
--pod-eviction-timeout=2m0s \
--controllers=*,bootstrapsigner,tokencleaner \
--allocate-node-cidrs=true \
--service-cluster-ip-range=10.96.0.0/12 \
--cluster-cidr=172.16.0.0/12 \
--node-cidr-mask-size-ipv4=24 \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager
systemctl status kube-controller-manager
scheduler
Configure the kube-scheduler service on all master nodes
cat > /usr/lib/systemd/system/kube-scheduler.service << \EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-scheduler \
--v=2 \
--logtostderr=true \
--bind-address=127.0.0.1 \
--leader-elect=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler
systemctl status kube-scheduler
TLS bootstrapping configuration
Create the bootstrap kubeconfig on master01
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true --server=https://192.168.1.111:8443 \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# the token must match token-id.token-secret in bootstrap.secret.yaml below
kubectl config set-credentials tls-bootstrap-token-user \
--token=c8ad9c.2e4d610cf3e7426e \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes --user=tls-bootstrap-token-user \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config use-context tls-bootstrap-token-user@kubernetes \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# the token is defined in bootstrap.secret.yaml; change it there if needed
mkdir -p /root/.kube
cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
- Note: token-id and token-secret in bootstrap.secret.yaml must match the token used above, i.e. c8ad9c.2e4d610cf3e7426e (see the sketch below for generating a fresh one)
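If you prefer a fresh token instead of c8ad9c.2e4d610cf3e7426e, one can be generated like this (a sketch; substitute it consistently in both the kubeconfig command and the Secret):
TOKEN_ID=$(openssl rand -hex 3)     # 6 chars, becomes token-id
TOKEN_SECRET=$(openssl rand -hex 8) # 16 chars, becomes token-secret
echo ${TOKEN_ID}.${TOKEN_SECRET}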
mkdir ~/bootstrap
cd ~/bootstrap
cat > bootstrap.secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
name: bootstrap-token-c8ad9c
namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
description: "The default bootstrap token generated by 'kubelet '."
token-id: c8ad9c
token-secret: 2e4d610cf3e7426e
usage-bootstrap-authentication: "true"
usage-bootstrap-signing: "true"
auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubelet-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-autoapprove-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-autoapprove-certificate-rotation
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:kube-apiserver-to-kubelet
rules:
- apiGroups:
- ""
resources:
- nodes/proxy
- nodes/stats
- nodes/log
- nodes/spec
- nodes/metrics
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:kube-apiserver
namespace: ""
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-apiserver-to-kubelet
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kube-apiserver
EOF
kubectl create -f bootstrap.secret.yaml
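Optionally verify that the Secret and the RBAC bindings landed:
kubectl get secret bootstrap-token-c8ad9c -n kube-system
kubectl get clusterrolebinding kubelet-bootstrap node-autoapprove-bootstrap node-autoapprove-certificate-rotation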
Check cluster status
[root@k8s-master01 system]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true","reason":""}
etcd-1 Healthy {"health":"true","reason":""}
etcd-2 Healthy {"health":"true","reason":""}
Node configuration
Distribute certificates
Distribute certificates from master01 to the other nodes
for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do
scp /etc/kubernetes/{bootstrap-kubelet,kube-proxy}.kubeconfig $i:/etc/kubernetes/
scp /etc/kubernetes/pki/{ca.pem,ca-key.pem,front-proxy-ca.pem} $i:/etc/kubernetes/pki/
done
kubelet configuration
Create the required directories on all nodes
mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/kubernetes/manifests/
Configure the kubelet service on all nodes
cat > /usr/lib/systemd/system/kubelet.service << \EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
ExecStart=/usr/local/bin/kubelet \
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--config=/etc/kubernetes/kubelet-conf.yaml \
--container-runtime=remote \
--runtime-request-timeout=15m \
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
--cgroup-driver=systemd \
--node-labels=node.kubernetes.io/node=
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
- Note the trailing value of --node-labels=node.kubernetes.io/node=: on Ubuntu it must be '' (two single quotes), while on CentOS it is left empty.
Create the kubelet configuration file
Note: if you changed the service CIDR, update clusterDNS in kubelet-conf.yaml to the tenth address of the service CIDR, e.g. 10.96.0.10
cat > /etc/kubernetes/kubelet-conf.yaml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 2m0s
enabled: true
x509:
clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 5m0s
cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
imagefs.available: 15%
memory.available: 100Mi
nodefs.available: 10%
nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
Start kubelet on all nodes
systemctl daemon-reload
systemctl enable --now kubelet
systemctl status kubelet
At this point it is normal for the system log /var/log/messages to show only the following message:
Unable to update cni config: no networks found in /etc/cni/net.d
[root@k8s-master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready <none> 7m v1.24.6
k8s-master02 Ready <none> 3m36s v1.24.6
k8s-master03 Ready <none> 3m48s v1.24.6
k8s-node01 Ready <none> 3m31s v1.24.6
k8s-node02 Ready <none> 3m30s v1.24.6
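Because of the auto-approve ClusterRoleBindings created earlier, each kubelet's certificate request should already be approved and issued:
kubectl get csr # expect CONDITION Approved,Issued for every node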
kube-proxy configuration
cat > /etc/kubernetes/kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
acceptContentTypes: ""
burst: 10
contentType: application/vnd.kubernetes.protobuf
kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
qps: 5
clusterCIDR: 172.16.0.0/12
configSyncPeriod: 15m0s
conntrack:
max: null
maxPerCore: 32768
min: 131072
tcpCloseWaitTimeout: 1h0m0s
tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
masqueradeAll: false
masqueradeBit: 14
minSyncPeriod: 0s
syncPeriod: 30s
ipvs:
masqueradeAll: true
minSyncPeriod: 5s
scheduler: "rr"
syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-proxy \\
--config=/etc/kubernetes/kube-proxy.yaml \\
--v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
Start kube-proxy
systemctl daemon-reload
systemctl enable --now kube-proxy
systemctl status kube-proxy
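Optionally confirm kube-proxy really came up in IPVS mode:
curl 127.0.0.1:10249/proxyMode # expect: ipvs
ipvsadm -Ln                    # IPVS rules appear once Services exist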
Install Calico
Run the following steps on master01 only
CNI plugin conflict: rename the default containerd CNI config out of the way. If several CNI config files exist in /etc/cni/net.d, kubelet picks the first in lexical filename order, so the containerd-net config would otherwise win over Calico.
mv /etc/cni/net.d/10-containerd-net.conflist /etc/cni/net.d/10-containerd-net.conflist.bak
ifconfig cni0 down && ip link delete cni0
systemctl daemon-reload
systemctl restart containerd kubelet
# using the etcd datastore directly performs better
curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.5/manifests/calico-etcd.yaml -O
# https://docs.projectcalico.org/getting-started/kubernetes/installation/config-options
Here we install with the Kubernetes API datastore
wget https://docs.projectcalico.org/manifests/calico.yaml --no-check-certificate
Modify the Pod CIDR in calico.yaml
POD_CIDR="172.16.0.0/12"
sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@' calico.yaml
sed -i 's@# value: "192.168.0.0/16"@ value: '"$POD_CIDR"'@' calico.yaml
kubectl apply -f calico.yaml
Check Calico status
[root@k8s-master01 ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-6bb4597c4f-smvmr 1/1 Running 0 2m16s
calico-node-2bmx4 1/1 Running 0 2m16s
calico-node-4jpvn 1/1 Running 0 2m16s
calico-node-7bh4b 1/1 Running 0 2m16s
calico-node-l5wch 1/1 Running 0 2m16s
calico-node-r896n 1/1 Running 0 2m16s
calicoctl (optional)
wget https://github.com/projectcalico/calico/releases/download/v3.24.2/calicoctl-linux-amd64 -O /usr/local/bin/calicoctl
chmod +x /usr/local/bin/calicoctl
# if ~/.kube/config is already configured, the following two exports are unnecessary
export KUBECONFIG=/etc/kubernetes/admin.kubeconfig
export DATASTORE_TYPE=kubernetes
[root@k8s-master01 ~]# calicoctl get nodes
NAME
k8s-master01
k8s-master02
k8s-master03
k8s-node01
k8s-node02
[root@k8s-master01 ~]# calicoctl get ippool -o wide
NAME CIDR NAT IPIPMODE VXLANMODE DISABLED DISABLEBGPEXPORT SELECTOR
default-ipv4-ippool 172.16.0.0/12 true Always Never false false all()
[root@k8s-master01 ~]# calicoctl node status
Calico process is running.
IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
+---------------+-------------------+-------+----------+-------------+
| 192.168.1.172 | node-to-node mesh | up | 11:10:04 | Established |
| 192.168.1.173 | node-to-node mesh | up | 11:10:04 | Established |
| 192.168.1.174 | node-to-node mesh | up | 13:00:34 | Established |
| 192.168.1.175 | node-to-node mesh | up | 11:10:05 | Established |
+---------------+-------------------+-------+----------+-------------+
Install CoreDNS
git clone https://github.com/coredns/deployment.git
cd deployment/kubernetes
./deploy.sh -s -i 10.96.0.10 | kubectl apply -f -
[root@k8s-master01 kubernetes]# kubectl get po -n kube-system -l k8s-app=kube-dns
NAME READY STATUS RESTARTS AGE
coredns-6f4b4bd8fb-v26qh 1/1 Running 0 47s
Install metrics-server
Sync the front-proxy CA certificate to all node machines
for i in k8s-node01 k8s-node02;do
scp /etc/kubernetes/pki/front-proxy-ca.pem $i:/etc/kubernetes/pki/
done
mkdir ~/metrics-server && cd ~/metrics-server
cat > metrics-server.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-insecure-tls
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem # change to front-proxy-ca.crt for kubeadm
- --requestheader-username-headers=X-Remote-User
- --requestheader-group-headers=X-Remote-Group
- --requestheader-extra-headers-prefix=X-Remote-Extra-
image: registry.aliyuncs.com/google_containers/metrics-server:v0.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
- name: ca-ssl
mountPath: /etc/kubernetes/pki
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
- name: ca-ssl
hostPath:
path: /etc/kubernetes/pki
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
EOF
kubectl apply -f metrics-server.yaml
[root@k8s-master01 ~]# kubectl top node
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
k8s-master01 164m 8% 1232Mi 65%
k8s-master02 149m 7% 1210Mi 64%
k8s-master03 161m 8% 1197Mi 63%
k8s-node01 64m 6% 867Mi 46%
k8s-node02 63m 6% 1230Mi 65%
Install Dashboard
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
# The default image is docker.io/kubernetesui/dashboard; the default registry with a domestic mirror works fine
# registry.aliyuncs.com/google_containers:v2.7.0 fails with "exec /dashboard: exec format error", most likely a wrong-architecture image
kubectl apply -f recommended.yaml
cat > dashboard-user.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
EOF
kubectl apply -f dashboard-user.yaml
Change the dashboard service type to NodePort
[root@k8s-master01 ~]# kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
...
sessionAffinity: None
type: NodePort # change ClusterIP to NodePort
status:
loadBalancer: {}
[root@k8s-master01 ~]# kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes-dashboard NodePort 10.101.102.61 <none> 443:32220/TCP 2m54s
Create a token for access
kubectl -n kubernetes-dashboard create token admin-user
Copy the generated token: yJhbGciOiJSUzI1NiIsImtpZCI6ImVvQzxxx…
Open https://vip:NodePort in Firefox (Chrome refuses the login page unless the dashboard certificate is replaced)
https://192.168.1.111:32220
Cluster verification
[root@k8s-master01 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
etcd-2 Healthy {"health":"true","reason":""}
controller-manager Healthy ok
etcd-1 Healthy {"health":"true","reason":""}
etcd-0 Healthy {"health":"true","reason":""}
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready <none> 19h v1.24.6
k8s-master02 Ready <none> 17h v1.24.6
k8s-master03 Ready <none> 17h v1.24.6
k8s-node01 Ready <none> 17h v1.24.6
k8s-node02 Ready <none> 17h v1.24.6
Test DNS with busybox
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: busybox
namespace: default
spec:
containers:
- name: busybox
image: busybox:1.28
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
restartPolicy: Always
EOF
[root@k8s-master01 ~]# kubectl exec -it busybox -- nslookup kubernetes
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
[root@k8s-master01 ~]# kubectl exec -it busybox -- ping -c2 www.baidu.com
PING www.baidu.com (14.215.177.38): 56 data bytes
64 bytes from 14.215.177.38: seq=0 ttl=49 time=9.445 ms
64 bytes from 14.215.177.38: seq=1 ttl=49 time=9.524 ms
Test an nginx Service and Pod-to-Pod network connectivity
cat > nginx_deploy.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx:alpine
name: nginx
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx
spec:
selector:
app: nginx
type: NodePort
ports:
- protocol: TCP
port: 80
targetPort: 80
nodePort: 30001
EOF
kubectl apply -f nginx_deploy.yaml
[root@k8s-master01 ~]# kubectl get svc,pod -owide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 45h <none>
service/nginx NodePort 10.102.100.128 <none> 80:30001/TCP 16m app=nginx
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/busybox 1/1 Running 0 24m 172.18.195.3 k8s-master03 <none> <none>
pod/nginx-6fb79bc456-s84wf 1/1 Running 0 16m 172.25.92.66 k8s-master02 <none> <none>
for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02; do
echo $i
ssh root@$i curl -s 10.102.100.128 | grep "using nginx" # nginx svc ip
ssh root@$i curl -s 172.25.92.66 | grep "using nginx" # pod ip
done
Access vip:nodePort from a host
[root@k8s-master01 ~]# curl -I 192.168.1.111:30001
HTTP/1.1 200 OK
Server: nginx/1.23.2
Date: Thu, 20 Oct 2022 12:22:07 GMT
Content-Type: text/html
Content-Length: 615
Last-Modified: Wed, 19 Oct 2022 10:28:53 GMT
Connection: keep-alive
ETag: "634fd165-267"
Accept-Ranges: bytes
Install kubectl command-line completion
yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc