I. Deployment Preparation (run on all nodes)

1. Disable SELinux
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
setenforce 0
2. Disable swap
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a
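A quick check that swap is fully off:
# verify: the Swap line in free should read all zeros, and swapon prints nothing
free -h
swapon --show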
3. Set the hostname on each node

Control-plane node Master01:

hostnamectl set-hostname master01 && bash

Control-plane node Master02:

hostnamectl set-hostname master02 && bash

Control-plane node Master03:

hostnamectl set-hostname master03 && bash

Worker node Node01:

hostnamectl set-hostname node01 && bash
4. Configure the hosts file on each master node
  • Open the hosts file
vim /etc/hosts
  • Add the hostnames and IPs of all four hosts (node01's entry should be added as well, with its actual IP; only the three masters are shown below)
192.168.170.129 master01
192.168.170.130 master02
192.168.170.131 master03
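A quick resolution check (each name should map to the address just added):
# verify: hostnames resolve via /etc/hosts
getent hosts master01 master02 master03
ping -c 1 master02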
5. Install the required component packages and dependencies on all four hosts
yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip autoconf automake zlib-devel epel-release openssh-server libaio-devel vim ncurses-devel socat conntrack telnet ipvsadm
6. Configure passwordless SSH between the master hosts. Generate a key pair, then copy the public key to every other host (the example shows node01; a loop covering all hosts is sketched after these commands)
ssh-keygen

ssh-copy-id node01
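A minimal sketch that copies the key to all four hosts in one pass (hostnames from the hosts file above):
# copy the public key to every node in the cluster
for h in master01 master02 master03 node01; do
    ssh-copy-id "$h"
done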
7. Disable the firewall
systemctl stop firewalld && systemctl disable firewalld
8. Adjust kernel parameters

Run on all hosts:

modprobe br_netfilter
  • modprobe: the command that loads (or unloads) kernel modules.
  • br_netfilter: lets bridged network traffic be filtered by iptables rules; needed whenever network bridging is in use.
  • In a Kubernetes container-network environment this module ensures the kernel can correctly filter and forward traffic, particularly for container-to-container communication. (A persistence note follows this list.)
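modprobe only takes effect until the next reboot; a minimal persistence sketch, assuming systemd-modules-load (the same mechanism used for the IPVS modules below):
# load br_netfilter automatically at boot
cat > /etc/modules-load.d/br_netfilter.conf <<EOF
br_netfilter
EOF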
Run on all hosts:
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

sysctl -p /etc/sysctl.d/k8s.conf # apply the settings
  • net.bridge.bridge-nf-call-ip6tables = 1: let IPv6 traffic crossing a Linux bridge be filtered by ip6tables.
  • net.bridge.bridge-nf-call-iptables = 1: let IPv4 traffic crossing a Linux bridge be filtered by iptables.
  • net.ipv4.ip_forward = 1: allow the Linux kernel to forward (route) IPv4 packets.
9. Configure time synchronization
  • Install Chrony on all four hosts
yum -y install chrony
  • On all four hosts, append China-local NTP servers to the configuration file
echo "server ntp1.aliyun.com iburst" >> /etc/chrony.conf
echo "server ntp2.aliyun.com iburst" >> /etc/chrony.conf
echo "server ntp3.aliyun.com iburst" >> /etc/chrony.conf
echo "server ntp.tuna.tsinghua.edu.cn iburst" >> /etc/chrony.conf

tail -n 4 /etc/chrony.conf
systemctl restart chronyd 
systemctl enable chronyd 
# check synchronization status
chronyc sources
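chronyc tracking confirms the clock is actually locked to a source:
# verify: "Leap status : Normal" means the clock is synchronized
chronyc tracking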
10. Install ipvsadm on all nodes
yum install ipvsadm ipset sysstat conntrack libseccomp -y
cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack_ipv4
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
## Note: on kernels 4.19 and later, nf_conntrack_ipv4 was merged into nf_conntrack; use that module name instead.

## load the modules now and enable loading on every boot
systemctl enable --now systemd-modules-load.service
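Verify the modules actually loaded:
# verify: the ip_vs and nf_conntrack modules appear in the loaded-module list
lsmod | grep -e ip_vs -e nf_conntrack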
Enable the remaining kernel parameters a Kubernetes cluster needs; configure on all nodes. Note this writes the same /etc/sysctl.d/k8s.conf as step 8 and supersedes it with a superset of those settings.
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

sysctl --system
11. Install the high-availability components (HAProxy + Keepalived)
## Install HAProxy and Keepalived via yum on all master nodes:
yum install keepalived haproxy -y


vim /etc/haproxy/haproxy.cfg 
## Replace the file's entire contents with the following, substituting your own IPs (the sample 10.150.32.x backend addresses come from a different environment than the 192.168.170.x hosts above)
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

frontend k8s-master
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server test-k8s-master-01 10.150.32.4:6443  check
  server test-k8s-master-02 10.150.32.5:6443  check
  server test-k8s-master-03 10.150.32.6:6443  check
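Before starting, validate the file with HAProxy's built-in syntax check:
# expect "Configuration file is valid"
haproxy -c -f /etc/haproxy/haproxy.cfg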
Configure Keepalived on all master nodes.

The configuration differs per node, so keep them straight: check each node's IP and network interface (the interface parameter). With identical priority values VRRP elects the master by highest IP; giving master01 the highest priority (101, then 100 and 99 below) makes the election deterministic.

Master01 configuration (/etc/keepalived/keepalived.conf):
! Configuration File for keepalived
global_defs {
    router_id aws7-prod-k8s-master01
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 1
    weight -50
    fall 1
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0 # NIC name
    mcast_src_ip 10.150.30.52 # this node's IP
    virtual_router_id 51
    priority 101 # highest of the three: master01 holds the VIP by default
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        10.150.30.57 # VIP address
    }
    track_script {
       chk_apiserver
    }
}
Master02 configuration:
! Configuration File for keepalived
global_defs {
    router_id aws7-prod-k8s-master02
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 1
    weight -50
    fall 1
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    mcast_src_ip 10.150.30.53
    virtual_router_id 51
    priority 100 # lower than master01 for a deterministic election
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        10.150.30.57
    }
    track_script {
       chk_apiserver
    }
}
Master03 configuration:
! Configuration File for keepalived
global_defs {
    router_id aws7-prod-k8s-master03
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 1
    weight -50
    fall 1
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    mcast_src_ip 10.150.30.54
    virtual_router_id 51
    priority 99 # lowest of the three masters
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        10.150.30.57
    }
    track_script {
       chk_apiserver
    }
}
Configure the Keepalived health-check script:
vim /etc/keepalived/check_apiserver.sh
## add the following
#!/bin/bash

# Poll up to 3 times for a running haproxy process.
err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

# If haproxy never appeared, stop keepalived so the VIP fails over
# to another master; otherwise report healthy.
if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi

## make the script executable
chmod +x /etc/keepalived/check_apiserver.sh
## start haproxy and keepalived
systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
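Once both services are up, a quick check (assuming the VIP and interface from the configs above):
# the VIP 10.150.30.57 should be bound on eth0 on exactly one master
ip addr show eth0 | grep 10.150.30.57
# HAProxy's monitor endpoint (port 33305, from the frontend above) should answer
curl -s http://127.0.0.1:33305/monitor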

II. Environment Deployment

1. Configure the Docker and Kubernetes yum repositories
# Docker Aliyun mirror
cat <<EOF | tee /etc/yum.repos.d/docker-ce.repo
[docker-ce-stable]
name=Docker CE Stable - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/8/\$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
EOF

# Kubernetes Aliyun mirror (new community repo layout; this guide uses the v1.30 stream)
cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/repodata/repomd.xml.key
EOF

# Kubernetes Aliyun mirror for versions before 1.24 (legacy repo). Use only one of the two kubernetes.repo heredocs; this one overwrites the file written above, so skip it for the v1.30 install in this guide.
cat > /etc/yum.repos.d/kubernetes.repo <<- EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum clean all && yum makecache
2. Install containerd
yum -y install containerd.io
3.修改containerd存储路径
vim /etc/containerd/config.toml
# 将root修改为/data
root = "/data/containerd"
# 重启containerd
systemctl restart containerd.service
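One caveat: the config.toml shipped with the containerd.io package disables the CRI plugin, which kubeadm requires, and kubelet v1.30 defaults to the systemd cgroup driver. A minimal sketch that regenerates the default config and applies both fixes plus the /data root (the sed patterns assume the default generated layout):
# regenerate the full default config (the packaged one disables the CRI plugin)
containerd config default > /etc/containerd/config.toml
# move the storage root to /data, as above
sed -i 's|^root = .*|root = "/data/containerd"|' /etc/containerd/config.toml
# use the systemd cgroup driver to match the kubelet
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
systemctl restart containerd.service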
4. Install Kubernetes

Install kubelet, kubeadm, and kubectl on all nodes, and set them to start on boot

yum install -y kubelet-1.30.0 kubeadm-1.30.0 kubectl-1.30.0 --disableexcludes=kubernetes

systemctl enable kubelet
systemctl start kubelet
5. Initialize the Kubernetes cluster (run on Master01; the control-plane endpoint below is the Keepalived VIP with the HAProxy port)
kubeadm init \
  --kubernetes-version=v1.30.0 \
  --pod-network-cidr=172.19.0.0/16 \
  --service-cidr=172.18.0.0/16 \
  --image-repository=registry.aliyuncs.com/google_containers \
  --cri-socket=unix:///run/containerd/containerd.sock \
  --control-plane-endpoint "10.150.30.57:16443" \
  --upload-certs

Record the token and certificate key printed at the end of the init output.
On control-plane node Master01, run:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
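kubectl should now reach the API server through the VIP:
# verify: master01 is listed (NotReady until the CNI is installed)
kubectl get nodes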
Scale out the control plane: join Master02 and Master03 to the cluster.

On control-plane nodes master02 and master03, start kubelet:

systemctl start kubelet && systemctl enable kubelet

Generate a fresh join command on control-plane node Master01:

kubeadm token create --print-join-command

Append --control-plane and the certificate key (from the init output) to the generated join command, as below:

kubeadm join 10.150.30.57:16443 --token wk073i.7m74hnzze293w2ef \
        --discovery-token-ca-cert-hash sha256:13b2a2ae82d5548a629c8e65f97852c60cd78b8efd11b646726b8e7ecf5650c9 \
        --control-plane --certificate-key bfbd7d0678b89f70791f558170220235bfb80fb0f486e49697d13837ff206169 
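The worker node node01 joins with the same printed command, minus the --control-plane and --certificate-key flags:

kubeadm join 10.150.30.57:16443 --token wk073i.7m74hnzze293w2ef \
        --discovery-token-ca-cert-hash sha256:13b2a2ae82d5548a629c8e65f97852c60cd78b8efd11b646726b8e7ecf5650c9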

After a control-plane node joins, run the following on it so kubectl works:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Create an image-pull secret

# substitute your own image registry address, username, and password
kubectl create secret docker-registry gj-harbor --docker-server="harbor.private.cn:8443" --docker-username="k8s" --docker-password="jmu4EArdaaseA" -n kube-system

# patch every service account in the namespace (run again after installing Calico to authorize its service accounts)
for sa in $(kubectl get sa -n kube-system -o jsonpath='{.items[*].metadata.name}'); do
  kubectl patch sa "$sa" -n kube-system \
    --type='json' -p='[{"op":"add","path":"/imagePullSecrets","value":[{"name":"gj-harbor"}]}]'
done
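Spot-check that the patch landed:
# verify: each service account should list gj-harbor under imagePullSecrets
kubectl get sa -n kube-system -o jsonpath='{range .items[*]}{.metadata.name}{": "}{.imagePullSecrets}{"\n"}{end}'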

Install Calico. The following steps run on master01 only.

# download the calico.yaml manifest (an internally hosted copy whose image tags were already rewritten)
mkdir -p /data/yaml/kube-system && wget http://10.9.0.166:8080/packages/shell/fangxi/k8s/calico/calico.yaml -O /data/yaml/kube-system/calico.yaml

kubectl apply -f /data/yaml/kube-system/calico.yaml
# if image pulls fail, re-run the service-account patch loop above to authorize the Calico service accounts, then re-apply
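Watch the rollout; nodes turn Ready once Calico is up:
# verify: calico pods reach Running, then the nodes report Ready
kubectl get pods -n kube-system
kubectl get nodes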