I. Operating System Initialization [run on ALL nodes]

1. Disable the firewall

[root@localhost ~]# systemctl stop firewalld 
[root@localhost ~]# systemctl disable firewalld 
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.

2. Flush the iptables rules

[root@localhost ~]# iptables -F 
[root@localhost ~]# iptables -nvL 
Chain INPUT (policy ACCEPT 30 packets, 1980 bytes)
 pkts bytes target     prot opt in     out     source               destination         

Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
 pkts bytes target     prot opt in     out     source               destination         

Chain OUTPUT (policy ACCEPT 16 packets, 1520 bytes)
 pkts bytes target     prot opt in     out     source               destination     

3. Disable SELinux

[root@localhost ~]# setenforce 0     # temporary, effective immediately
[root@localhost ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config  # permanent, takes effect after reboot
[root@localhost ~]# getenforce  # check the SELinux status
Permissive
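
getenforce still reports Permissive here because setenforce 0 only relaxes enforcement for the current boot. To confirm the permanent setting was actually written by the sed above:

[root@localhost ~]# grep ^SELINUX= /etc/selinux/config
SELINUX=disabled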

4. Disable the swap partition

[root@localhost ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab  # permanent: comment out the swap line, effective after reboot
[root@localhost ~]# swapoff -a  # temporary: turn swap off now
[root@localhost ~]# free -h 
              total        used        free      shared  buff/cache   available
Mem:           3.7G        458M        2.8G         23M        412M        3.0G
Swap:            0B          0B          0B
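
free -h only shows the runtime state; double-check that the fstab entry is now commented out so swap stays off across reboots:

[root@localhost ~]# grep swap /etc/fstab   # the swap entry (device path varies) should now begin with '#'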

5. Set hostnames according to your plan

[root@localhost ~]# hostnamectl set-hostname k8s-master01  # run on the master
[root@localhost ~]# bash
[root@localhost ~]# hostnamectl set-hostname k8s-node01  # run on node1
[root@localhost ~]# bash
[root@localhost ~]# hostnamectl set-hostname k8s-node02  # run on node2
[root@localhost ~]# bash
....

6. Add hosts entries on every node

[root@k8s-master01 ~]# cat >> /etc/hosts << EOF
192.168.1.1 k8s-master01
192.168.1.2 k8s-node01
192.168.1.3 k8s-node02 
EOF
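
A quick sanity check that every hostname resolves and answers (a minimal sketch, assuming the three nodes above):

[root@k8s-master01 ~]# for h in k8s-master01 k8s-node01 k8s-node02; do ping -c 1 -W 1 $h; done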

7. Pass bridged IPv4 traffic to the iptables chains and enable IP forwarding (kernel parameter tuning)

[root@k8s-master01 ~]# cat <<EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF
[root@k8s-master01 ~]# sysctl --system  # load the new settings

# net.bridge.bridge-nf-call-iptables = 1 makes iptables process traffic generated by Linux bridges.
# Keep comments out of the heredoc (sysctl does not parse inline comments), and use
# sysctl --system rather than sysctl -p: -p only reads /etc/sysctl.conf and would miss /etc/sysctl.d/.
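
The two bridge-nf-call settings only exist once the br_netfilter kernel module is loaded (and containerd's overlay snapshotter wants overlay). A minimal sketch, following the pattern in the official Kubernetes docs, to load both now and at every boot:

[root@k8s-master01 ~]# cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
[root@k8s-master01 ~]# modprobe overlay
[root@k8s-master01 ~]# modprobe br_netfilter
[root@k8s-master01 ~]# lsmod | grep br_netfilter  # verify the module is loaded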

8. Time synchronization

[root@k8s-master01 ~]# ntpdate asia.pool.ntp.org 
26 Mar 23:06:36 ntpdate[9417]: step time server 17.253.116.253 offset -28798.713312 sec
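
ntpdate performs a one-shot correction and may need installing first on a minimal system. To keep the clocks from drifting apart again, a cron entry is one lightweight option (a sketch, assuming you stick with ntpdate rather than chronyd):

[root@k8s-master01 ~]# yum install -y ntpdate   # only if the command is missing
[root@k8s-master01 ~]# (crontab -l 2>/dev/null; echo '0 * * * * /usr/sbin/ntpdate asia.pool.ntp.org >/dev/null 2>&1') | crontab -
[root@k8s-master01 ~]# crontab -l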

9. Replace the yum repos with the Aliyun mirror

[root@k8s-master01 ~]# sudo mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
[root@k8s-master01 ~]# sudo curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@k8s-master01 ~]# sudo yum clean all
[root@k8s-master01 ~]# sudo yum makecache
[root@k8s-master01 ~]# sudo yum update


II. Install containerd / kubeadm / kubelet [all nodes]

1. Install containerd with yum and configure it

[root@k8s-master01 ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
[root@k8s-master01 ~]# yum -y install containerd.io

# generate the default containerd config file
[root@k8s-master01 ~]# containerd config default > /etc/containerd/config.toml
# original line:
sandbox_image = "registry.k8s.io/pause:3.6"
# change it to:
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"

[root@k8s-master01 ~]#  systemctl enable containerd && systemctl start containerd
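
One more edit in the same config.toml is commonly required: kubeadm configures the kubelet to use the systemd cgroup driver by default, so containerd's runc should match, otherwise pods can restart erratically. A sketch of the change (the SystemdCgroup key lives under [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]):

[root@k8s-master01 ~]# sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
[root@k8s-master01 ~]# systemctl restart containerd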

2. Add the Aliyun yum repository for Kubernetes

[root@k8s-master01 ~]# cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/
> enabled=1
> gpgcheck=1
> gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/repodata/repomd.xml.key
> EOF

[root@k8s-master01 ~]# yum --showduplicates list kubelet  # list the available kubelet versions

3. Install kubeadm, kubectl, and kubelet

This Aliyun repository already pulls in the cri-tools and CNI plugins that containerd needs when the Kubernetes components are installed, so they do not have to be installed separately; if you deploy containerd from binaries instead, you will need to install them yourself.

Kubernetes releases frequently, so pin an explicit version here:
[root@k8s-master01 ~]# yum install -y kubelet-1.30.1 kubeadm-1.30.1 kubectl-1.30.1 
[root@k8s-master01 ~]# systemctl enable kubelet # enable kubelet at boot, but do NOT start it yet; kubeadm init will start it automatically (it cannot start now anyway, since its config file does not exist yet)
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

4. Point crictl at containerd

Otherwise crictl prints a warning every time it is run.

[root@k8s-master01 ~]# vim /etc/crictl.yaml
# add the following:
runtime-endpoint: unix:///var/run/containerd/containerd.sock
image-endpoint: unix:///var/run/containerd/containerd.sock
timeout: 10
debug: false
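
A quick check that crictl now reaches containerd without warnings:

[root@k8s-master01 ~]# crictl info | head -n 5
[root@k8s-master01 ~]# crictl images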

5. Pull the Kubernetes images

Run on all nodes.

# list the images Kubernetes needs (pass --kubernetes-version v1.30.1 to match the version installed above; without it kubeadm resolves the latest stable-1.30 patch, v1.30.14 below)
[root@k8s-master01 ~]# kubeadm config images list --image-repository registry.aliyuncs.com/google_containers
I0907 19:09:23.987349    7825 version.go:256] remote version is much newer: v1.34.0; falling back to: stable-1.30
registry.aliyuncs.com/google_containers/kube-apiserver:v1.30.14
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.30.14
registry.aliyuncs.com/google_containers/kube-scheduler:v1.30.14
registry.aliyuncs.com/google_containers/kube-proxy:v1.30.14
registry.aliyuncs.com/google_containers/coredns:v1.11.1
registry.aliyuncs.com/google_containers/pause:3.9
registry.aliyuncs.com/google_containers/etcd:3.5.12-0

Pull them from the command line (if you initialize with kubernetesVersion 1.30.1 as below, change the v1.30.14 tags to v1.30.1 to match):

crictl pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.30.14             
crictl pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.30.14 
crictl pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.30.14            
crictl pull registry.aliyuncs.com/google_containers/kube-proxy:v1.30.14  
crictl pull registry.aliyuncs.com/google_containers/coredns:v1.11.1
crictl pull registry.aliyuncs.com/google_containers/pause:3.9  
crictl pull registry.aliyuncs.com/google_containers/etcd:3.5.12-0
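
Alternatively, kubeadm can pull the whole set in one command, which also guarantees the tags match the version you will initialize (a sketch, assuming v1.30.1):

[root@k8s-master01 ~]# kubeadm config images pull \
   --image-repository registry.aliyuncs.com/google_containers \
   --kubernetes-version v1.30.1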

6. Deploy the master (k8s-master01)

Initialize the master.

# generate the default init config and adjust the settings to your environment
[root@k8s-master01 ~]# kubeadm config print init-defaults > kubeadm-init.yaml
[root@k8s-master01 ~]# vim kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # change this to the master's address
  advertiseAddress: 192.168.1.1
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-master01
  taints: null
  # added: ignore preflight errors
  ignorePreflightErrors:
  - all
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
# change the image pull repository
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
# set the version to install
kubernetesVersion: 1.30.1
networking:
  dnsDomain: cluster.local
  # virtual IP range for Services; adjust as needed
  serviceSubnet: 10.96.0.0/12
  # added: the pod network; must match the CALICO_IPV4POOL_CIDR field in the CNI manifest deployed later
  podSubnet: 10.244.0.0/16
scheduler: {}

[root@k8s-master01 ~]# kubeadm init --config=kubeadm-init.yaml

Alternatively, run the init command directly with flags; it does the same thing as the config file, so choose one approach:

[root@k8s-master ~]# kubeadm init \
   --apiserver-advertise-address=192.168.1.1 \
   --image-repository registry.aliyuncs.com/google_containers \
   --kubernetes-version v1.30.1 \
   --service-cidr=10.96.0.0/12 \
   --pod-network-cidr=10.244.0.0/16 \
   --ignore-preflight-errors=all

After initialization completes, copy the admin kubeconfig that kubectl uses to its default path:

[root@k8s-master ~]# mkdir -p $HOME/.kube
[root@k8s-master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
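
If you operate as root, kubeadm init also prints an environment-variable alternative to copying the file:

[root@k8s-master ~]# export KUBECONFIG=/etc/kubernetes/admin.conf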

7. Join the worker nodes to the cluster

Run this on each worker node: to add a new node to the cluster, execute the kubeadm join command printed in the kubeadm init output:

[root@k8s-node1 ~]# kubeadm join 192.168.1.1:6443 --token h77r38.7u4lh0gq8hh8c4pw --discovery-token-ca-cert-hash sha256:fb43f68785633aa1888f115591263e8231c7d9ff2a72ee0104163bf205684d3b

# The default token is valid for 24 hours; once it expires it can no longer be used and a new one must be created. The following command generates a token and prints the full join command in one step:

[root@k8s-master kubernetes]# kubeadm token create --print-join-command
kubeadm join 192.168.1.1:6443 --token q0rvf4.rrr2b6qupwsi9mfj --discovery-token-ca-cert-hash sha256:fb43f68785633aa1888f115591263e8231c7d9ff2a72ee0104163bf205684d3b
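
Existing tokens and their expiry times can be inspected at any point:

[root@k8s-master kubernetes]# kubeadm token list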

参考资料:https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-join/

8. Deploy the container network (CNI)

After joining the nodes you will find their status is NotReady, because the network plugin has not been deployed yet. Next, deploy the Calico network plugin.

[root@k8s-master01 ~]# kubectl get nodes 
NAME           STATUS     ROLES           AGE   VERSION
k8s-node01     NotReady   <none>          9s    v1.30.1
k8s-node02     NotReady   <none>          5s    v1.30.1
k8s-master01   NotReady   control-plane   99s   v1.30.1

Calico is a pure layer-3 data center networking solution and currently the mainstream network choice for Kubernetes.
# download the manifest

[root@k8s-master ~]# wget https://docs.projectcalico.org/manifests/calico.yaml

After downloading, edit the pod network it defines (CALICO_IPV4POOL_CIDR) so that it matches the --pod-network-cidr (the network pods use) passed to kubeadm init earlier:

[root@k8s-master01 ~]# vim calico.yaml
- name: CALICO_IPV4POOL_CIDR 
  value: "10.244.0.0/16"
...
# if the host has multiple NICs, Calico can be bound to a specific one
- name: IP_AUTODETECTION_METHOD
  value: interface=ens33
# Auto-detect the BGP IP address.

After editing calico.yaml, pull the images Calico needs [on every cluster node].

# pull from the command line (replace the version below with your own; check which version your manifest uses with: grep image: calico.yaml)
ctr -n k8s.io image pull docker.m.daocloud.io/calico/pod2daemon-flexvol:v3.25.0
ctr -n k8s.io image pull docker.m.daocloud.io/calico/typha:v3.25.0
ctr -n k8s.io image pull docker.m.daocloud.io/calico/kube-controllers:v3.25.0
ctr -n k8s.io image pull docker.m.daocloud.io/calico/apiserver:v3.25.0
ctr -n k8s.io image pull docker.m.daocloud.io/calico/csi:v3.25.0
ctr -n k8s.io image pull docker.m.daocloud.io/calico/cni:v3.25.0
ctr -n k8s.io image pull docker.m.daocloud.io/calico/node:v3.25.0
ctr -n k8s.io image pull docker.m.daocloud.io/calico/node-driver-registrar:v3.25.0


ctr -n k8s.io image tag docker.m.daocloud.io/calico/pod2daemon-flexvol:v3.25.0    docker.io/calico/pod2daemon-flexvol:v3.25.0
ctr -n k8s.io image tag docker.m.daocloud.io/calico/typha:v3.25.0                 docker.io/calico/typha:v3.25.0
ctr -n k8s.io image tag docker.m.daocloud.io/calico/kube-controllers:v3.25.0      docker.io/calico/kube-controllers:v3.25.0
ctr -n k8s.io image tag docker.m.daocloud.io/calico/apiserver:v3.25.0             docker.io/calico/apiserver:v3.25.0
ctr -n k8s.io image tag docker.m.daocloud.io/calico/csi:v3.25.0                   docker.io/calico/csi:v3.25.0
ctr -n k8s.io image tag docker.m.daocloud.io/calico/cni:v3.25.0                   docker.io/calico/cni:v3.25.0
ctr -n k8s.io image tag docker.m.daocloud.io/calico/node:v3.25.0                  docker.io/calico/node:v3.25.0
ctr -n k8s.io image tag docker.m.daocloud.io/calico/node-driver-registrar:v3.25.0 docker.io/calico/node-driver-registrar:v3.25.0
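
The eight pull/tag pairs above can be collapsed into one loop (a sketch; adjust the image list and version to whatever your calico.yaml actually references):

VER=v3.25.0
for img in pod2daemon-flexvol typha kube-controllers apiserver csi cni node node-driver-registrar; do
  ctr -n k8s.io image pull docker.m.daocloud.io/calico/$img:$VER
  ctr -n k8s.io image tag  docker.m.daocloud.io/calico/$img:$VER docker.io/calico/$img:$VER
done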

Once the images are downloaded, deploy the Calico pods:

[root@k8s-master01 ~]# kubectl apply -f calico.yaml 
[root@k8s-master01 ~]# kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-5b9b456c66-rv4xw   1/1     Running   0          17m
calico-node-92mmt                          0/1     Running   0          17m
calico-node-pgsj6                          0/1     Running   0          17m
calico-node-xstz7                          0/1     Running   0          17m
coredns-7b5944fdcf-84bpm                   1/1     Running   0          32m
coredns-7b5944fdcf-kg272                   1/1     Running   0          32m
etcd-node                                  1/1     Running   0          32m
kube-apiserver-node                        1/1     Running   0          32m
kube-controller-manager-node               1/1     Running   0          32m
kube-proxy-4xcrm                           1/1     Running   0          32m
kube-proxy-l5cp2                           1/1     Running   0          31m
kube-proxy-vl98k                           1/1     Running   0          31m
kube-scheduler-node                        1/1     Running   0          32m

Once all the Calico pods are Running, the nodes become Ready as well:

[root@k8s-master01 ~]# kubectl get nodes 
NAME           STATUS   ROLES           AGE   VERSION
k8s-node01     Ready    <none>          31m   v1.30.1
k8s-node02     Ready    <none>          31m   v1.30.1
k8s-master01   Ready    control-plane   33m   v1.30.1

Tip: from here on, apply all component YAML manifests only on the master node.
Installation directory: /etc/kubernetes/
Control-plane component manifests: /etc/kubernetes/manifests/

参考资料:https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#pod-network

At this point, the kubeadm-based Kubernetes cluster deployment is complete.


[Appendix] Deploy ingress-nginx

[root@k8s-master01 ~]# wget -c https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.3.0/deploy/static/provider/baremetal/deploy.yaml
To download a different version, simply change the version number in the middle of the URL.
[root@k8s-master01 ~]# mv deploy.yaml ingress-controller.yaml

(1) The image registry (registry.k8s.io) must be replaced with a domestic mirror (k8s.m.daocloud.io here), otherwise the images cannot be pulled.

Check the images currently referenced:
[root@k8s-master01 ~]# grep -r "image:" ingress-controller.yaml
        image: registry.k8s.io/ingress-nginx/controller:v1.3.0@sha256:d1707ca76d3b044ab8a28277a2466a02100ee9f58a86af1535a3edf9323ea1b5
        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660
        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660
Rewrite the image addresses in bulk:
[root@k8s-master01 ~]# sed -i 's/registry.k8s.io/k8s.m.daocloud.io/g' ingress-controller.yaml 
[root@k8s-master01 ~]# grep -r "image:" ingress-controller.yaml
        image: k8s.m.daocloud.io/ingress-nginx/controller:v1.3.0@sha256:d1707ca76d3b044ab8a28277a2466a02100ee9f58a86af1535a3edf9323ea1b5
        image: k8s.m.daocloud.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660
        image: k8s.m.daocloud.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660

(2) Configure ingress to share the host network

[root@k8s-master1 ingress-nginx]# vim ingress-controller.yaml 
    spec:
      # share the host's network stack
      hostNetwork: true
      # schedule the pod onto a specific node, bypassing the scheduler
      nodeName: k8s-master01
      # alternatively, place the pod by node label; this goes through the scheduler
      # (so e.g. a taint can still block it), e.g. schedule onto nodes labeled
      # test-label-ingress-controller: "true"
      #nodeSelector:
      #  test-label-ingress-controller: "true"
      containers:
      ...

(3) Deploy ingress-nginx

[root@k8s-master01 ~]# kubectl apply -f ingress-controller.yaml
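
Then confirm the controller pod comes up (the ingress-nginx namespace is created by the manifest):

[root@k8s-master01 ~]# kubectl get pods -n ingress-nginx -o wide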