Kubernetes + Istio
echo "=== Kubernetes + Istio 集群初始化 ==="# 安装容器运行时(containerd)echo "=== 初始化完成 ==="# 其他Master节点加入。# 第一个Master节点。# 检查Sidecar注入。# 安装Istio到集群。可与Master共部署。# Service网段。# 给命名空间添加标签。# 配置kubectl。# 备份Istio配置。# Is
1. Environment Preparation and Planning
1.1 Hardware Resource Requirements
| Role | Count | CPU | Memory | Storage | Notes |
|---|---|---|---|---|---|
| Master | 3+ | 4 cores | 8GB | 50GB | Highly available control plane |
| Worker | 3+ | 8 cores | 16GB | 100GB | Worker nodes |
| etcd | 3 | 2 cores | 4GB | 50GB | Can be co-located with the Masters |
1.2 Network Planning
# Pod network CIDR
POD_CIDR="10.244.0.0/16"
# Service network CIDR
SERVICE_CIDR="10.96.0.0/12"
# Cluster DNS domain
CLUSTER_DOMAIN="cluster.local"
2. Kubernetes Cluster Deployment
2.1 Deploying a Highly Available Cluster with kubeadm
Initialize all nodes
# Disable swap
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
# Load the required kernel modules
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
# Set kernel parameters
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
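Before continuing, it is worth confirming that the modules and sysctls actually took effect; a quick sanity check:
# Confirm the kernel modules are loaded
lsmod | grep -E 'overlay|br_netfilter'
# Confirm the bridge and forwarding settings are applied
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward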
# Install the container runtime (containerd)
apt-get update
apt-get install -y containerd
mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml
systemctl restart containerd
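On systemd-based distributions the kubelet typically uses the systemd cgroup driver, while the default config generated above sets SystemdCgroup = false; switching containerd to the systemd driver avoids kubelet/containerd mismatches. A minimal sketch:
# Enable the systemd cgroup driver in containerd and restart it
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
systemctl restart containerd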
Install kubeadm, kubelet, and kubectl
apt-get update
apt-get install -y apt-transport-https ca-certificates curl
mkdir -p /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
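After pinning the packages, it helps to confirm the installed versions and that the hold is in place:
# Check installed versions and the package hold
kubeadm version -o short
kubelet --version
kubectl version --client
apt-mark showhold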
Initialize the control plane
# First Master node
kubeadm init --control-plane-endpoint="LOAD_BALANCER_DNS:6443" \
--upload-certs \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12
# Join the remaining Master nodes
kubeadm join LOAD_BALANCER_DNS:6443 --token <token> \
--discovery-token-ca-cert-hash <hash> \
--control-plane --certificate-key <key>
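The bootstrap token and the uploaded certificate key expire (tokens after 24 hours, the certificate key after 2 hours), so nodes joined later need fresh credentials. A sketch of how to regenerate them on the first Master:
# Print a fresh worker join command (new token + CA cert hash)
kubeadm token create --print-join-command
# Re-upload the control-plane certificates and print a new certificate key
kubeadm init phase upload-certs --upload-certs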
Install the network plugin (Calico)
kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/calico.yaml
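Nodes stay NotReady until the CNI pods come up, so it is reasonable to wait for Calico before continuing:
# Wait for the Calico pods and for all nodes to report Ready
kubectl rollout status daemonset/calico-node -n kube-system --timeout=300s
kubectl wait --for=condition=Ready nodes --all --timeout=300s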
3. Istio Service Mesh Deployment
3.1 Download and Install Istio
# Download the latest release
curl -L https://istio.io/downloadIstio | sh -
cd istio-*
sudo cp bin/istioctl /usr/local/bin/
# Install Istio into the cluster
istioctl install --set profile=demo -y
# Verify the installation
kubectl get pods -n istio-system
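The demo profile installs istiod plus the ingress and egress gateways; waiting for those deployments to become Available avoids racing the rest of the setup:
# Block until the Istio control-plane deployments are available
kubectl wait --for=condition=Available deployment --all -n istio-system --timeout=300s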
3.2 Enable Automatic Sidecar Injection
# Label the namespace for injection
kubectl label namespace default istio-injection=enabled
# Verify the namespace label
kubectl get namespace -L istio-injection
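Injection only applies to pods created after the label is set. A quick way to confirm it is working is to create a throwaway pod in the labeled namespace and check that it starts with two containers, the application plus istio-proxy; a sketch using a hypothetical pod named injection-test:
# Create a test pod and list its container names
kubectl run injection-test --image=nginx:1.25 -n default
kubectl wait --for=condition=Ready pod/injection-test -n default --timeout=120s
kubectl get pod injection-test -n default -o jsonpath='{.spec.containers[*].name}'   # expect: injection-test istio-proxy
kubectl delete pod injection-test -n default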
4. Core Component Configuration
4.1 Istio Ingress Gateway
# ingress-gateway.yaml
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: istio-ingressgateway
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "*"
  - port:
      number: 443
      name: https
      protocol: HTTPS
    tls:
      mode: PASSTHROUGH
    hosts:
    - "*"
4.2 Sample Application Deployment
# sample-app.yaml
apiVersion: v1
kind: Service
metadata:
  name: web-app
  labels:
    app: web-app
spec:
  ports:
  - port: 80
    name: http
  selector:
    app: web-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-app
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web-app
  template:
    metadata:
      labels:
        app: web-app
    spec:
      containers:
      - name: nginx
        image: nginx:1.25
        ports:
        - containerPort: 80
---
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: web-app
spec:
  hosts:
  - "example.com"
  gateways:
  # The Gateway lives in istio-system, so it must be referenced with a namespace prefix
  - istio-system/istio-ingressgateway
  http:
  - route:
    - destination:
        host: web-app.default.svc.cluster.local
        port:
          number: 80
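With the Service, Deployment, VirtualService and Gateway in place, the route can be exercised from outside the cluster by sending the expected Host header to the ingress gateway; a sketch assuming the external IP is stored in INGRESS_IP:
# Apply the manifests and send a test request through the ingress gateway
kubectl apply -f sample-app.yaml
INGRESS_IP=$(kubectl get svc istio-ingressgateway -n istio-system -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
curl -s -H "Host: example.com" "http://${INGRESS_IP}/" | head -n 5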
5. Monitoring and Observability
5.1 Deploy Prometheus + Grafana
# Deploy the monitoring add-ons (use the release branch that matches your installed Istio version)
kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.19/samples/addons/prometheus.yaml
kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.19/samples/addons/grafana.yaml
5.2 Deploy Kiali (Service Mesh Visualization)
kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.19/samples/addons/kiali.yaml
5.3 Access the Monitoring UIs
# Port-forward the dashboards
kubectl port-forward -n istio-system svc/grafana 3000:3000 &
kubectl port-forward -n istio-system svc/kiali 20001:20001 &
# URLs
# Grafana: http://localhost:3000
# Kiali: http://localhost:20001
6. Security Configuration
6.1 mTLS Policy
# mtls-policy.yaml
apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: default
  namespace: istio-system
spec:
  mtls:
    mode: STRICT
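Because this policy lives in the root namespace (istio-system), it enforces STRICT mTLS mesh-wide: plaintext traffic from workloads without a sidecar is rejected. A quick way to confirm it, using a hypothetical namespace named legacy that is not labeled for injection:
# Apply the policy, then call web-app from a pod without a sidecar; the plaintext request should fail
kubectl apply -f mtls-policy.yaml
kubectl create namespace legacy
kubectl run plain-client --image=curlimages/curl -n legacy --restart=Never --command -- sleep 3600
kubectl wait --for=condition=Ready pod/plain-client -n legacy --timeout=120s
kubectl exec -n legacy plain-client -- curl -s -o /dev/null -w '%{http_code}\n' --max-time 5 http://web-app.default.svc.cluster.local/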
6.2 Authorization Policy
# authorization-policy.yaml
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: require-jwt
  namespace: default
spec:
  selector:
    matchLabels:
      app: web-app
  action: ALLOW
  rules:
  - from:
    - source:
        requestPrincipals: ["*"]
7. Traffic Management
7.1 Canary Release
# canary-release.yaml
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: web-app
spec:
  hosts:
  - web-app.default.svc.cluster.local
  http:
  - route:
    - destination:
        host: web-app
        subset: v1
      weight: 90
    - destination:
        host: web-app
        subset: v2
      weight: 10
---
apiVersion: networking.istio.io/v1beta1
kind: DestinationRule
metadata:
  name: web-app
spec:
  host: web-app.default.svc.cluster.local
  subsets:
  - name: v1
    labels:
      version: v1.0
  - name: v2
    labels:
      version: v2.0
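Once both versions look healthy, the rollout can be promoted by shifting the weights in place; a sketch using a JSON patch on the VirtualService (the field paths assume the route order shown above):
# Move the canary from 90/10 to 50/50
kubectl patch virtualservice web-app --type json -p '[
  {"op": "replace", "path": "/spec/http/0/route/0/weight", "value": 50},
  {"op": "replace", "path": "/spec/http/0/route/1/weight", "value": 50}
]'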
7.2 Fault Injection Testing
# fault-injection.yaml
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: web-app
spec:
  hosts:
  - web-app.default.svc.cluster.local
  http:
  - fault:
      delay:
        percentage:
          value: 10
        fixedDelay: 5s
    route:
    - destination:
        host: web-app.default.svc.cluster.local
        port:
          number: 80
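The injected delay is easy to observe from a sidecar-injected client inside the mesh: roughly 10% of requests should take about 5 seconds longer. A sketch using a hypothetical test pod in the default namespace:
# Send 20 requests and print the total time of each; a few should show the extra 5s
kubectl run delay-test --image=curlimages/curl -n default --restart=Never --command -- sleep 3600
kubectl wait --for=condition=Ready pod/delay-test -n default --timeout=120s
for i in $(seq 1 20); do
  kubectl exec -n default delay-test -- curl -s -o /dev/null -w '%{time_total}\n' http://web-app.default.svc.cluster.local/
done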
8. Automated Deployment Scripts
8.1 Cluster Initialization Script
#!/bin/bash
# init-cluster.sh
set -e
echo "=== Kubernetes + Istio cluster initialization ==="
# Initialize the cluster (the first argument is the control-plane load-balancer endpoint)
kubeadm init --control-plane-endpoint="$1:6443" \
--upload-certs \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12
# Configure kubectl
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Install the network plugin
kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/calico.yaml
# Install Istio
curl -L https://istio.io/downloadIstio | sh -
cd istio-*
sudo cp bin/istioctl /usr/local/bin/
istioctl install --set profile=demo -y
echo "=== Initialization complete ==="
9. Backup and Recovery
9.1 Cluster State Backup
#!/bin/bash
# backup-cluster.sh
# Back up etcd
ETCDCTL_API=3 etcdctl --endpoints=127.0.0.1:2379 \
--cert=/etc/kubernetes/pki/etcd/server.crt \
--key=/etc/kubernetes/pki/etcd/server.key \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
snapshot save /backup/etcd-snapshot-$(date +%Y%m%d).db
# Back up key resources
kubectl get all --all-namespaces -o yaml > /backup/k8s-resources-$(date +%Y%m%d).yaml
kubectl get secrets --all-namespaces -o yaml > /backup/secrets-$(date +%Y%m%d).yaml
# Back up the Istio configuration
kubectl get istiooperators -A -o yaml > /backup/istio-config-$(date +%Y%m%d).yaml
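Recovery follows the reverse path: the snapshot is restored into a fresh data directory and etcd is pointed at it. A hedged sketch for a stacked-etcd control plane (the snapshot filename and restore path are examples only):
# Restore an etcd snapshot into a new data directory
ETCDCTL_API=3 etcdctl snapshot restore /backup/etcd-snapshot-20240101.db \
  --data-dir /var/lib/etcd-restored
# Then stop the etcd static pod, point its hostPath in /etc/kubernetes/manifests/etcd.yaml
# at /var/lib/etcd-restored, and let kubelet recreate it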
10. Monitoring Alert Configuration
10.1 Prometheus Alerting Rules
# prometheus-alerts.yaml
groups:
- name: kubernetes-alerts
  rules:
  - alert: PodCrashLooping
    expr: rate(kube_pod_container_status_restarts_total[5m]) * 60 * 5 > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Pod {{ $labels.pod }} is crash looping"
  - alert: IstioHighLatency
    expr: histogram_quantile(0.99, rate(istio_request_duration_milliseconds_bucket[1m])) > 1000
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: "High request latency detected"
Troubleshooting Commands
# Check cluster status
kubectl get nodes
kubectl get pods -A
# Check Istio status
istioctl verify-install
istioctl analyze
# Check sidecar injection
kubectl describe pod <pod-name> | grep istio-proxy
# Inspect the Envoy configuration
istioctl proxy-config routes <pod-name> -n <namespace>
istioctl proxy-config listeners <pod-name> -n <namespace>
# Traffic debugging dashboards
istioctl dashboard kiali
istioctl dashboard grafana