k8s Lab Environment Deployment
Installing k8s
Installing CentOS 7 in Virtual Machines
Download the installation image
- URL: http://isoredirect.centos.org/centos/7/isos/x86_64/
- Download CentOS-7-x86_64-Minimal-2009 (973 MB)
Create the virtual machines
- Configuration: 2 CPU cores, 3 GB RAM, 30 GB disk, bridged network adapter
- Both virtual machines use the same configuration
Install the OS on the virtual machines
- Point the VM's virtual optical drive at the CentOS 7 iso file
- Boot the VM and run the installer; during installation:
  - Name one machine k8s-master and the other k8s-node1
  - Give each VM a static IP on the same subnet as the host, e.g.
    - 192.168.3.80 (k8s-master)
    - 192.168.3.81 (k8s-node1)
  - Set the root password
- Reboot the VMs once installation completes
VM System Configuration
# Run everything below as root
# Set the DNS server
echo nameserver 192.168.3.1 > /etc/resolv.conf
# Verify name resolution works
ping www.baidu.com
# Upgrade all packages, including the kernel
yum update -y
# Stop the firewall
systemctl stop firewalld
# Keep the firewall from starting at boot
systemctl disable firewalld
# Put SELinux into permissive mode
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
# Add host entries
vi /etc/hosts
192.168.3.80 k8s-master
192.168.3.81 k8s-node1
# Enable IPv4 forwarding and let iptables see bridged traffic
cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
# Set the required sysctl parameters; these persist across reboots
cat <<EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# Apply the sysctl parameters without rebooting
sysctl --system
lsmod | grep br_netfilter
lsmod | grep overlay
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
# Disable the swap partition:
vi /etc/fstab
# Comment out the line: /dev/mapper/centos-swap swap
# Reboot for the change to take effect
reboot
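Swap can also be turned off immediately, without waiting for the reboot (the fstab edit keeps it off on future boots); a quick optional check, not part of the original steps:
# Turn swap off for the running system
swapoff -a
# The Swap row should now show 0 total
free -m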
Install Docker
# Install yum-utils, which provides the yum-config-manager command
yum install -y yum-utils
# Add Docker's official yum repository
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Install Docker
yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
# Start Docker and enable it at boot
systemctl enable --now docker
# Check the service status
systemctl status docker
# Verify the installation
docker run hello-world
Install cri-dockerd
# Install wget
yum install -y wget
# Download the rpm
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4-3.el7.x86_64.rpm
# Install it
rpm -ivh cri-dockerd-0.3.4-3.el7.x86_64.rpm
# Reload the systemd daemon
systemctl daemon-reload
# Start the services and enable them at boot
systemctl enable --now cri-docker.socket cri-docker
# Check status
systemctl status cri-docker.socket
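A quick sanity check, added here: confirm that the CRI socket kubeadm will talk to later has been created:
# This path is what the --cri-socket flag refers to later on
ls -l /var/run/cri-dockerd.sock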
Install runc
# Download
wget https://github.com/opencontainers/runc/releases/download/v1.1.9/runc.amd64
# Install
install -m 755 runc.amd64 /usr/local/bin/runc
# Verify
runc -v
Install kubectl
# Download v1.27.4
curl -LO "https://dl.k8s.io/release/v1.27.4/bin/linux/amd64/kubectl"
# Download the checksum file
curl -LO "https://dl.k8s.io/release/v1.27.4/bin/linux/amd64/kubectl.sha256"
# Validate the binary against the checksum
echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
# Install
install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
# Verify the installation
kubectl version --client
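Optionally, enabling kubectl's bash completion makes the commands in the rest of this guide easier to type; this step is an addition to the original:
# bash-completion provides the framework the completion script hooks into
yum install -y bash-completion
echo 'source <(kubectl completion bash)' >> ~/.bashrc
source ~/.bashrc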
Install kubeadm, kubelet, and kubectl
# Configure the yum repo; the exclude line prevents these packages from being upgraded accidentally
cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF
# Install version 1.27.4
yum -y install kubelet-1.27.4 kubeadm-1.27.4 kubectl-1.27.4 --disableexcludes=kubernetes
# Check the installed versions
yum list kubelet kubeadm kubectl
# Start kubelet and enable it at boot
systemctl enable --now kubelet
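Until kubeadm init runs, kubelet has no cluster configuration and will restart in a loop; that is expected at this stage. An optional way to observe it:
# kubelet shows activating (auto-restart) until the cluster is initialized
systemctl status kubelet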
Configure Docker and cri-dockerd
# Point Docker at domestic registry mirrors and switch to the systemd cgroup driver
tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": [
    "https://docker.mirrors.ustc.edu.cn",
    "http://hub-mirror.c.163.com"
  ],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
# Configure cri-dockerd
vi /usr/lib/systemd/system/cri-docker.service
# Find the ExecStart= line (around line 10) and change it to:
# pause:3.9 matches the sandbox image kubeadm v1.27 expects
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
# Restart the Docker components
systemctl daemon-reload && systemctl restart docker cri-docker.socket cri-docker
# Check the status of the Docker components
systemctl status docker cri-docker.socket cri-docker
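To confirm the daemon.json change took effect, check which cgroup driver Docker now reports; a small optional check:
# Should print: systemd
docker info --format '{{.CgroupDriver}}'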
Setting Up the k8s Cluster
Initialize the master:
# Initialize the master
kubeadm init --kubernetes-version=v1.27.4 --node-name=k8s-master \
--image-repository=registry.aliyuncs.com/google_containers \
--cri-socket=unix:///var/run/cri-dockerd.sock \
--apiserver-advertise-address=192.168.3.80 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12
# If initialization fails, roll back with:
kubeadm reset --cri-socket=unix:///var/run/cri-dockerd.sock
# On success, copy the printed kubeadm join command; it will be run on k8s-node1
# e.g.: kubeadm join 192.168.3.80:6443 --token rvh7p6.ymka5l4f8jeax53q \
#       --discovery-token-ca-cert-hash sha256:172ba55c505403eccafb197d0d0c4e7405870c5080f50290457cdd6962d9217b
# Set the KUBECONFIG environment variable
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
# Apply it
source ~/.bash_profile
# Verify the variable is set
echo $KUBECONFIG
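The control plane should now respond to kubectl; the node will stay NotReady until the network plugin is installed in the next step. These checks are an addition to the original text:
# The master shows NotReady until flannel is deployed
kubectl get nodes
# Control-plane pods should be Running; coredns stays Pending without a CNI
kubectl get pods -n kube-system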
Install the flannel network plugin:
# Download kube-flannel.yml from
# https://github.com/flannel-io/flannel/releases/tag/v0.22.0
# and upload it to k8s-master
# Deploy flannel
kubectl apply -f kube-flannel.yml
# Create the runtime directory
mkdir -p /run/flannel/
# Create the subnet.env file
cat <<EOF | tee /run/flannel/subnet.env
FLANNEL_NETWORK=10.244.0.0/16
FLANNEL_SUBNET=10.244.0.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=true
EOF
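Once flannel is running, the node should flip to Ready; an optional verification:
# The flannel daemonset pod should reach Running
kubectl get pods -n kube-flannel
# The master should now report Ready
kubectl get nodes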
Join the node:
# Run on the master
# Copy the kubeconfig over to node1
scp /etc/kubernetes/admin.conf 192.168.3.81:/etc/kubernetes/
# ---------- the following is run on node1 ----------
# Confirm admin.conf is in place
ls /etc/kubernetes
# Set the KUBECONFIG environment variable
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
# Apply it
source ~/.bash_profile
# Verify the variable is set
echo $KUBECONFIG
# Take the join command printed by kubeadm init on the master and append
# --cri-socket unix:///var/run/cri-dockerd.sock, for example:
kubeadm join 192.168.3.80:6443 --token rvh7p6.ymka5l4f8jeax53q \
--discovery-token-ca-cert-hash sha256:172ba55c505403eccafb197d0d0c4e7405870c5080f50290457cdd6962d9217b \
--cri-socket unix:///var/run/cri-dockerd.sock
# ---------------------------------------------------
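Back on the master, confirm the node registered. If the original token has expired (tokens last 24 hours by default), a new join command can be generated; both steps are additions to the original text:
# Run on the master: both nodes should be listed and become Ready
kubectl get nodes
# If the token expired, print a fresh join command (append --cri-socket as above)
kubeadm token create --print-join-command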
Install the dashboard
# Download the yaml
wget -O k8s-dashboard-v2.7.0.yaml https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
# Edit the yaml
vi k8s-dashboard-v2.7.0.yaml
# Change the Service section as follows:
# kind: Service
# apiVersion: v1
# metadata:
#   labels:
#     k8s-app: kubernetes-dashboard
#   name: kubernetes-dashboard
#   namespace: kubernetes-dashboard
# spec:
#   type: NodePort          # add NodePort
#   ports:
#     - port: 443
#       targetPort: 8443
#       nodePort: 30001     # pin the external port
#   selector:
#     k8s-app: kubernetes-dashboard
# Deploy the dashboard
kubectl apply -f k8s-dashboard-v2.7.0.yaml
# Check the dashboard service port
kubectl -n kubernetes-dashboard get service kubernetes-dashboard
# NAME                   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
# kubernetes-dashboard   NodePort   10.107.209.165   <none>        443:30001/TCP   110m
# Create the file k8s-dashboard-account.yaml
touch k8s-dashboard-account.yaml
vi k8s-dashboard-account.yaml
# Fill it with the yaml content listed at the end of this section
# Create the account and bind the role
kubectl apply -f k8s-dashboard-account.yaml
# View the login token
kubectl -n kubernetes-dashboard describe secret dashboard-admin
# Or extract the token directly
kubectl -n kubernetes-dashboard get secrets dashboard-admin -o go-template --template '{{index .data "token"}}' | base64 --decode
# Copy the printed token and use it to log in at https://192.168.3.80:30001
# Change the dashboard token expiry
# Log in to the dashboard, find kubernetes-dashboard under Deployments, and edit its Yaml
# Search for "--auto-generate-certificates" to locate the args to change
# Add the argument '--token-ttl=0' (never expires); after the change it reads:
#       containers:
#         - name: kubernetes-dashboard
#           image: kubernetesui/dashboard:v2.7.0
#           args:
#             - '--auto-generate-certificates'
#             - '--namespace=kubernetes-dashboard'
#             - '--token-ttl=0'
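The same change can be made from the command line instead of the web UI; a sketch of the equivalent step:
# Opens the Deployment in $EDITOR; add '--token-ttl=0' under args and save
kubectl -n kubernetes-dashboard edit deployment kubernetes-dashboard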
Contents of k8s-dashboard-account.yaml:
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kubernetes-dashboard
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dashboard-admin-binding
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  name: dashboard-admin
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "dashboard-admin"
type: kubernetes.io/service-account-token
Lab Environment Tuning
- Fix kube-scheduler restarts.
  - kubeadm does not configure a serving certificate for the scheduler, so the HTTPS health check fails and the pod keeps restarting.
  - Generate a self-signed certificate and add the matching startup flags to the scheduler's pod manifest.
- With a single master, turn off leader election.
  - Leader election only matters with multiple masters, where several instances of each component keep electing a leader.
  - kube-scheduler and kube-controller-manager enable it by default in their startup flags.
  - Turning it off stops the constant election requests to the apiserver.
- The probes on the master components fire every 10s by default; their frequency can be reduced somewhat, as sketched after this list.
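A sketch of the probe change (the value is an illustrative assumption, not from the original): raise periodSeconds in the probes of the static pod manifests under /etc/kubernetes/manifests; kubelet re-reads the manifests automatically:
vi /etc/kubernetes/manifests/kube-apiserver.yaml
# In livenessProbe (and similarly in readinessProbe/startupProbe):
#     livenessProbe:
#       periodSeconds: 30   # default 10; 30 is an illustrative value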
Generate a self-signed certificate:
mkdir ~/certs
cd ~/certs
# Download and install the CFSSL certificate toolkit
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssl-certinfo /usr/local/bin/cfssljson
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "876000h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > ca-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:kube-scheduler",
      "OU": "System"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
cat > kube-scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "hosts": [
    "127.0.0.1",
    "192.168.3.8",
    "192.168.3.80",
    "192.168.3.81",
    "192.168.3.82"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing"
    }
  ]
}
EOF
# Generate the CA root certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
# Generate the kube-scheduler certificate
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
# Confirm the certificates were generated
ls kube-scheduler*pem
# Copy the certificates into the k8s pki directory
cp kube-scheduler.pem /etc/kubernetes/pki/scheduler.crt
cp kube-scheduler-key.pem /etc/kubernetes/pki/scheduler.key
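The new certificate can be inspected with the cfssl-certinfo tool installed earlier; an optional check:
# Shows the CN, hosts (SANs) and expiry of the generated certificate
cfssl-certinfo -cert kube-scheduler.pem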
# Edit kube-scheduler.yaml
vi /etc/kubernetes/manifests/kube-scheduler.yaml
# Add the container startup flags and the certificate volume mount, then save:
#     - --tls-cert-file=/etc/kubernetes/pki/scheduler.crt
#     - --tls-private-key-file=/etc/kubernetes/pki/scheduler.key
#     - --leader-elect=false        # leader election is only needed with multiple masters
#     volumeMounts:
#       - mountPath: /etc/kubernetes/pki
#         name: k8s-certs
#         readOnly: true
#   volumes:
#     - hostPath:
#         path: /etc/kubernetes/pki
#         type: DirectoryOrCreate
#       name: k8s-certs
# Edit kube-controller-manager.yaml
vi /etc/kubernetes/manifests/kube-controller-manager.yaml
# Change the container startup flag and save
#     - --leader-elect=false        # leader election is only needed with multiple masters
# Restart kubelet
systemctl restart kubelet
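Finally, confirm the scheduler comes back healthy and stops restarting; these checks are an addition to the original text:
# RESTARTS should stop climbing once the serving certificate is in place
kubectl get pods -n kube-system | grep kube-scheduler
# The scheduler's secure health endpoint (default port 10259) should answer ok
curl -k https://127.0.0.1:10259/healthz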