# Set each node's hostname. Run exactly ONE of these lines per machine --
# running all four on the same host would leave it named "node3".
hostnamectl set-hostname master
hostnamectl set-hostname node1
hostnamectl set-hostname node2
hostnamectl set-hostname node3
# Append the static name->IP mapping for every cluster node.
# Run this on ALL nodes so they can resolve each other by hostname.
cat >> /etc/hosts <<'EOF'
192.168.1.200 master
192.168.1.201 node1
192.168.1.202 node2
192.168.1.203 node3
EOF
# Stop and disable firewalld on all nodes (kube-proxy/CNI manage iptables).
# NOTE: "systemctl status" on a stopped unit exits non-zero, so this whole
# chain returns non-zero -- harmless here since it is the last command.
systemctl stop firewalld && systemctl disable firewalld && systemctl status firewalld
# Permanently disable SELinux (effective after the next reboot).
# Anchor the pattern to the SELINUX= line: the original unanchored
# 's/enforcing/disabled/' also rewrites the comment lines in the file
# (e.g. "#     enforcing - SELinux security policy is enforced.").
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
# Also switch off enforcement immediately, without waiting for a reboot.
setenforce 0 || true
1. 开启内核 IPv4 转发需要加载 br_netfilter 模块,请在所有节点执行如下命令:
# Load br_netfilter so traffic crossing Linux bridges is visible to iptables.
modprobe br_netfilter
2.创建 /etc/sysctl.d/k8s.conf文件,添加如下内容:
# Kernel parameters required by Kubernetes:
#   bridge-nf-call-*  : pass bridged IPv4/IPv6 traffic through iptables
#   ip_forward        : allow routing between pod and host networks
#   swappiness=0      : strongly discourage swapping (kubelet dislikes swap)
cat > /etc/sysctl.d/k8s.conf<<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
# Apply the settings immediately (they also persist across reboots).
sysctl -p /etc/sysctl.d/k8s.conf
bridge-nf 使得 netfilter 可以对 Linux 网桥上的 IPv4/ARP/IPv6 包过滤。
比如,设置 net.bridge.bridge-nf-call-iptables=1 后,二层的网桥在转发包时也会被 iptables 的 FORWARD 规则所过滤。常用的选项即上面 k8s.conf 中所列的几项。
# Install the tools kube-proxy needs for IPVS mode.
yum -y install ipset ipvsadm
# Script that loads the IPVS kernel modules (re-run on every boot).
# FIX: the original wrote "modprobe -- ip_Vs" -- module names are
# case-sensitive, so that modprobe fails; it must be "ip_vs".
# NOTE(review): on kernels >= 4.19 "nf_conntrack_ipv4" was renamed to
# "nf_conntrack" -- adjust if this script targets a newer kernel.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
查看安装情况
# Make the module script executable, run it now, and confirm the modules
# actually loaded (grep should list the ip_vs* and nf_conntrack_ipv4 entries).
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
上面脚本创建的 /etc/sysconfig/modules/ipvs.modules 文件保证在节点重启后能自动加载所需模块。
使用 lsmod | grep -e ip_vs -e nf_conntrack_ipv4 命令查看是否已经正确加载所需的内核模块。
# Time synchronization on all nodes -- clock skew breaks TLS certificate
# validation between the API server and kubelets.
yum install chrony -y
systemctl enable chronyd --now
# Verify the configured time sources and the resulting system time.
chronyc sources
date
需要在所有节点上安装Docker、kubelet、kubectl、kubeadm
# Remove any pre-installed Docker packages non-interactively.
# Quote the glob so the shell cannot expand it against files in the cwd.
yum remove -y "docker*"
yum install -y yum-utils
# FIX: the original line was garbled
# ("yum-config-manager--add-repohttp://mirrors.aliyun.com/dockerce Tinux/...")
# and could not run; this is the correct Aliyun docker-ce repo URL.
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce
# Docker daemon config: Aliyun registry mirror plus the systemd cgroup
# driver (kubelet 1.23 defaults to systemd, and the two must agree).
# FIX: the original mirror URL contained spaces
# ("https: // g2gr04ke .mirror.aliyuncs.com"), which is not a valid URL
# and makes dockerd reject the config.
mkdir -p /etc/docker && tee /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://g2gr04ke.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
# Reload unit files first, then enable + start Docker in one step.
# The original ran start -> daemon-reload -> enable -> restart, which
# started the daemon twice and reloaded units after starting.
systemctl daemon-reload
systemctl enable --now docker
systemctl status docker
1、配置kubernetes 阿里云yum源
# Aliyun mirror of the Kubernetes el7 yum repo.
# NOTE(review): gpgcheck/repo_gpgcheck are disabled even though gpgkey is
# listed -- packages are NOT signature-verified; set gpgcheck=1 to verify.
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
2、安装kubernetes的工具
# Pin kubelet/kubeadm/kubectl to the same version (1.23.0) on every node.
yum install -y kubelet-1.23.0 kubeadm-1.23.0 kubectl-1.23.0
检查版本:
# Confirm the installed kubeadm reports the intended v1.23.0.
kubeadm version
3、启动kubelet,并加入开机自启动
# Enable kubelet at boot. NOTE(review): kubelet will crash-loop until
# "kubeadm init"/"kubeadm join" writes its configuration -- that is expected.
systemctl start kubelet && systemctl enable kubelet
可以使用kubeadm命令,也可以使用配置文件进行集群初始化
# Initialize the control plane -- run on the master node only.
# FIX: the original left a trailing "\" after --pod-network-cidr, which made
# the shell treat the following (prose) line as an extra argument.
# --pod-network-cidr must match the flannel Network (10.244.0.0/16).
kubeadm init \
  --apiserver-advertise-address=192.168.1.200 \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.23.0 \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16
也可以先下载好镜像,这样会节约点时间
# Configure kubectl for a regular user: copy the admin kubeconfig into ~/.kube.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Alternative for root (or one-off use): point KUBECONFIG at the admin file
# directly -- only needed if you skip the three commands above; this only
# lasts for the current shell session.
export KUBECONFIG=/etc/kubernetes/admin.conf
# Join the worker nodes to the cluster -- run on node1/node2/node3.
# NOTE: bootstrap tokens expire (24h by default).
kubeadm join 192.168.1.200:6443 --token t7oc4d.qeeuptvexraill47 \
--discovery-token-ca-cert-hash sha256:c7a523ea127e2d2ce03e0d1d68c10fbfc161f4739867d0482dd3135fdd2e8b2d
# If the token has expired, print a fresh join command on the master:
kubeadm token create --print-join-command
所有节点加入完毕后,下载并安装 flannel 网络插件(以下两个下载地址任选其一):
# Download the flannel deployment manifest (either URL works; pick one).
# FIX: the original second URL used ".../releases/tag/v0.22.3/...", which is
# an HTML release page -- release assets are served from
# ".../releases/download/<version>/<file>".
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
wget https://github.com/flannel-io/flannel/releases/download/v0.22.3/kube-flannel.yml
198 command:
199 - /opt/bin/flanneld
200 args:
201 - --ip-masq
202 - --kube-subnet-mgr
203 - --iface=ens33 ###默认是使用第一个网络,一般第一个网卡都是docker占用
204 resources:
205 requests:
206 cpu: "100m"
207 memory: "50Mi"
208 limits:
123 }
124 ]
125 }
126 net-conf.json: |
127 {
128 "Network": "10.244.0.0/16",
129 "Backend": {
130 "Type": "vxlan"
131 }
132 }
133 ---
170 image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
182 image: rancher/mirrored-flannelcni-flannel:v0.17.0
197 image: rancher/mirrored-flannelcni-flannel:v0.17.0
# Pre-pull the flannel images on every node so the apply doesn't time out.
# The manifest references the flannel image twice (init container and main
# container), but one pull suffices -- the original pulled it twice.
docker pull rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
docker pull rancher/mirrored-flannelcni-flannel:v0.17.0
镜像下载时间一般较长,如果不提前拉取镜像而直接执行 apply,容易因超时报错。
# Deploy flannel; flannel pods on all nodes should reach Running shortly.
kubectl apply -f kube-flannel.yml
验证k8s集群安装完毕
# Smoke-test the cluster network/DNS: run a busybox pod and open a shell.
kubectl run bs --image=busybox:1.28.4 -- sleep 24h
kubectl exec -it bs -- sh
/ # ping www.baidu.com
PING www.baidu.com (183.2.172.185): 56 data bytes
64 bytes from 183.2.172.185: seq=0 ttl=127 time=5.915 ms
64 bytes from 183.2.172.185: seq=1 ttl=127 time=6.577 ms
64 bytes from 183.2.172.185: seq=2 ttl=127 time=6.602 ms
64 bytes from 183.2.172.185: seq=3 ttl=127 time=6.438 ms
^C
--- www.baidu.com ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 5.915/6.383/6.602 ms
/ # nslookup kubernetes.default.svc.cluster.local
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: kubernetes.default.svc.cluster.local
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
/ # exit
[root@master ~]# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 110m
测试结果:出公网没问题,域名解析正常,说明安装的 coredns 没有问题,可以解析到 service 上,并且解析正确无误,集群验证成功。
# Install bash-completion
## bash-completion-extras requires the EPEL repo
yum install -y bash-completion bash-completion-extras
# Enable programmable completion in the current shell
source /usr/share/bash-completion/bash_completion
# Enable kubectl completion for the current session only
source <(kubectl completion bash)
## Enable kubectl completion for the current user only
echo 'source <(kubectl completion bash)' >>~/.bashrc
## Enable it globally for all users
echo 'source <(kubectl completion bash)' >/etc/profile.d/k8s.sh && source /etc/profile
# Generate a static kubectl completion script (loaded by bash-completion)
kubectl completion bash >/etc/bash_completion.d/kubectl