Kubeadm

Published: December 28, 2023

Use kubeadm to quickly stand up a Kubernetes (k8s) cluster.

Binary deployment suits large clusters (50+ hosts).

kubeadm is better suited to the business clusters of small and mid-sized companies.

Master:      192.168.233.81   2 CPU / 4 GB   docker, kubelet, kubeadm, kubectl, flannel
Node1:       192.168.233.82   2 CPU / 4 GB   docker, kubelet, kubeadm, kubectl, flannel
Node2:       192.168.233.83   2 CPU / 4 GB   docker, kubelet, kubeadm, kubectl, flannel
Harbor node: 192.168.233.84   2 CPU / 4 GB   docker, docker-compose, harbor

Run the following on all four hosts:
[root@k8s1 ~]# systemctl stop firewalld
[root@k8s1 ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@k8s1 ~]# setenforce 0
[root@k8s1 ~]# sed -i 's/enforcing/disabled/' /etc/selinux/config
[root@k8s1 ~]# iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
[root@k8s1 ~]# swapoff -a
[root@k8s1 ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab
[root@k8s1 ~]# for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i > /dev/null 2>&1 && /sbin/modprobe $i;done
ip_vs_dh
ip_vs_ftp
ip_vs
ip_vs_lblc
ip_vs_lblcr
ip_vs_lc
ip_vs_nq
ip_vs_pe_sip
ip_vs_rr
ip_vs_sed
ip_vs_sh
ip_vs_wlc
ip_vs_wrr
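Note: the modprobe loop above loads the IPVS modules only for the current boot. A minimal sketch to load them automatically at every boot (assuming systemd's modules-load.d mechanism is available):

cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
EOF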
On each host, set its own hostname:
[root@k8s1 ~]# hostnamectl set-hostname master01
# on 192.168.233.82: hostnamectl set-hostname node01
# on 192.168.233.83: hostnamectl set-hostname node02

[root@k8s1 ~]# su
[root@master01 ~]# vim /etc/hosts

192.168.233.81 master01
192.168.233.82 node01
192.168.233.83 node02
192.168.233.84 hub.test.com

[root@master01 ~]# cat > /etc/sysctl.d/kubernetes.conf << EOF
> net.bridge.bridge-nf-call-ip6tables=1
> net.bridge.bridge-nf-call-iptables=1
> net.ipv6.conf.all.disable_ipv6=1
> net.ipv4.ip_forward=1
> EOF
[root@master01 ~]# sysctl --system
[root@master01 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce docker-ce-cli containerd.io

mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://pkm63jfy.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF
[root@master01 ~]# systemctl daemon-reload
[root@master01 ~]# systemctl restart docker.service
[root@master01 ~]# systemctl enable docker.service 
[root@master01 ~]# docker info | grep "Cgroup Driver"
 Cgroup Driver: systemd
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
 
yum install -y kubelet-1.20.15 kubeadm-1.20.15 kubectl-1.20.15
[root@master01 ~]# systemctl enable kubelet.service
[root@master01 ~]# kubeadm config images list --kubernetes-version 1.20.15
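Optionally, pre-pull the listed images from the Aliyun mirror so the init step does not stall on downloads (a sketch; the flags mirror the kubeadm init command below):

kubeadm config images pull \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version=v1.20.15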

kubeadm init \
--apiserver-advertise-address=192.168.233.81 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version=v1.20.15 \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=10.244.0.0/16 \
--token-ttl=0

On node1 and node2, run the join command printed by kubeadm init:
kubeadm join 192.168.233.81:6443 --token yc2pbq.1z84jlj07c78nmxy \
    --discovery-token-ca-cert-hash sha256:de951968754b8d86fdf0db1fe2067c61a242360357895b4a8f85a4750911da85
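The token and CA hash above come from the kubeadm init output on the master. If that output is lost, the full join command can be regenerated at any time:

kubeadm token create --print-join-command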

[root@master01 opt]# mkdir -p $HOME/.kube
[root@master01 opt]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master01 opt]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@master01 opt]# systemctl restart kubelet
[root@master01 opt]# kubectl edit cm kube-proxy -n=kube-system
configmap/kube-proxy edited
In the editor, find the mode field and change it to mode: "ipvs", then save.
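Editing the ConfigMap alone does not reconfigure the running kube-proxy pods; a sketch to restart them and confirm that IPVS took effect (assuming the standard kubeadm k8s-app=kube-proxy label):

kubectl -n kube-system rollout restart daemonset kube-proxy
kubectl -n kube-system logs -l k8s-app=kube-proxy | grep -i ipvs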


[root@master01 opt]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                       ERROR
controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
Both are Unhealthy because their insecure health-check ports are disabled (--port=0); fix their static pod manifests:

[root@master01 opt]# vim /etc/kubernetes/manifests/kube-scheduler.yaml

16     - --bind-address=192.168.233.81
19 #   - --port=0
25         host: 192.168.233.81
39         host: 192.168.233.81
[root@master01 opt]# vim /etc/kubernetes/manifests/kube-controller-manager.yaml
17     - --bind-address=192.168.233.81
26 #   - --port=0
37         host: 192.168.233.81
51         host: 192.168.233.81
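The kubelet watches /etc/kubernetes/manifests and re-creates the two static pods automatically after these edits; shortly afterwards both components should report Healthy:

kubectl get cs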

Upload the flannel image archive flannel.tar and the CNI plugins package cni-plugins-linux-amd64-v0.8.6.tgz to /opt on every node, and upload kube-flannel.yml to the master node:
[root@master01 opt]# cd /opt
[root@master01 opt]# docker load < flannel.tar
[root@master01 opt]# mv /opt/cni /opt/cni_bak
[root@master01 opt]# mkdir -p /opt/cni/bin
[root@master01 opt]# tar zxvf cni-plugins-linux-amd64-v0.8.6.tgz -C /opt/cni/bin
[root@master01 opt]# kubectl apply -f kube-flannel.yml
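Confirm that the flannel DaemonSet pods come up on every node (assuming this version of kube-flannel.yml deploys into kube-system with the usual app=flannel label):

kubectl get pods -n kube-system -l app=flannel -o wide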
[root@master01 opt]# openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -text | grep Not
            Not Before: Dec 28 14:21:13 2023 GMT
            Not After : Dec 25 14:21:13 2033 GMT
[root@master01 opt]# openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text | grep Not
            Not Before: Dec 28 14:21:13 2023 GMT
            Not After : Dec 27 14:21:13 2024 GMT
The apiserver certificate above expires after one year. Upload the certificate-renewal script update-kubeadm-cert.sh:
[root@master01 opt]# chmod 777 update-kubeadm-cert.sh
[root@master01 opt]# ./update-kubeadm-cert.sh all
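Re-check the apiserver certificate afterwards to confirm the validity window was extended:

openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -dates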
Check node and system pod status on the master node:
[root@master01 opt]# kubectl get nodes
[root@master01 opt]# kubectl get pods -n kube-system
Test pod creation:
[root@master01 opt]# kubectl create deployment nginx --image=nginx
deployment.apps/nginx created
[root@master01 opt]# kubectl get pods -o wide
[root@master01 opt]# kubectl expose deployment nginx --port=80 --type=NodePort
[root@master01 opt]# kubectl get svc
[root@master01 opt]# curl http://node01:30221

Install and deploy a Harbor registry for the k8s cluster to pull from. Upload harbor-offline-installer-v2.8.1.tgz and the docker-compose binary to /opt:
[root@k8s4 ~]# systemctl stop firewalld
[root@k8s4 ~]# setenforce 0
[root@k8s4 ~]# cd /opt
[root@k8s4 opt]# ls
containerd                   harbor-offline-installer-v2.8.1.tgz
docker-compose-linux-x86_64  rh
[root@k8s4 opt]# mv docker-compose-linux-x86_64 docker-compose
[root@k8s4 opt]# cp docker-compose /usr/local/bin/
[root@k8s4 opt]# chmod +x /usr/local/bin/docker-compose
[root@k8s4 opt]# tar zxvf harbor-offline-installer-v2.8.1.tgz
[root@k8s4 opt]# cd harbor/
[root@k8s4 harbor]# ls
common.sh             harbor.yml.tmpl  LICENSE
harbor.v2.8.1.tar.gz  install.sh       prepare
[root@k8s4 harbor]# cp harbor.yml.tmpl harbor.yml
[root@k8s4 harbor]# vim harbor.yml

5 hostname: hub.test.com
17   certificate: /data/cert/server.crt
18   private_key: /data/cert/server.key
34 harbor_admin_password: 123456

[root@k8s4 harbor]# mkdir -p /data/cert
[root@k8s4 harbor]# cd /data/cert
[root@k8s4 cert]# openssl genrsa -des3 -out server.key 2048
Enter the pass phrase (123456) twice.
[root@k8s4 cert]# openssl req -new -key server.key -out server.csr
Enter the private key pass phrase: 123456
Country Name: CN
State or Province Name: BJ
Locality Name: BJ
Organization Name: TEST
Organizational Unit Name: TEST
Common Name (the registry domain): hub.test.com
Email Address: admin@test.com
Press Enter for all remaining prompts.
[root@k8s4 cert]# cp server.key server.key.org
[root@k8s4 cert]# ls
server.csr  server.key  server.key.org
[root@k8s4 cert]# openssl rsa -in server.key.org -out server.key
Enter pass phrase for server.key.org:
writing RSA key
[root@k8s4 cert]# openssl x509 -req -days 1000 -in server.csr -signkey server.key -out server.crt
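For reference, the interactive key/CSR/sign steps above can be collapsed into a single non-interactive command (assumption: the same subject fields as entered at the prompts):

openssl req -new -x509 -days 1000 -newkey rsa:2048 -nodes \
  -keyout server.key -out server.crt \
  -subj "/C=CN/ST=BJ/L=BJ/O=TEST/OU=TEST/CN=hub.test.com/emailAddress=admin@test.com"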
[root@k8s4 cert]# chmod +x /data/cert/*
[root@k8s4 cert]# cd /opt/harbor/
[root@k8s4 harbor]# ./prepare
[root@k8s4 harbor]# ./install.sh     # prepare only renders the config files; install.sh actually starts Harbor
Visit https://192.168.233.84 in a browser and log in as admin / 123456.
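If the page does not load, check that all Harbor containers came up:

cd /opt/harbor
docker-compose ps    # every service should show a healthy/Up state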

[root@node01 opt]# mkdir -p /etc/docker/certs.d/hub.test.com/
[root@k8s4 harbor]# cd /
[root@k8s4 /]# scp -r data/ root@192.168.233.82:/
root@192.168.233.82's password:
[root@k8s4 /]# scp -r data/ root@192.168.233.83:/
root@192.168.233.83's password:
[root@k8s4 /]# cd /data/cert/
[root@k8s4 cert]# ls
server.crt  server.csr  server.key  server.key.org

[root@node01 hub.test.com]# cd /data/cert/
[root@node01 cert]# ls
server.crt  server.csr  server.key  server.key.org
[root@node01 cert]# cp server.crt  server.csr  server.key /etc/docker/certs.d/hub.test.com/
[root@node01 cert]# cd /etc/docker/certs.d/hub.test.com/
[root@node01 hub.test.com]# ls
server.crt  server.csr  server.key
[root@node01 hub.test.com]# vim /etc/hosts
192.168.233.84 hub.test.com
[root@node01 hub.test.com]# vim /lib/systemd/system/docker.service
13 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --insecure-registry=hub.test.com
[root@node01 hub.test.com]# systemctl daemon-reload
[root@node01 hub.test.com]# systemctl restart docker
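An alternative to patching the unit file is to mark the registry as insecure in the daemon.json created earlier (a sketch; merge the key into the existing JSON rather than replacing the file), followed by the same daemon-reload and restart:

vim /etc/docker/daemon.json
# add alongside the existing keys:
#   "insecure-registries": ["hub.test.com"]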
[root@node01 hub.test.com]# docker login -u admin -p 123456 https://hub.test.com
[root@node01 hub.test.com]# docker images
[root@node01 hub.test.com]# docker tag nginx:latest hub.test.com/library/nginx:v1    # tag a local image first (assumes nginx:latest is present, e.g. pulled for the test pod)
[root@node01 hub.test.com]# docker push hub.test.com/library/nginx:v1
Check the result in the web UI; if the push fails, check your permissions on the project and whether the repository is public.
[root@master01 opt]# kubectl delete deployment nginx
deployment.apps "nginx" deleted
[root@master01 opt]# kubectl create deployment myapp-test  --image=hub.test.com/library/nginx:v1 --port=80 --replicas=3
[root@master01 opt]# kubectl expose deployment myapp-test  --port=30000 --target-port=80
service/myapp-test exposed
[root@master01 opt]# kubectl get svc,pods
[root@master01 opt]# yum install ipvsadm -y
[root@master01 opt]# ipvsadm -Ln
[root@master01 opt]# kubectl get svc
[root@master01 opt]# curl 10.96.208.213:30000
[root@master01 opt]# kubectl edit svc myapp-test
22     port: 80          # change the service port from 30000 to 80
28   type: NodePort      # change the type from ClusterIP to NodePort
[root@master01 opt]# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        98m
myapp-test   NodePort    10.96.208.213   <none>        80:30096/TCP   17m
nginx        NodePort    10.96.22.254    <none>        80:30221/TCP   59m
In a browser, visit any node IP on port 30096.
This grants cluster-admin to anonymous users so the API server can be accessed without authentication (fine for a lab, insecure in production):
[root@master01 opt]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous

Repeat the same steps on node02:
[root@node02 opt]# mkdir -p /etc/docker/certs.d/hub.test.com/
[root@node02 opt]# cd /data/cert/
[root@node02 cert]# ls
server.crt  server.csr  server.key  server.key.org
[root@node02 cert]# cp server.crt  server.csr  server.key /etc/docker/certs.d/hub.test.com/
[root@node02 cert]# cd /etc/docker/certs.d/hub.test.com/
[root@node02 hub.test.com]# ls
server.crt  server.csr  server.key
[root@node02 hub.test.com]# vim /etc/hosts                            # add: 192.168.233.84 hub.test.com
[root@node02 hub.test.com]# vim /lib/systemd/system/docker.service    # same --insecure-registry=hub.test.com edit as on node01
[root@node02 hub.test.com]# systemctl daemon-reload
[root@node02 hub.test.com]# systemctl restart docker
[root@node02 hub.test.com]# docker login -u admin -p 123456 https://hub.test.com
[root@node02 hub.test.com]# docker tag nginx:latest hub.test.com/library/nginx:v2
[root@node02 hub.test.com]# docker push hub.test.com/library/nginx:v2

Source: https://blog.csdn.net/2301_79410672/article/details/135268298