The relationship between a PV and a PVC is one of dynamic supply and demand: a PVC expresses an application's request for persistent storage, while a PV represents an available piece of persistent storage. The Kubernetes control plane selects and binds a suitable PV to each PVC; the volume is then mounted into the application's Pods, giving them access to persistent storage.
PVs can be provisioned statically or dynamically. A PV and a PVC bind one-to-one; a PVC with no matching PV stays Pending.
After a PVC is deleted, the data in the PV is handled according to the PV's reclaim policy: Retain (the default) keeps the data, while Delete removes it. (A third policy, Recycle, is deprecated.)
The experiments below test these policies; on this setup (Kubernetes 1.26, static NFS PVs) they turned out to make no visible difference, and the reason is explained at the end of that test.
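For reference, the reclaim policy of an existing PV can also be changed in place rather than by editing and recreating it; a minimal sketch (jintai-pv1 is one of the PVs created below):
kubectl patch pv jintai-pv1 -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'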
#Use /dirfornfs on the master2 node as the NFS export
yum -y install nfs-utils
#Create the NFS directories and add them to /etc/exports
mkdir -p /dirfornfs/{1..5}
#
echo "/dirfornfs *(rw,no_root_squash)
/dirfornfs/1 *(rw,no_root_squash)
/dirfornfs/2 *(rw,no_root_squash)
/dirfornfs/3 *(rw,no_root_squash)
/dirfornfs/4 *(rw,no_root_squash)
/dirfornfs/5 *(rw,no_root_squash)" > /etc/exports
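#Apply the exports and make sure the NFS service is running; a sketch assuming the unit is named
#nfs as used later in this lab (on many distros it is nfs-server instead)
exportfs -arv
systemctl enable --now nfs
#Verify the exports are visible
showmount -e 192.168.8.159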
#Create the PV resources
cat > jintai-PV.yaml << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jintai-pv1
  labels:
    stor: pv1
spec:
  nfs:
    server: 192.168.8.159
    path: /dirfornfs/1
  accessModes: ["ReadWriteOnce"] #read-write, mountable by a single node
  capacity:
    storage: 1.5Gi #1.5Gi of capacity
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jintai-pv2
  labels:
    stor: pv2
spec:
  nfs:
    server: 192.168.8.159
    path: /dirfornfs/2
  accessModes: ["ReadWriteMany"] #read-write, mountable by many nodes
  capacity:
    storage: 2Gi #2Gi of capacity
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jintai-pv3
  labels:
    stor: pv3
spec:
  nfs:
    server: 192.168.8.159
    path: /dirfornfs/3
  accessModes: ["ReadOnlyMany"] #read-only, mountable by many nodes
  capacity:
    storage: 3Gi #3Gi of capacity
EOF
kubectl apply -f jintai-PV.yaml
kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS
jintai-pv1 1536Mi RWO Retain Available #single-node read-write
jintai-pv2 2Gi RWX Retain Available #multi-node read-write
jintai-pv3 3Gi ROX Retain Available #multi-node read-only
#Create the PVCs
cat > pvc.yaml << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc1
spec:
  accessModes: ["ReadWriteOnce"] #must match the access mode of the target PV
  selector:
    matchLabels:
      stor: pv1 #selects the PV by its label
  resources:
    requests:
      storage: 1.5Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc2
spec:
  accessModes: ["ReadWriteMany"] #must match the access mode of the target PV
  selector:
    matchLabels:
      stor: pv2
  resources:
    requests:
      storage: 2Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc3
spec:
  accessModes: ["ReadOnlyMany"] #must match the access mode of the target PV
  selector:
    matchLabels:
      stor: pv3
  resources:
    requests:
      storage: 3Gi
EOF
kubectl apply -f pvc.yaml
kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc1 Bound jintai-pv1 1536Mi RWO 54s
pvc2 Bound jintai-pv2 2Gi RWX 54s
pvc3 Bound jintai-pv3 3Gi ROX 54s
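#If a PVC ever stays Pending, describe it; the Events section explains why no PV matched
kubectl describe pvc pvc1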
#Create a Deployment for each PVC and mount it
cat > pod-pvc.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pod-pvc1
spec:
  replicas: 3
  selector:
    matchLabels:
      stor: pvc1 #each Deployment gets its own label so their selectors do not overlap
  template:
    metadata:
      labels:
        stor: pvc1
    spec:
      containers:
      - name: test
        image: docker.io/library/nginx
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: pvc1
      volumes:
      - name: pvc1
        persistentVolumeClaim:
          claimName: pvc1
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pod-pvc2
spec:
  replicas: 3
  selector:
    matchLabels:
      stor: pvc2
  template:
    metadata:
      labels:
        stor: pvc2
    spec:
      containers:
      - name: test
        image: docker.io/library/nginx
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: pvc2
      volumes:
      - name: pvc2
        persistentVolumeClaim:
          claimName: pvc2
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pod-pvc3
spec:
  replicas: 3
  selector:
    matchLabels:
      stor: pvc3
  template:
    metadata:
      labels:
        stor: pvc3
    spec:
      containers:
      - name: test
        image: docker.io/library/nginx
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: pvc3
      volumes:
      - name: pvc3
        persistentVolumeClaim:
          claimName: pvc3
EOF
kubectl apply -f pod-pvc.yaml
kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-pvc1-69b655447-5zmjn 1/1 Running 0 95s 10.10.179.12 ws-k8s-node1 <none> <none>
pod-pvc1-69b655447-crnfr 1/1 Running 0 95s 10.10.179.11 ws-k8s-node1 <none> <none>
pod-pvc1-69b655447-kzpf5 1/1 Running 0 95s 10.10.234.75 ws-k8s-node2 <none> <none>
pod-pvc2-697979cddb-6x658 1/1 Running 0 95s 10.10.179.13 ws-k8s-node1 <none> <none>
pod-pvc2-697979cddb-bxcxm 1/1 Running 0 95s 10.10.179.15 ws-k8s-node1 <none> <none>
pod-pvc2-697979cddb-zffwh 1/1 Running 0 95s 10.10.234.74 ws-k8s-node2 <none> <none>
pod-pvc3-7588fbc489-2v8pt 1/1 Running 0 95s 10.10.179.14 ws-k8s-node1 <none> <none>
pod-pvc3-7588fbc489-5scpd 1/1 Running 0 95s 10.10.234.76 ws-k8s-node2 <none> <none>
pod-pvc3-7588fbc489-b7cp9 1/1 Running 0 95s 10.10.234.77 ws-k8s-node2 <none> <none>
#Exec into pods on different nodes to check whether the data is shared
#pvc1
kubectl exec -it pod-pvc1-69b655447-5zmjn -- /bin/bash
cd /usr/share/nginx/html/
touch 11
exit
kubectl exec -it pod-pvc1-69b655447-kzpf5 -- /bin/bash
ls /usr/share/nginx/html/11
/usr/share/nginx/html/11 #pods on different nodes can still access this RWO PV: NFS does not enforce access modes
#pvc2 behaves the same; skipped
#pvc3: ACCESS MODES is ROX, so file creation should fail
root@pod-pvc3-7588fbc489-b7cp9:/# touch 123454 /usr/share/nginx/html/
root@pod-pvc3-7588fbc489-b7cp9:/#
root@pod-pvc3-7588fbc489-b7cp9:/# ls /usr/share/nginx/html/
root@pod-pvc3-7588fbc489-b7cp9:/# #(no output)
#Caveat: as typed, touch creates ./123454 in the working directory and only updates the html
#directory's mtime, so the empty listing alone is not conclusive; a proper test would be
#touch /usr/share/nginx/html/123454. Access modes are matching criteria at bind time and are not
#enforced by the NFS mount itself.
#
#Delete everything
kubectl delete -f pod-pvc.yaml
kubectl delete -f pvc.yaml
kubectl delete -f jintai-PV.yaml
#Recreate everything
kubectl apply -f jintai-PV.yaml
kubectl apply -f pvc.yaml
kubectl apply -f pod-pvc.yaml
kubectl exec -it pod-pvc1-69b655447-46h5h -- /bin/bash
ls /usr/share/nginx/html/
11 #the data is still there (Retain)
#Change the reclaim policy to Delete
#
vim jintai-PV.yaml
...
  capacity:
    storage: 1.5Gi #1.5Gi of capacity
  persistentVolumeReclaimPolicy: Delete #reclaim policy; a sibling of capacity under spec
---
...
kubectl delete -f pod-pvc.yaml
kubectl delete -f pvc.yaml
kubectl delete -f jintai-PV.yaml
kubectl apply -f jintai-PV.yaml
kubectl apply -f pvc.yaml
kubectl apply -f pod-pvc.yaml
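#Confirm the new reclaim policy took effect (custom-columns is a standard kubectl output format)
kubectl get pv -o custom-columns=NAME:.metadata.name,RECLAIM:.spec.persistentVolumeReclaimPolicy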
#Create a standalone Pod that uses pvc1
cat > pod-test.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: pod-pvc-test
spec:
  containers:
  - name: test10
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: pvc1
  volumes:
  - name: pvc1
    persistentVolumeClaim:
      claimName: pvc1
EOF
kubectl apply -f pod-test.yaml
#Create a file from the test pod
kubectl exec -it pod-pvc-test -- /bin/bash
cd /usr/share/nginx/html/
mkdir 123
exit
#Check from another pod
kubectl exec -it pod-pvc1-69b655447-7lxwl -- /bin/bash
ls /usr/share/nginx/html/
123 12345
#Delete the test pod that created the file
kubectl delete -f pod-test.yaml
#Check again from the other pod
ls /usr/share/nginx/html/
123 12345 #still present
#
#In this test Delete and Retain behaved identically: the data was never removed. That is expected
#here, for two reasons: reclaim only runs when the PVC is deleted (deleting Pods never triggers it),
#and a statically provisioned NFS PV has no deleter plugin, so a Delete policy cannot actually wipe
#the export; the PV typically just ends up in the Failed phase.
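#Optional check: delete only a PVC and watch its PV's phase; with Retain it becomes Released, and
#with Delete on a static NFS PV it typically becomes Failed
kubectl delete pvc pvc1
kubectl get pv jintai-pv1 -o jsonpath='{.status.phase}'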
#Clean up
kubectl delete -f pod-pvc.yaml
kubectl delete -f pvc.yaml
kubectl delete -f jintai-PV.yaml
#Inspect the StorageClass API with kubectl explain
kubectl explain storageclass
allowVolumeExpansion <boolean> # whether volumes may be expanded (shrinking is not supported)
allowedTopologies <[]Object> # node topology constraints restricting where volumes may be provisioned
apiVersion <string>
kind <string>
metadata <Object>
mountOptions <[]string> # mount options used when mounting the volume
parameters <map[string]string> # provisioner-specific parameters
provisioner <string> -required- # the provisioner; each provisioner takes different parameters
reclaimPolicy <string> # reclaim policy applied to dynamically provisioned PVs
volumeBindingMode <string> # when volume binding and dynamic provisioning occur (Immediate or WaitForFirstConsumer)
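#A sketch exercising the fields above; the values are illustrative only, and archiveOnDelete is a
#parameter specific to the nfs-subdir-external-provisioner used below, not a core Kubernetes field
cat > sc-example.yaml << EOF
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: example-sc
provisioner: example.com/nfs
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
mountOptions:
- hard
- nfsvers=4.1
parameters:
  archiveOnDelete: "true"
EOF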
#
#On the NFS server, add a directory for this experiment
mkdir -p /dirfornfs/nfs
echo "/dirfornfs/nfs *(rw,no_root_squash)" >> /etc/exports
exportfs -arv
systemctl restart nfs
#Create a ServiceAccount and RBAC for the NFS provisioner
cat > serviceaccount.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
EOF
kubectl apply -f serviceaccount.yaml
kubectl create clusterrolebinding nfs-provisioner-clusterrolebinding --clusterrole=cluster-admin --serviceaccount=default:nfs-provisioner
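#cluster-admin works for a lab but is far broader than needed. A narrower ClusterRole sketch, based
#on the upstream nfs-subdir-external-provisioner RBAC (leader election may additionally need
#endpoints/leases permissions); bind this instead of cluster-admin:
cat > nfs-provisioner-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nfs-provisioner-runner
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
EOF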
#
cat > nfs.yaml << EOF
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-provisioner
spec:
  selector:
    matchLabels:
      app: nfs-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      serviceAccountName: nfs-provisioner #serviceAccountName is the current field; serviceAccount is deprecated
      containers:
      - name: nfs-provisioner
        image: registry.cn-beijing.aliyuncs.com/mydlq/nfs-subdir-external-provisioner:v4.0.0
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env: #environment variables
        - name: PROVISIONER_NAME #provisioner name; the StorageClass below must reference the same value
          value: example.com/nfs
        - name: NFS_SERVER
          value: 192.168.8.159
        - name: NFS_PATH
          value: /dirfornfs/nfs/
      volumes:
      - name: nfs-client-root
        nfs:
          server: 192.168.8.159
          path: /dirfornfs/nfs/
EOF
kubectl apply -f nfs.yaml
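#Check that the provisioner pod is running before creating the StorageClass
kubectl get pods -l app=nfs-provisioner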
#Create an NFS StorageClass
cat > nfs-storageclass.yaml << EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs #StorageClass name
provisioner: example.com/nfs #must match the PROVISIONER_NAME set above
EOF
kubectl apply -f nfs-storageclass.yaml
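#Optionally mark it as the cluster default so that PVCs omitting storageClassName use it (this is
#the standard default-class annotation)
kubectl patch storageclass nfs -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'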
#Create a PVC backed by the StorageClass
cat > pvc-sc.yaml << EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test
spec:
  accessModes: ["ReadWriteMany"] #read-write by many nodes
  resources:
    requests:
      storage: 1Gi
  storageClassName: nfs #must match the StorageClass name above
EOF
kubectl apply -f pvc-sc.yaml
kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-660c088b-c9ba-412b-8c54-7d0716844b24 1Gi RWX Delete Bound default/test nfs 2m58s
kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
test Bound pvc-660c088b-c9ba-412b-8c54-7d0716844b24 1Gi RWX nfs 3m13s
#Binding complete
#
cat > pvc-test.yaml << EOF
kind: Pod
apiVersion: v1
metadata:
  name: read-pod
spec:
  containers:
  - name: read-pod
    image: nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: nfs-pvc
      mountPath: /usr/share/nginx/html
  restartPolicy: "Never"
  volumes:
  - name: nfs-pvc
    persistentVolumeClaim:
      claimName: test #the PVC created above
EOF
kubectl apply -f pvc-test.yaml
kubectl get pods
NAME READY STATUS RESTARTS AGE
nfs-provisioner-5468dbd878-95jmz 1/1 Running 0 15m
read-pod 1/1 Running 0 14m
#Running normally
#On the NFS server, the provisioner automatically created a directory for the volume
ls /dirfornfs/nfs/
default-test-pvc-660c088b-c9ba-412b-8c54-7d0716844b24
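#The directory name follows the provisioner's ${namespace}-${pvcName}-${pvName} pattern. Deleting
#the PVC also deletes the dynamically created PV (its reclaim policy defaults to Delete); whether
#the backing directory is removed or renamed with an archived- prefix depends on the StorageClass's
#archiveOnDelete parameter (nfs-subdir-external-provisioner behavior, not core Kubernetes).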
#Clean up
kubectl delete -f pvc-test.yaml
kubectl delete -f pvc-sc.yaml
kubectl delete -f nfs-storageclass.yaml
kubectl delete -f nfs.yaml