mkdir -p /var/nfs/data/harbor/chartmuseum
mkdir -p /var/nfs/data/harbor/database
mkdir -p /var/nfs/data/harbor/jobservice
mkdir -p /var/nfs/data/harbor/redis
mkdir -p /var/nfs/data/harbor/registry
mkdir -p /var/nfs/data/harbor/trivy
chmod -R 777 /var/nfs/data/harbor
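If /var/nfs/data is not already exported by the NFS server, a minimal /etc/exports entry could look like the following (the client network range is an assumption; adjust it to your cluster subnet), followed by a reload of the export table:
/var/nfs/data  192.168.0.0/16(rw,sync,no_root_squash,no_subtree_check)
exportfs -ra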
helm repo add harbor https://helm.goharbor.io
helm repo update
kubectl create namespace harbor
mkdir -p /root/harbor
cd /root/harbor
vi /root/harbor/harbor-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-registry
  labels:
    app: harbor-registry
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: "nfs-client"
  mountOptions:
    - hard
  nfs:
    path: /mydata/k8s/public/harbor/registry
    server: 192.168.5.22
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-chartmuseum
  labels:
    app: harbor-chartmuseum
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: "nfs-client"
  mountOptions:
    - hard
  nfs:
    path: /mydata/k8s/public/harbor/chartmuseum
    server: 192.168.5.22
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-jobservice
  labels:
    app: harbor-jobservice
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: "nfs-client"
  mountOptions:
    - hard
  nfs:
    path: /mydata/k8s/public/harbor/jobservice
    server: 192.168.5.22
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-database
  labels:
    app: harbor-database
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: "nfs-client"
  mountOptions:
    - hard
  nfs:
    path: /mydata/k8s/public/harbor/database
    server: 192.168.5.22
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-redis
  labels:
    app: harbor-redis
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: "nfs-client"
  mountOptions:
    - hard
  nfs:
    path: /mydata/k8s/public/harbor/redis
    server: 192.168.5.22
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-trivy
  labels:
    app: harbor-trivy
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: "nfs-client"
  mountOptions:
    - hard
  nfs:
    path: /mydata/k8s/public/harbor/trivy
    server: 192.168.5.22
sed -i 's/192.168.5.22/k8s-master01/g' /root/harbor/harbor-pv.yaml
sed -i 's/\/mydata\/k8s\/public/\/var\/nfs\/data/g' /root/harbor/harbor-pv.yaml
sed -i 's/nfs-client/nfs-sc/g' /root/harbor/harbor-pv.yaml
kubectl apply -f /root/harbor/harbor-pv.yaml
kubectl delete -f /root/harbor/harbor-pv.yaml
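The kubectl delete command above is only needed when you want to tear the PVs down again. After applying, a quick check (a sketch) that all six PVs were created:
kubectl get pv | grep harbor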
vi /root/harbor/harbor-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-registry
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: "nfs-client"
  resources:
    requests:
      storage: 50Gi
  selector:
    matchLabels:
      app: harbor-registry
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-chartmuseum
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: "nfs-client"
  resources:
    requests:
      storage: 5Gi
  selector:
    matchLabels:
      app: harbor-chartmuseum
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-jobservice
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: "nfs-client"
  resources:
    requests:
      storage: 5Gi
  selector:
    matchLabels:
      app: harbor-jobservice
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-database
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: "nfs-client"
  resources:
    requests:
      storage: 5Gi
  selector:
    matchLabels:
      app: harbor-database
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-redis
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: "nfs-client"
  resources:
    requests:
      storage: 5Gi
  selector:
    matchLabels:
      app: harbor-redis
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-trivy
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: "nfs-client"
  resources:
    requests:
      storage: 5Gi
  selector:
    matchLabels:
      app: harbor-trivy
sed -i 's/nfs-client/nfs-sc/g' /root/harbor/harbor-pvc.yaml
kubectl apply -f /root/harbor/harbor-pvc.yaml -n harbor
kubectl delete -f harbor-pvc.yaml -n harbor
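As with the PVs, the delete command is only for cleanup. To confirm the claims were created (they should show Bound once each one matches its labeled PV, or Pending if the storage class binds on first consumer):
kubectl get pvc -n harbor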
The relevant expose and persistence sections of /root/harbor/harbor-values.yaml:
expose:
  type: ingress
  tls:
    enabled: true
  clusterIP:
    name: harbor
    annotations: {}
    ports:
      httpPort: 80
      httpsPort: 443
      notaryPort: 4443
  ingress:
    hosts:
      core: harbor-core.public.192.168.4.224.nip.io
      notary: harbor-notary.public.192.168.4.224.nip.io
    controller: default
    kubeVersionOverride: ""
    annotations:
      ingress.kubernetes.io/ssl-redirect: "true"
      ingress.kubernetes.io/proxy-body-size: "0"
      nginx.ingress.kubernetes.io/ssl-redirect: "true"
      nginx.ingress.kubernetes.io/proxy-body-size: "0"
    notary:
      annotations: {}
    harbor:
      annotations: {}

externalURL: https://harbor-core.public.192.168.4.224.nip.io:31839

persistence:
  enabled: true
  resourcePolicy: "keep"
  persistentVolumeClaim:
    registry:
      existingClaim: "harbor-registry"
      storageClass: "nfs-client"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 50Gi
    chartmuseum:
      existingClaim: "harbor-chartmuseum"
      storageClass: "nfs-client"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
    jobservice:
      existingClaim: "harbor-jobservice"
      storageClass: "nfs-client"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
    database:
      existingClaim: "harbor-database"
      storageClass: "nfs-client"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
    redis:
      existingClaim: "harbor-redis"
      storageClass: "nfs-client"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
    trivy:
      existingClaim: "harbor-trivy"
      storageClass: "nfs-client"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
sed -i 's/nfs-client/nfs-sc/g' /root/harbor/harbor-values.yaml
To expose Harbor with a NodePort instead, replace the expose section of harbor-values.yaml with:
expose:
  type: nodePort
  tls:
    enabled: false
  clusterIP:
    name: harbor
    annotations: {}
    ports:
      httpPort: 80
      httpsPort: 443
      notaryPort: 4443

externalURL: http://yourip:3xxxx
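The actual port behind the 3xxxx placeholder can be read from the NodePort service (named harbor by default in the chart) after the install; a quick sketch:
kubectl get svc harbor -n harbor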
Reference: "After installing Harbor with Helm, login keeps reporting wrong username or password (Harbor default password not working)" (CSDN blog)
Reference: "K8S Helm install ingress-nginx" (CSDN blog)
To expose Harbor through Ingress (the ingress hosts and externalURL are set with --set flags in the helm install below):
expose:
  type: ingress
  tls:
    enabled: true
  clusterIP:
    name: harbor
    annotations: {}
    ports:
      httpPort: 80
      httpsPort: 443
      notaryPort: 4443
  ingress:
    controller: default
    kubeVersionOverride: ""
    annotations:
      ingress.kubernetes.io/ssl-redirect: "true"
      ingress.kubernetes.io/proxy-body-size: "0"
      nginx.ingress.kubernetes.io/ssl-redirect: "true"
      nginx.ingress.kubernetes.io/proxy-body-size: "0"
    notary:
      annotations: {}
    harbor:
      annotations: {}

persistence:
  # (persistence settings unchanged from the section shown above)
helm install harbor harbor/harbor --namespace harbor --create-namespace \
  --values harbor-values.yaml \
  --set expose.ingress.className=nginx \
  --set expose.ingress.hosts.core=harbor.david.org \
  --set expose.ingress.hosts.notary=notary.david.org \
  --set externalURL=https://harbor.david.org \
  --set harborAdminPassword="Harbor12345"
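To confirm the release deployed before moving on, a quick check (a sketch):
helm status harbor -n harbor
helm list -n harbor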
[root@k8s-master01 harbor]# kubectl describe ingress -n harbor harbor-ingress
Name:             harbor-ingress
Labels:           app=harbor
                  app.kubernetes.io/managed-by=Helm
                  chart=harbor
                  heritage=Helm
                  release=harbor
Namespace:        harbor
Address:
Ingress Class:    nginx
Default backend:  <default>
TLS:
  harbor-ingress terminates harbor.david.org
Rules:
  Host              Path  Backends
  ----              ----  --------
  harbor.david.org
                    /api/        harbor-core:80 (10.244.1.102:8080)
                    /service/    harbor-core:80 (10.244.1.102:8080)
                    /v2/         harbor-core:80 (10.244.1.102:8080)
                    /chartrepo/  harbor-core:80 (10.244.1.102:8080)
                    /c/          harbor-core:80 (10.244.1.102:8080)
                    /            harbor-portal:80 (10.244.1.100:8080)
Annotations:      ingress.kubernetes.io/proxy-body-size: 0
                  ingress.kubernetes.io/ssl-redirect: true
                  meta.helm.sh/release-name: harbor
                  meta.helm.sh/release-namespace: harbor
                  nginx.ingress.kubernetes.io/proxy-body-size: 0
                  nginx.ingress.kubernetes.io/ssl-redirect: true
Events:
  Type    Reason  Age   From                      Message
  ----    ------  ----  ----                      -------
  Normal  Sync    11m   nginx-ingress-controller  Scheduled for sync
helm install harbor harbor/harbor -f harbor-values.yaml -n harbor
helm uninstall harbor -n harbor
kubectl get pods -owide -n harbor
kubectl get ingress -owide -n harbor
Username: admin
Password: Harbor12345
https://harbor-core.myharbor.io:3xxxx
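harbor.david.org is not a public DNS name; if it does not resolve in your environment, one option is a hosts entry pointing at a node that runs the ingress-nginx controller (the IP below is an assumption taken from the nip.io hostnames used earlier):
echo "192.168.4.224 harbor.david.org notary.david.org" >> /etc/hosts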
If the chart tries to create a PVC that already exists (for example one of the manually created claims above) and that claim lacks Helm ownership metadata, the install fails with an error like:
Error: INSTALLATION FAILED: Unable to continue with install:
PersistentVolumeClaim "harbor-jobservice" in namespace "harbor" exists and cannot be imported into the current release:
invalid ownership metadata;
label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm";
annotation validation error: missing key "meta.helm.sh/release-name": must be set to "harbor";
annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "harbor"
kubectl delete pvc harbor-jobservice -n harbor
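Alternatively, instead of deleting the claim (which discards its data), Helm can adopt the existing PVC if you add exactly the ownership metadata the error asks for; a sketch:
kubectl label pvc harbor-jobservice -n harbor app.kubernetes.io/managed-by=Helm
kubectl annotate pvc harbor-jobservice -n harbor meta.helm.sh/release-name=harbor meta.helm.sh/release-namespace=harbor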
kubectl get pods -owide -n harbor
kubectl describe pods -n harbor <pod-name>
[root@master01 harbor]# vi /etc/docker/daemon.json
[root@master01 harbor]# cat /etc/docker/daemon.json
{
  "registry-mirrors": ["http://hub-mirror.c.163.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "insecure-registries": [
    "yourip:3xxxx",
    "0.0.0.0/0"
  ]
}
[root@master01 harbor]# systemctl daemon-reload
[root@master01 harbor]# systemctl restart docker.service
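To confirm Docker picked up the insecure-registry setting, a quick check (a sketch):
docker info | grep -A 3 "Insecure Registries"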
[root@master01 harbor]# docker login yourip:3xxxx
Username: admin
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
Login Succeeded
docker images
docker tag hello-world:latest yourip:3xxxx/yourlib/hello-world:latest
docker push yourip:3xxxx/yourlib/hello-world:latest
In the Harbor web UI, check the yourlib project: hello-world:latest has been pushed successfully.
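As a further check that pulls also work through Harbor, the image can be pulled back again, for example after removing the local copy (or from another node that has the same insecure-registries setting):
docker rmi yourip:3xxxx/yourlib/hello-world:latest
docker pull yourip:3xxxx/yourlib/hello-world:latest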
vi /etc/containerd/config.toml
    [plugins."io.containerd.grpc.v1.cri".registry.auths]

    [plugins."io.containerd.grpc.v1.cri".registry.configs]
      [plugins."io.containerd.grpc.v1.cri".registry.configs."yourip:3xxxx".tls]
        insecure_skip_verify = true

    [plugins."io.containerd.grpc.v1.cri".registry.headers]

    [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors."yourip:3xxxx"]
        endpoint = ["http://yourip:3xxxx"]
# Reload the configuration and restart containerd
systemctl daemon-reload && systemctl restart containerd
# You can create a Deployment to test pulling the image from Harbor (see the sketch below)
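A minimal Deployment sketch for that test (the name is arbitrary; it assumes the yourlib project is public or that an imagePullSecret is configured; note that hello-world exits immediately, so the pod will keep restarting — the point is only to confirm a successful image pull):
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-from-harbor
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hello-from-harbor
  template:
    metadata:
      labels:
        app: hello-from-harbor
    spec:
      containers:
      - name: hello
        image: yourip:3xxxx/yourlib/hello-world:latest   # the image pushed above
Save it as hello-deploy.yaml, apply it, then check the pull events:
kubectl apply -f hello-deploy.yaml
kubectl describe pod -l app=hello-from-harbor | grep -A 5 Events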