Ceph version: v13.2.5 (Mimic stable release)
[root@ceph-node1 ceph]# ceph osd pool create k8s 128 128
pool 'k8s' created
[root@ceph-node1 ceph]# ceph osd pool ls
k8s
This environment uses Ceph's admin account directly. In production you should create separate accounts for clients with different roles, for example: ceph auth get-or-create client.k8s mon 'allow r' osd 'allow rwx pool=k8s' -o ceph.client.k8s.keyring
Get the account's key (base64-encoded):
[root@ceph-node1 ceph]# ceph auth get-key client.admin | base64
QVFDMmIrWmNEL3JTS2hBQWwwdmR3eGJGMmVYNUM3SjdDUGZZbkE9PQ==
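If you follow the production advice above and create a dedicated client.k8s account, its key is obtained and encoded the same way; a minimal sketch reusing the command shown earlier (the Secret created later would then carry this key instead of the admin key):
ceph auth get-or-create client.k8s mon 'allow r' osd 'allow rwx pool=k8s' -o ceph.client.k8s.keyring
ceph auth get-key client.k8s | base64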
When a StorageClass is used to create PVs dynamically, the controller-manager creates the image on Ceph automatically, so the rbd command must be available to it.
(1) If the cluster was deployed with kubeadm, the official controller-manager image does not contain the rbd command, so we deploy an external rbd-provisioner instead:
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: default
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: rbd-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
        - name: rbd-provisioner
          image: quay.io/external_storage/rbd-provisioner:latest
          env:
            - name: PROVISIONER_NAME
              value: ceph.com/rbd
      serviceAccount: rbd-provisioner
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
kubectl apply -f rbd-provisioner.yaml
Note: the rbd-provisioner image must be compatible with the Ceph version. The latest image is used here, which according to the project's notes supports Ceph Mimic.
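Before continuing, it is worth confirming that the provisioner pod is actually running; a quick check, assuming the default namespace and the app: rbd-provisioner label from the Deployment above:
kubectl get pods -l app=rbd-provisioner
kubectl logs -l app=rbd-provisioner --tail=20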
(2) If the cluster was deployed from binaries, simply install ceph-common on the master node.
YUM repository:
[Ceph]
name=Ceph packages for $basearch
baseurl=http://download.ceph.com/rpm-mimic/el7/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://download.ceph.com/rpm-mimic/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://download.ceph.com/rpm-mimic/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
# Install the client
yum -y install ceph-common-13.2.5
# Copy the keyring file
Copy Ceph's ceph.client.admin.keyring file to the /etc/ceph directory on the master node.
When a pod is created, kubelet uses the rbd command to map and mount the Ceph image backing the PV, so the Ceph client ceph-common-13.2.5 must also be installed on all worker nodes.
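As a rough sketch of that worker-node preparation (hostnames are the ones used in this environment; it assumes the repo above has been saved as /etc/yum.repos.d/ceph.repo on the master):
# Push the repo to a worker node and install the matching client there
scp /etc/yum.repos.d/ceph.repo root@k8s-node01:/etc/yum.repos.d/
ssh root@k8s-node01 "yum -y install ceph-common-13.2.5 && rbd --version"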
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: ceph-sc
  namespace: default
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: ceph.com/rbd
reclaimPolicy: Retain
parameters:
  monitors: 172.16.1.31:6789,172.16.1.32:6789,172.16.1.33:6789
  adminId: admin
  adminSecretName: storage-secret
  adminSecretNamespace: default
  pool: k8s
  fsType: xfs
  userId: admin
  userSecretName: storage-secret
  imageFormat: "2"
  imageFeatures: "layering"
kubectl apply -f storage_class.yaml
apiVersion: v1
kind: Secret
metadata:
  name: storage-secret
  namespace: default
data:
  key: QVFDMmIrWmNEL3JTS2hBQWwwdmR3eGJGMmVYNUM3SjdDUGZZbkE9PQ==
type: kubernetes.io/rbd
kubectl apply -f storage_secret.yaml
Note: the provisioner value in the StorageClass must match the PROVISIONER_NAME configured in the rbd-provisioner Deployment.
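A quick way to confirm that the pieces reference each other correctly, using the names defined above (a sketch; adjust if you renamed the objects):
# Must print ceph.com/rbd, the same value as PROVISIONER_NAME in the rbd-provisioner Deployment
kubectl get sc ceph-sc -o jsonpath='{.provisioner}'
# Must print kubernetes.io/rbd
kubectl get secret storage-secret -o jsonpath='{.type}'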
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-pvc
  namespace: default
spec:
  storageClassName: ceph-sc
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
kubectl apply -f storage_pvc.yaml
# After the PVC is created, a PV is provisioned automatically:
[root@k8s-master03 ceph]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM              STORAGECLASS   REASON   AGE
pvc-315991e9-7d4b-11e9-b6cc-0050569ba238   1Gi        RWO            Retain           Bound    default/ceph-pvc   ceph-sc                 13h
# Under normal conditions the PVC is also in the Bound state
[root@k8s-master03 ceph]# kubectl get pvc
NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
ceph-pvc   Bound    pvc-315991e9-7d4b-11e9-b6cc-0050569ba238   1Gi        RWO            ceph-sc        17s
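On the Ceph side the provisioner has created a matching RBD image in the k8s pool; it can be listed and inspected like this (a sketch; the image name is generated dynamically, the one below is taken from the rbd status output further down):
[root@ceph-node1 ~]# rbd ls k8s
[root@ceph-node1 ~]# rbd info k8s/kubernetes-dynamic-pvc-2410765c-7dec-11e9-aa80-26a98c3bc9e4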
apiVersion: v1
kind: Pod
metadata:
  name: ceph-pod1
spec:
  nodeName: k8s-node02
  containers:
    - name: nginx
      image: nginx:1.14
      volumeMounts:
        - name: ceph-rdb-vol1
          mountPath: /usr/share/nginx/html
          readOnly: false
  volumes:
    - name: ceph-rdb-vol1
      persistentVolumeClaim:
        claimName: ceph-pvc
kubectl apply -f storage_pod.yaml
# Check the pod status
[root@k8s-master03 ceph]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
ceph-pod1 1/1 Running 0 3d23h 10.244.4.75 k8s-node02 <none> <none>
# Enter the container and check the mount; the rbd device is mounted at /usr/share/nginx/html.
[root@k8s-master03 ceph]# kubectl exec -it ceph-pod1 -- /bin/bash
root@ceph-pod1:/# df -hT
/dev/rbd0 xfs 1014M 33M 982M 4% /usr/share/nginx/html
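The same mapping can also be seen from the worker node itself, where kubelet has mapped the image as a kernel RBD device (a sketch, run on k8s-node02; /dev/rbd0 matches the df output above):
[root@k8s-node02 ~]# rbd showmapped
[root@k8s-node02 ~]# lsblk /dev/rbd0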
# Create a test file in the mounted directory
root@ceph-pod1:/# echo 'hello ceph!' > /usr/share/nginx/html/index.html
root@ceph-pod1:/# cat /usr/share/nginx/html/index.html
hello ceph!
# On Ceph, check which node has the image mapped; it is currently 172.16.1.22, i.e. k8s-node02.
[root@ceph-node1 ~]# rbd status k8s/kubernetes-dynamic-pvc-2410765c-7dec-11e9-aa80-26a98c3bc9e4
Watchers:
watcher=172.16.1.22:0/264870305 client.24553 cookie=18446462598732840961
# Then delete this pod
[root@k8s-master03 ceph]# kubectl delete -f storage_pod.yaml
pod "ceph-pod1" deleted
# Edit the manifest storage_pod.yaml to schedule the pod to k8s-node01, then apply it again.
# A moment later, check the pod status: the pod is now running on k8s-node01.
[root@k8s-master01 ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
ceph-pod1 1/1 Running 0 34s 10.244.3.28 k8s-node01 <none> <none>
# Check the image watchers on Ceph again; the image is now mapped on 172.16.1.21, i.e. k8s-node01
[root@ceph-node1 ~]# rbd status k8s/kubernetes-dynamic-pvc-2410765c-7dec-11e9-aa80-26a98c3bc9e4
Watchers:
watcher=172.16.1.21:0/1812501701 client.114340 cookie=18446462598732840963
# Enter the container and verify the test file is still there; the pod kept using the original image after moving to another node.
[root@k8s-master03 ceph]# kubectl exec -it ceph-pod1 -- /bin/bash
root@ceph-pod1:/# cat /usr/share/nginx/html/index.html
hello ceph!
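One last point: the StorageClass above uses reclaimPolicy: Retain, so deleting the PVC only releases the PV and leaves the RBD image in place; if you want to clean everything up, it has to be done explicitly (a sketch, with the PV and image names taken from the outputs above):
kubectl delete -f storage_pod.yaml
kubectl delete -f storage_pvc.yaml
kubectl delete pv pvc-315991e9-7d4b-11e9-b6cc-0050569ba238
rbd rm k8s/kubernetes-dynamic-pvc-2410765c-7dec-11e9-aa80-26a98c3bc9e4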