Continuous Integration Series (6): Dynamic PV Provisioning with Kubernetes and Ceph
Reposted from: https://www.jianshu.com/p/750a8fde377b
Contents
- Goal
- Create the rbd-provisioner
- Configure the StorageClass
- Test
Goal
By default, storage for a pod has to be provisioned by hand: an administrator creates a PV first, and the pod then claims it through a PVC. This is tedious. Kubernetes' dynamic provisioning feature allows storage volumes to be created on demand, so PVs no longer need to be created manually each time: configuring a StorageClass backed by Ceph gives us dynamic allocation.
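For contrast, here is roughly what the static workflow requires: a hand-written PV for a single RBD image. This is a minimal sketch reusing the monitor address, pool, user, and secret from this post; the image name is hypothetical, and the image itself would have to be created on the Ceph side first.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ceph-rbd-pv-manual
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  rbd:
    monitors:
      - 10.79.167.29:6789
    pool: kube
    # hypothetical image, created beforehand with e.g.: rbd create kube/manual-image --size 2048
    image: manual-image
    user: kube
    secretRef:
      name: ceph-user-secret
    fsType: ext4
Dynamic provisioning, set up below, removes this per-volume bookkeeping entirely.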
Create the rbd-provisioner
cat >external-storage-rbd-provisioner.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  # apps/v1 requires an explicit selector matching the pod template labels
  selector:
    matchLabels:
      app: rbd-provisioner
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
        - name: rbd-provisioner
          image: "quay.io/external_storage/rbd-provisioner:v2.0.0-k8s1.11"
          env:
            - name: PROVISIONER_NAME
              value: ceph.com/rbd
      serviceAccountName: rbd-provisioner
EOF
kubectl apply -f external-storage-rbd-provisioner.yaml
# Check status; wait until the pod is Running before continuing
kubectl get pod -n kube-system
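Instead of polling manually, kubectl can also wait for the Deployment to finish rolling out:
kubectl -n kube-system rollout status deployment/rbd-provisioner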
Configure the StorageClass
# Install ceph-common on every node in the k8s cluster
yum install -y ceph-common
# Create an OSD pool (run on a Ceph mon or admin node)
ceph osd pool create kube 4096
ceph osd pool ls
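Note that 4096 placement groups suits a fairly large cluster; pg_num should be sized to the number of OSDs. On Ceph Luminous and later the pool should also be tagged for RBD use:
ceph osd pool application enable kube rbd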
# Create the Ceph user that k8s will use (run on a Ceph mon or admin node)
ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=kube' -o ceph.client.kube.keyring
# View the keys (run on a Ceph mon or admin node)
ceph auth get-key client.admin
ceph auth get-key client.kube
# Create the admin secret
# Replace CEPH_ADMIN_SECRET with the key returned for client.admin
export CEPH_ADMIN_SECRET='AQBBAnRbSiSOFxAAEZXNMzYV6hsceccYLhzdWw=='
kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_ADMIN_SECRET \
--namespace=kube-system
# In the default namespace, create the secret that PVCs use to access Ceph
# Replace CEPH_KUBE_SECRET with the key returned for client.kube
export CEPH_KUBE_SECRET='AQBZK3VbTN/QOBAAIYi6CRLQcVevW5HM8lunOg=='
kubectl create secret generic ceph-user-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_KUBE_SECRET \
--namespace=default
# View the secrets
kubectl get secret ceph-user-secret -o yaml
kubectl get secret ceph-secret -n kube-system -o yaml
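The provisioner resolves userSecretName in the namespace of the PVC, so ceph-user-secret has to exist in every namespace that will claim volumes from this StorageClass. For a hypothetical namespace myapp, for example:
kubectl create secret generic ceph-user-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_KUBE_SECRET \
--namespace=myapp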
# Configure the StorageClass
# On a kubeadm-based cluster, use the external provisioner (the in-tree one
# fails because the controller-manager container ships without the rbd binary):
# provisioner: ceph.com/rbd
cat >storageclass-ceph-rdb.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: dynamic-ceph-rdb
provisioner: ceph.com/rbd
# provisioner: kubernetes.io/rbd
parameters:
  monitors: 10.79.167.29:6789,10.79.167.30:6789,10.79.167.31:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-user-secret
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
EOF
# Apply the StorageClass
kubectl apply -f storageclass-ceph-rdb.yaml
# Check
kubectl get sc
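Optionally, the class can be marked as the cluster default so that PVCs may omit storageClassName (this uses the standard default-class annotation):
kubectl patch storageclass dynamic-ceph-rdb -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'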
Test
# Create a test PVC
cat >ceph-rdb-pvc-test.yaml<<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-rdb-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: dynamic-ceph-rdb
  resources:
    requests:
      storage: 2Gi
EOF
kubectl apply -f ceph-rdb-pvc-test.yaml
# Check
kubectl get pvc
kubectl get pv
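If the PVC stays Pending, the provisioner's events will show why; once it is Bound, the backing image should also be visible in the pool on the Ceph side:
kubectl describe pvc ceph-rdb-claim
# On a Ceph mon or admin node
rbd ls kube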
# Create an nginx pod that mounts the volume
cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod1
  labels:
    name: nginx-pod1
spec:
  containers:
    - name: nginx-pod1
      image: nginx:alpine
      ports:
        - name: web
          containerPort: 80
      volumeMounts:
        - name: ceph-rdb
          mountPath: /usr/share/nginx/html
  volumes:
    - name: ceph-rdb
      persistentVolumeClaim:
        claimName: ceph-rdb-claim
EOF
kubectl apply -f nginx-pod.yaml
# Check
kubectl get pods -o wide
# Write some content into the mounted volume
kubectl exec -ti nginx-pod1 -- /bin/sh -c 'echo Hello World from Ceph RBD!!! > /usr/share/nginx/html/index.html'
# Access test: fetch the page from the pod IP (run from a cluster node)
POD_IP=$(kubectl get pod nginx-pod1 -o jsonpath='{.status.podIP}')
curl http://$POD_IP
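To confirm the data lives on RBD rather than inside the container, delete and recreate the pod, then fetch the page again; the file written above should survive (the recreated pod will usually get a new IP):
kubectl delete -f nginx-pod.yaml
kubectl apply -f nginx-pod.yaml
POD_IP=$(kubectl get pod nginx-pod1 -o jsonpath='{.status.podIP}')
curl http://$POD_IP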
# Clean up
kubectl delete -f nginx-pod.yaml
kubectl delete -f ceph-rdb-pvc-test.yaml
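Dynamically provisioned PVs get a Delete reclaim policy by default, so deleting the PVC should remove both the PV and the backing RBD image; this can be double-checked with:
kubectl get pv
# On a Ceph mon or admin node
rbd ls kube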