diff --git a/deploy/helm/Chart.yaml b/deploy/helm/Chart.yaml
new file mode 100644
index 0000000..662b868
--- /dev/null
+++ b/deploy/helm/Chart.yaml
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+appVersion: 0.30.4
+description: "Container Storage Interface (CSI) driver for S3 volumes"
+name: csi-s3
+version: 0.30.4
+keywords:
+  - s3
+home: https://github.com/yandex-cloud/k8s-csi-s3
+sources:
+  - https://github.com/yandex-cloud/k8s-csi-s3/deploy/helm
+icon: https://raw.githubusercontent.com/yandex-cloud/geesefs/master/doc/geesefs.png
diff --git a/deploy/helm/README.md b/deploy/helm/README.md
new file mode 100644
index 0000000..8c70634
--- /dev/null
+++ b/deploy/helm/README.md
@@ -0,0 +1,36 @@
+# Helm chart for csi-s3
+
+This chart adds S3 volume support to your cluster.
+
+## Install chart
+
+- Helm 2.x: `helm install [--set secret.accessKey=... --set secret.secretKey=... ...] --namespace kube-system --name csi-s3 .`
+- Helm 3.x: `helm install [--set secret.accessKey=... --set secret.secretKey=... ...] --namespace kube-system csi-s3 .`
+
+After installation succeeds, you can get a status of Chart: `helm status csi-s3`.
+
+## Delete Chart
+
+- Helm 2.x: `helm delete --purge csi-s3`
+- Helm 3.x: `helm uninstall csi-s3 --namespace kube-system`
+
+## Configuration
+
+By default, this chart creates a secret and a storage class. You should at least set `secret.accessKey` and `secret.secretKey`
+to your [Yandex Object Storage](https://cloud.yandex.com/en-ru/services/storage) keys for it to work.
+
+The following table lists all configuration parameters and their default values.
+
+| Parameter                    | Description                                                            | Default                                                |
+| ---------------------------- | ---------------------------------------------------------------------- | ------------------------------------------------------ |
+| `storageClass.create`        | Specifies whether the storage class should be created                  | true                                                   |
+| `storageClass.name`          | Storage class name                                                     | csi-s3                                                 |
+| `storageClass.singleBucket`  | Use a single bucket for all dynamically provisioned persistent volumes |                                                        |
+| `storageClass.mountOptions`  | GeeseFS mount options                                                  | `--memory-limit 1000 --dir-mode 0777 --file-mode 0666` |
+| `storageClass.reclaimPolicy` | Volume reclaim policy                                                  | Delete                                                 |
+| `storageClass.annotations`   | Annotations for the storage class                                      |                                                        |
+| `secret.create`              | Specifies whether the secret should be created                         | true                                                   |
+| `secret.name`                | Name of the secret                                                     | csi-s3-secret                                          |
+| `secret.accessKey`           | S3 Access Key                                                          |                                                        |
+| `secret.secretKey`           | S3 Secret Key                                                          |                                                        |
+| `secret.endpoint`            | Endpoint                                                               | https://storage.yandexcloud.net                        |
diff --git a/deploy/helm/templates/attacher.yaml b/deploy/helm/templates/attacher.yaml
new file mode 100644
index 0000000..9e6bc42
--- /dev/null
+++ b/deploy/helm/templates/attacher.yaml
@@ -0,0 +1,93 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: csi-attacher-sa
+  namespace: {{ .Release.Namespace }}
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: external-attacher-runner
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-attacher-role
+subjects:
+  - kind: ServiceAccount
+    name: csi-attacher-sa
+    namespace: {{ .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: external-attacher-runner
+  apiGroup: rbac.authorization.k8s.io
+---
+# needed for StatefulSet
+kind: Service
+apiVersion: v1
+metadata:
+  name: csi-attacher-s3
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: csi-attacher-s3
+spec:
+  selector:
+    app: csi-attacher-s3
+  ports:
+    - name: csi-s3-dummy
+      port: 65535
+---
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+  name: csi-attacher-s3
+  namespace: {{ .Release.Namespace }}
+spec:
+  serviceName: "csi-attacher-s3"
+  replicas: 1
+  selector:
+    matchLabels:
+      app: csi-attacher-s3
+  template:
+    metadata:
+      labels:
+        app: csi-attacher-s3
+    spec:
+      serviceAccount: csi-attacher-sa
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          operator: "Exists"
+      containers:
+        - name: csi-attacher
+          image: {{ .Values.images.attacher }}
+          args:
+            - "--v=4"
+            - "--csi-address=$(ADDRESS)"
+          env:
+            - name: ADDRESS
+              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
+          imagePullPolicy: "IfNotPresent"
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
+      volumes:
+        - name: socket-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
+            type: DirectoryOrCreate
diff --git a/deploy/helm/templates/csi-s3.yaml b/deploy/helm/templates/csi-s3.yaml
new file mode 100644
index 0000000..1b02cc0
--- /dev/null
+++ b/deploy/helm/templates/csi-s3.yaml
@@ -0,0 +1,120 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: csi-s3
+  namespace: {{ .Release.Namespace }}
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-s3
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-s3
+subjects:
+  - kind: ServiceAccount
+    name: csi-s3
+    namespace: {{ .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: csi-s3
+  apiGroup: rbac.authorization.k8s.io
+---
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: csi-s3
+  namespace: {{ .Release.Namespace }}
+spec:
+  selector:
+    matchLabels:
+      app: csi-s3
+  template:
+    metadata:
+      labels:
+        app: csi-s3
+    spec:
+      serviceAccount: csi-s3
+      hostNetwork: true
+      containers:
+        - name: driver-registrar
+          image: {{ .Values.images.registrar }}
+          args:
+            - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
+            - "--v=4"
+            - "--csi-address=$(ADDRESS)"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+            - name: DRIVER_REG_SOCK_PATH
+              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
+            - name: KUBE_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          volumeMounts:
+            - name: plugin-dir
+              mountPath: /csi
+            - name: registration-dir
+              mountPath: /registration/
+        - name: csi-s3
+          securityContext:
+            privileged: true
+            capabilities:
+              add: ["SYS_ADMIN"]
+            allowPrivilegeEscalation: true
+          image: {{ .Values.images.csi }}
+          imagePullPolicy: IfNotPresent
+          args:
+            - "--endpoint=$(CSI_ENDPOINT)"
+            - "--nodeid=$(NODE_ID)"
+            - "--v=4"
+          env:
+            - name: CSI_ENDPOINT
+              value: unix:///csi/csi.sock
+            - name: NODE_ID
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          volumeMounts:
+            - name: plugin-dir
+              mountPath: /csi
+            - name: pods-mount-dir
+              mountPath: /var/lib/kubelet/pods
+              mountPropagation: "Bidirectional"
+            - name: fuse-device
+              mountPath: /dev/fuse
+      volumes:
+        - name: registration-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins_registry/
+            type: DirectoryOrCreate
+        - name: plugin-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
+            type: DirectoryOrCreate
+        - name: pods-mount-dir
+          hostPath:
+            path: /var/lib/kubelet/pods
+            type: Directory
+        - name: fuse-device
+          hostPath:
+            path: /dev/fuse
diff --git a/deploy/helm/templates/provisioner.yaml b/deploy/helm/templates/provisioner.yaml
new file mode 100644
index 0000000..cef6e0c
--- /dev/null
+++ b/deploy/helm/templates/provisioner.yaml
@@ -0,0 +1,107 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: csi-provisioner-sa
+  namespace: {{ .Release.Namespace }}
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: external-provisioner-runner
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: csi-provisioner-sa
+    namespace: {{ .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: csi-provisioner-s3
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: csi-provisioner-s3
+spec:
+  selector:
+    app: csi-provisioner-s3
+  ports:
+    - name: csi-s3-dummy
+      port: 65535
+---
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+  name: csi-provisioner-s3
+  namespace: {{ .Release.Namespace }}
+spec:
+  serviceName: "csi-provisioner-s3"
+  replicas: 1
+  selector:
+    matchLabels:
+      app: csi-provisioner-s3
+  template:
+    metadata:
+      labels:
+        app: csi-provisioner-s3
+    spec:
+      serviceAccount: csi-provisioner-sa
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          operator: "Exists"
+      containers:
+        - name: csi-provisioner
+          image: {{ .Values.images.provisioner }}
+          args:
+            - "--csi-address=$(ADDRESS)"
+            - "--v=4"
+          env:
+            - name: ADDRESS
+              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
+          imagePullPolicy: "IfNotPresent"
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
+        - name: csi-s3
+          image: {{ .Values.images.csi }}
+          imagePullPolicy: IfNotPresent
+          args:
+            - "--endpoint=$(CSI_ENDPOINT)"
+            - "--nodeid=$(NODE_ID)"
+            - "--v=4"
+          env:
+            - name: CSI_ENDPOINT
+              value: unix:///var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
+            - name: NODE_ID
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
+      volumes:
+        - name: socket-dir
+          emptyDir: {}
diff --git a/deploy/helm/templates/secret.yaml b/deploy/helm/templates/secret.yaml
new file mode 100644
index 0000000..dd6a748
--- /dev/null
+++ b/deploy/helm/templates/secret.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.secret.create -}}
+apiVersion: v1
+kind: Secret
+metadata:
+  namespace: {{ .Release.Namespace }}
+  name: {{ .Values.secret.name }}
+stringData:
+  accessKeyID: {{ .Values.secret.accessKey }}
+  secretAccessKey: {{ .Values.secret.secretKey }}
+  endpoint: {{ .Values.secret.endpoint }}
+{{- end -}}
diff --git a/deploy/helm/templates/storageclass.yaml b/deploy/helm/templates/storageclass.yaml
new file mode 100644
index 0000000..d6cd263
--- /dev/null
+++ b/deploy/helm/templates/storageclass.yaml
@@ -0,0 +1,24 @@
+{{- if .Values.storageClass.create -}}
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: {{ .Values.storageClass.name }}
+{{- if .Values.storageClass.annotations }}
+  annotations:
+{{ toYaml .Values.storageClass.annotations | indent 4 }}
+{{- end }}
+provisioner: ru.yandex.s3.csi
+parameters:
+  mounter: geesefs
+  options: "{{ .Values.storageClass.mountOptions }}"
+  bucket: "{{ .Values.storageClass.singleBucket }}"
+  csi.storage.k8s.io/provisioner-secret-name: {{ .Values.secret.name }}
+  csi.storage.k8s.io/provisioner-secret-namespace: {{ .Release.Namespace }}
+  csi.storage.k8s.io/controller-publish-secret-name: {{ .Values.secret.name }}
+  csi.storage.k8s.io/controller-publish-secret-namespace: {{ .Release.Namespace }}
+  csi.storage.k8s.io/node-stage-secret-name: {{ .Values.secret.name }}
+  csi.storage.k8s.io/node-stage-secret-namespace: {{ .Release.Namespace }}
+  csi.storage.k8s.io/node-publish-secret-name: {{ .Values.secret.name }}
+  csi.storage.k8s.io/node-publish-secret-namespace: {{ .Release.Namespace }}
+reclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
+{{- end -}}
diff --git a/deploy/helm/values.yaml b/deploy/helm/values.yaml
new file mode 100644
index 0000000..5e7c469
--- /dev/null
+++ b/deploy/helm/values.yaml
@@ -0,0 +1,39 @@
+---
+images:
+  # Source: quay.io/k8scsi/csi-attacher:v2.2.0
+  attacher: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-attacher:v2.2.0
+  # Source: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
+  registrar: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-node-driver-registrar:v1.2.0
+  # Source: quay.io/k8scsi/csi-provisioner:v2.1.0
+  provisioner: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-provisioner:v2.1.0
+  # Main image
+  csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3:0.30.4
+
+storageClass:
+  # Specifies whether the storage class should be created
+  create: true
+  # Name
+  name: csi-s3
+  # Use a single bucket for all dynamically provisioned persistent volumes
+  singleBucket: ""
+  # GeeseFS mount options
+  mountOptions: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
+  # Volume reclaim policy
+  reclaimPolicy: Delete
+  # Annotations for the storage class
+  # Example:
+  # annotations:
+  #   storageclass.kubernetes.io/is-default-class: "true"
+  annotations: {}
+
+secret:
+  # Specifies whether the secret should be created
+  create: true
+  # Name of the secret
+  name: csi-s3-secret
+  # S3 Access Key
+  accessKey: ""
+  # S3 Secret Key
+  secretKey: ""
+  # Endpoint
+  endpoint: https://storage.yandexcloud.net