Compare commits
36 commits
- 25401592e1
- 8d1ad692e5
- 16c6c0ee13
- 227e1cf2dd
- 40086c1ffa
- 6cfd3ebbb6
- 195829887a
- f7e3c21a87
- 06d059bfd1
- b630faefa7
- 4af9636d19
- 44511523e2
- dd0c0b68d5
- 59a7605ad8
- 5a3a517315
- 6b72154ebc
- a43867a307
- e334aedd0c
- 9a04d5a6eb
- 874dedcd3b
- c7e066396b
- 8539ff0a48
- 0fb81f07e7
- 37c35c788a
- 680a649a21
- 519c4f0bd7
- 8ea6111b0d
- 4e410df6e1
- 7a415ae6ab
- 96818e563a
- 5dbebd01bd
- 3b38d545ab
- 259c9ca561
- f658121c77
- 2c85a614ea
- a3fa9f3696
26 changed files with 202 additions and 274 deletions
Changed paths: .github/workflows, Dockerfile, Makefile, README.md, deploy, pkg
.github/workflows/pages.yml (new file, 48 additions)

@@ -0,0 +1,48 @@
name: Publish Helm chart

on:
  push:
    branches:
      - master

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
  contents: write
  pages: write
  id-token: write

# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
  group: "pages"
  cancel-in-progress: false

jobs:
  deploy:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3

      - name: Setup Pages
        uses: actions/configure-pages@v3

      - name: Publish Helm chart
        uses: stefanprodan/helm-gh-pages@0ad2bb377311d61ac04ad9eb6f252fb68e207260 # v1.7.0
        with:
          # GitHub automatically creates a GITHUB_TOKEN secret to use in your workflow. You can use the GITHUB_TOKEN to authenticate in a workflow run.
          # See https://docs.github.com/en/actions/reference/authentication-in-a-workflow#about-the-github_token-secret
          token: ${{ secrets.GITHUB_TOKEN }}
          branch: github-pages
          charts_dir: deploy/helm
          target_dir: charts
          linting: off
.github/workflows/test.yml (deleted, 16 deletions)

@@ -1,16 +0,0 @@
name: Test

on:
  push:
    tags:
      - "v*"

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Test
        run: make test
Dockerfile

@@ -11,8 +11,8 @@ FROM alpine:3.17
LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
LABEL description="csi-s3 slim image"

RUN apk add --no-cache fuse
#RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community rclone s3fs-fuse
RUN apk add --no-cache fuse mailcap rclone
RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community s3fs-fuse

ADD https://github.com/yandex-cloud/geesefs/releases/latest/download/geesefs-linux-amd64 /usr/bin/geesefs
RUN chmod 755 /usr/bin/geesefs
Makefile (2 changed lines)

@@ -17,7 +17,7 @@ REGISTRY_NAME=cr.yandex/crp9ftr22d26age3hulg
REGISTRY_NAME2=cr.il.nebius.cloud/crll7us9n6i5j3v4n92m
IMAGE_NAME=csi-s3
IMAGE_NAME2=yandex-cloud/csi-s3/csi-s3-driver
VERSION ?= 0.35.3
VERSION ?= 0.38.3
IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(VERSION)
TEST_IMAGE_TAG=$(IMAGE_NAME):test
README.md (34 changed lines)

@@ -10,7 +10,19 @@ This is a Container Storage Interface ([CSI](https://github.com/container-storag
* Kubernetes has to allow privileged containers
* Docker daemon must allow shared mounts (systemd flag `MountFlags=shared`)

### 1. Create a secret with your S3 credentials
### Helm chart

Helm chart is published at `https://yandex-cloud.github.io/k8s-csi-s3`:

```
helm repo add yandex-s3 https://yandex-cloud.github.io/k8s-csi-s3/charts

helm install csi-s3 yandex-s3/csi-s3
```

### Manual installation

#### 1. Create a secret with your S3 credentials

```yaml
apiVersion: v1

@@ -30,22 +42,30 @@ stringData:

The region can be empty if you are using some other S3 compatible storage.

### 2. Deploy the driver
#### 2. Deploy the driver

```bash
cd deploy/kubernetes
kubectl create -f provisioner.yaml
kubectl create -f attacher.yaml
kubectl create -f driver.yaml
kubectl create -f csi-s3.yaml
```

### 3. Create the storage class
If you're upgrading from a previous version which had `attacher.yaml` you
can safely delete all resources created from that file:

```
wget https://raw.githubusercontent.com/yandex-cloud/k8s-csi-s3/v0.35.5/deploy/kubernetes/attacher.yaml
kubectl delete -f attacher.yaml
```

#### 3. Create the storage class

```bash
kubectl create -f examples/storageclass.yaml
```

### 4. Test the S3 driver
#### 4. Test the S3 driver

1. Create a pvc using the new storage class:

@@ -74,8 +94,8 @@ kubectl create -f examples/storageclass.yaml
```bash
$ kubectl exec -ti csi-s3-test-nginx bash
$ mount | grep fuse
s3fs on /var/lib/www/html type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
$ touch /var/lib/www/html/hello_world
pvc-035763df-0488-4941-9a34-f637292eb95c: on /usr/share/nginx/html/s3 type fuse.geesefs (rw,nosuid,nodev,relatime,user_id=65534,group_id=0,default_permissions,allow_other)
$ touch /usr/share/nginx/html/s3/hello_world
```

If something does not work as expected, check the troubleshooting section below.
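The PVC and test pod used in step 4 are collapsed in this diff view. For orientation only, here is a minimal sketch of a claim that would use the storage class from this change; the claim name, access mode and requested size are placeholders, and `storageClassName: csi-s3` matches the default `storageClass.name` used throughout this diff.

```yaml
# Hypothetical illustration (not taken verbatim from the repository examples):
# the name, access mode and requested size are placeholders.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-s3-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
  storageClassName: csi-s3
```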
Chart.yaml (csi-s3 Helm chart)

@@ -1,9 +1,9 @@
---
apiVersion: v1
appVersion: 0.35.3
appVersion: 0.38.3
description: "Container Storage Interface (CSI) driver for S3 volumes"
name: csi-s3
version: 0.35.3
version: 0.38.3
keywords:
  - s3
home: https://github.com/yandex-cloud/k8s-csi-s3
@@ -26,6 +26,7 @@ The following table lists all configuration parameters and their default values.
| `storageClass.create` | Specifies whether the storage class should be created | true |
| `storageClass.name` | Storage class name | csi-s3 |
| `storageClass.singleBucket` | Use a single bucket for all dynamically provisioned persistent volumes | |
| `storageClass.mounter` | Mounter to use. Either geesefs, s3fs or rclone. geesefs recommended | geesefs |
| `storageClass.mountOptions` | GeeseFS mount options | `--memory-limit 1000 --dir-mode 0777 --file-mode 0666` |
| `storageClass.reclaimPolicy` | Volume reclaim policy | Delete |
| `storageClass.annotations` | Annotations for the storage class | |
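These parameters map onto the `storageClass` block of the chart's values. As a sketch only (the file name `my-values.yaml` is hypothetical; the keys are taken from the table above and from `deploy/helm/csi-s3/values.yaml` later in this diff), an override file could look like this and be applied with `helm install csi-s3 yandex-s3/csi-s3 -f my-values.yaml`:

```yaml
# my-values.yaml (hypothetical override file)
storageClass:
  create: true
  name: csi-s3
  # empty means one bucket per volume; set a bucket name to share a single bucket
  singleBucket: ""
  mounter: geesefs
  mountOptions: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
  reclaimPolicy: Delete
```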
@@ -1,10 +1,9 @@
helm_chart:
  name: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3
  tag: 0.35.3
  tag: 0.38.3
requirements:
  k8s_version: ">=1.13"
images:
  - full: images.attacher
  - full: images.registrar
  - full: images.provisioner
  - full: images.csi

@@ -15,7 +14,7 @@ user_values:
      ru: Создать класс хранения
    description:
      en: Specifies whether the storage class should be created
      ru: 'Если "да", при установке будет создан класс хранения S3'
      ru: Выберите, чтобы создать новый S3-класс хранения при развёртывании приложения.
    boolean_value:
      default_value: true
  - name: secret.create

@@ -24,7 +23,7 @@ user_values:
      ru: Создать секрет
    description:
      en: Specifies whether the secret should be created
      ru: 'Если "да", при установке будет создан секрет, иначе для класса хранения будет использован существующий'
      ru: Выберите, чтобы создать новый секрет для класса хранения при установке приложения, а не использовать существующий.
    boolean_value:
      default_value: true
  - name: secret.accessKey

@@ -33,7 +32,7 @@ user_values:
      ru: Идентификатор ключа S3
    description:
      en: S3 Access Key ID
      ru: Идентификатор ключа S3
      ru: Идентификатор ключа S3.
    string_value:
      default_value: ""
  - name: secret.secretKey

@@ -42,16 +41,16 @@ user_values:
      ru: Секретный ключ S3
    description:
      en: S3 Secret Key
      ru: Секретный ключ S3
      ru: Секретный ключ S3.
    string_value:
      default_value: ""
  - name: storageClass.singleBucket
    title:
      en: Single S3 bucket for volumes
      ru: Общий S3 бакет для томов
      ru: Общий бакет S3 для томов
    description:
      en: Single S3 bucket to use for all dynamically provisioned persistent volumes
      ru: Общий бакет S3, в котором будут создаваться все динамически распределяемые тома. Если пусто, под каждый том будет создаваться новый бакет
      ru: Общий бакет S3, в котором будут создаваться все динамически распределяемые тома. Если пусто, под каждый том будет создаваться новый бакет.
    string_value:
      default_value: ""
  - name: secret.endpoint

@@ -60,7 +59,7 @@ user_values:
      ru: Адрес S3-сервиса
    description:
      en: S3 service endpoint to use
      ru: Адрес S3-сервиса, который будет использоваться
      ru: Адрес S3-сервиса, который будет использоваться.
    string_value:
      default_value: "https://storage.yandexcloud.net"
  - name: storageClass.mountOptions

@@ -68,8 +67,8 @@ user_values:
      en: GeeseFS mount options
      ru: Опции монтирования GeeseFS
    description:
      en: GeeseFS mount options to use. Consult GeeseFS (https://github.com/yandex-cloud/geesefs) help for the full option list
      ru: Опции монтирования GeeseFS. Смотрите справку GeeseFS (https://github.com/yandex-cloud/geesefs) для полного перечня опций
      en: GeeseFS mount options to use. Refer to `geesefs --help` command output for the whole list of options (https://github.com/yandex-cloud/geesefs).
      ru: Опции монтирования GeeseFS. Полный перечень и описание опций смотрите в выводе команды `geesefs --help` (https://github.com/yandex-cloud/geesefs).
    string_value:
      default_value: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
  - name: storageClass.reclaimPolicy

@@ -78,7 +77,7 @@ user_values:
      ru: Политика очистки томов
    description:
      en: Volume reclaim policy for the storage class (Retain or Delete)
      ru: Политика очистки PV, связанных с PVC (Retain - сохранять при удалении PVC, Delete - удалять при удалении PVC)
      ru: Выберите политику очистки томов PersistentVolume при удалении PersistentVolumeClaim. Retain — сохранять том, Delete — удалять том.
    string_selector_value:
      default_value: Delete
      values:

@@ -90,7 +89,7 @@ user_values:
      ru: Название класса хранения
    description:
      en: Name of the storage class that will be created
      ru: Название класса хранения, который будет создан при установке
      ru: Название класса хранения, который будет создан при установке.
    string_value:
      default_value: csi-s3
  - name: secret.name

@@ -99,15 +98,15 @@ user_values:
      ru: Название секрета
    description:
      en: Name of the secret to create or use for the storage class
      ru: Название секрета, который будет создан или использован для класса хранения
      ru: Название секрета, который будет создан или использован для класса хранения.
    string_value:
      default_value: csi-s3-secret
  - name: tolerations.all
    title:
      en: Tolerate all taints
      ru: Игнорировать все taint-ы
      ru: Игнорировать все политики taint
    description:
      en: Tolerate all taints by the CSI-S3 node driver (mounter)
      ru: Игнорировать все taint-ы узлов кластера драйвером CSI-S3, монтирующим ФС на узлах
      ru: Выберите, чтобы драйвер CSI, который монтирует файловую систему на узлах, игнорировал все политики taint для узлов кластера.
    boolean_value:
      default_value: false
@@ -50,7 +50,6 @@ spec:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccount: csi-s3
      hostNetwork: true
      containers:
        - name: driver-registrar
          image: {{ .Values.images.registrar }}

@@ -62,7 +61,7 @@ spec:
            - name: ADDRESS
              value: /csi/csi.sock
            - name: DRIVER_REG_SOCK_PATH
              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
              value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:

@@ -94,24 +93,37 @@ spec:
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: stage-dir
              mountPath: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
              mountPropagation: "Bidirectional"
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPath: {{ .Values.kubeletPath }}/pods
              mountPropagation: "Bidirectional"
            - name: fuse-device
              mountPath: /dev/fuse
            - name: systemd-control
              mountPath: /run/systemd
      volumes:
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            path: {{ .Values.kubeletPath }}/plugins_registry/
            type: DirectoryOrCreate
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
            path: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
            type: DirectoryOrCreate
        - name: stage-dir
          hostPath:
            path: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
            type: DirectoryOrCreate
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            path: {{ .Values.kubeletPath }}/pods
            type: Directory
        - name: fuse-device
          hostPath:
            path: /dev/fuse
        - name: systemd-control
          hostPath:
            path: /run/systemd
            type: DirectoryOrCreate
deploy/helm/csi-s3/templates/driver.yaml (new file, 10 additions)

@@ -0,0 +1,10 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: ru.yandex.s3.csi
spec:
  attachRequired: false
  podInfoOnMount: true
  fsGroupPolicy: File # added in Kubernetes 1.19, this field is GA as of Kubernetes 1.23
  volumeLifecycleModes: # added in Kubernetes 1.16, this field is beta
    - Persistent
@@ -77,6 +77,10 @@ spec:
      {{- with .Values.tolerations.controller }}
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      containers:
        - name: csi-provisioner
          image: {{ .Values.images.provisioner }}

@@ -85,11 +89,11 @@ spec:
            - "--v=4"
          env:
            - name: ADDRESS
              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
              value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
              mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
        - name: csi-s3
          image: {{ .Values.images.csi }}
          imagePullPolicy: IfNotPresent

@@ -99,14 +103,14 @@ spec:
            - "--v=4"
          env:
            - name: CSI_ENDPOINT
              value: unix:///var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
              value: unix://{{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
              mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
      volumes:
        - name: socket-dir
          emptyDir: {}
@@ -5,7 +5,11 @@ metadata:
  namespace: {{ .Release.Namespace }}
  name: {{ .Values.secret.name }}
stringData:
  {{- if .Values.secret.accessKey }}
  accessKeyID: {{ .Values.secret.accessKey }}
  {{- end }}
  {{- if .Values.secret.secretKey }}
  secretAccessKey: {{ .Values.secret.secretKey }}
  {{- end }}
  endpoint: {{ .Values.secret.endpoint }}
{{- end -}}
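For reference, a minimal sketch of the Secret this template renders when `secret.create` is enabled; the key names follow the template above, while the namespace and credential values below are placeholders (the defaults `csi-s3-secret` and `https://storage.yandexcloud.net` come from the chart values shown elsewhere in this diff).

```yaml
# Illustration only: placeholder values, key names taken from the template above.
apiVersion: v1
kind: Secret
metadata:
  namespace: kube-system
  name: csi-s3-secret
stringData:
  accessKeyID: <YOUR_ACCESS_KEY_ID>
  secretAccessKey: <YOUR_SECRET_ACCESS_KEY>
  endpoint: https://storage.yandexcloud.net
```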
@@ -9,7 +9,7 @@ metadata:
{{- end }}
provisioner: ru.yandex.s3.csi
parameters:
  mounter: geesefs
  mounter: "{{ .Values.storageClass.mounter }}"
  options: "{{ .Values.storageClass.mountOptions }}"
  {{- if .Values.storageClass.singleBucket }}
  bucket: "{{ .Values.storageClass.singleBucket }}"
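A rough sketch of the StorageClass this template produces with the chart defaults from values.yaml below (class name `csi-s3`, GeeseFS mounter, default mount options, no shared bucket); annotations and the reclaim policy wiring are left out since they are not visible in this hunk.

```yaml
# Illustration only: rendered with the default values from this change.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-s3
provisioner: ru.yandex.s3.csi
parameters:
  mounter: "geesefs"
  options: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
```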
@@ -1,13 +1,11 @@
---
images:
  # Source: quay.io/k8scsi/csi-attacher:v3.0.1
  attacher: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-attacher:v3.0.1
  # Source: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
  registrar: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-node-driver-registrar:v1.2.0
  # Source: quay.io/k8scsi/csi-provisioner:v2.1.0
  provisioner: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-provisioner:v2.1.0
  # Main image
  csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3-driver:0.35.3
  csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3-driver:0.38.3

storageClass:
  # Specifies whether the storage class should be created

@@ -16,6 +14,8 @@ storageClass:
  name: csi-s3
  # Use a single bucket for all dynamically provisioned persistent volumes
  singleBucket: ""
  # mounter to use - either geesefs, s3fs or rclone (default geesefs)
  mounter: geesefs
  # GeeseFS mount options
  mountOptions: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
  # Volume reclaim policy

@@ -42,3 +42,7 @@ tolerations:
  all: false
  node: []
  controller: []

nodeSelector: {}

kubeletPath: /var/lib/kubelet
@@ -1,101 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-attacher-sa
  namespace: {{ .Release.Namespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-attacher-runner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role
subjects:
  - kind: ServiceAccount
    name: csi-attacher-sa
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: external-attacher-runner
  apiGroup: rbac.authorization.k8s.io
---
# needed for StatefulSet
kind: Service
apiVersion: v1
metadata:
  name: csi-attacher-s3
  namespace: {{ .Release.Namespace }}
  labels:
    app: csi-attacher-s3
spec:
  selector:
    app: csi-attacher-s3
  ports:
    - name: csi-s3-dummy
      port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-attacher-s3
  namespace: {{ .Release.Namespace }}
spec:
  serviceName: "csi-attacher-s3"
  replicas: 1
  selector:
    matchLabels:
      app: csi-attacher-s3
  template:
    metadata:
      labels:
        app: csi-attacher-s3
    spec:
      serviceAccount: csi-attacher-sa
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        - name: csi-attacher
          image: {{ .Values.images.attacher }}
          args:
            - "--v=4"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
            type: DirectoryOrCreate
@@ -1,101 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-attacher-sa
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-attacher-runner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role
subjects:
  - kind: ServiceAccount
    name: csi-attacher-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: external-attacher-runner
  apiGroup: rbac.authorization.k8s.io
---
# needed for StatefulSet
kind: Service
apiVersion: v1
metadata:
  name: csi-attacher-s3
  namespace: kube-system
  labels:
    app: csi-attacher-s3
spec:
  selector:
    app: csi-attacher-s3
  ports:
    - name: csi-s3-dummy
      port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-attacher-s3
  namespace: kube-system
spec:
  serviceName: "csi-attacher-s3"
  replicas: 1
  selector:
    matchLabels:
      app: csi-attacher-s3
  template:
    metadata:
      labels:
        app: csi-attacher-s3
    spec:
      serviceAccount: csi-attacher-sa
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        - name: csi-attacher
          image: quay.io/k8scsi/csi-attacher:v3.0.1
          args:
            - "--v=4"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
            type: DirectoryOrCreate
@@ -43,7 +43,6 @@ spec:
          effect: NoExecute
          tolerationSeconds: 300
      serviceAccount: csi-s3
      hostNetwork: true
      containers:
        - name: driver-registrar
          image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0

@@ -71,7 +70,7 @@ spec:
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.35.3
          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
          imagePullPolicy: IfNotPresent
          args:
            - "--endpoint=$(CSI_ENDPOINT)"
deploy/kubernetes/driver.yaml (new file, 7 additions)

@@ -0,0 +1,7 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: ru.yandex.s3.csi
spec:
  attachRequired: false
  podInfoOnMount: true
@@ -29,6 +29,7 @@ spec:
    volumeAttributes:
      capacity: 10Gi
      mounter: geesefs
      options: --memory-limit 1000 --dir-mode 0777 --file-mode 0666
    volumeHandle: manualbucket/path
---
apiVersion: v1
@@ -88,7 +88,7 @@ spec:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
        - name: csi-s3
          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.35.3
          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
          imagePullPolicy: IfNotPresent
          args:
            - "--endpoint=$(CSI_ENDPOINT)"
@@ -84,7 +84,28 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
	}

	notMnt, err := checkMount(targetPath)
	notMnt, err := checkMount(stagingTargetPath)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	if notMnt {
		// Staged mount is dead by some reason. Revive it
		bucketName, prefix := volumeIDToBucketPrefix(volumeID)
		s3, err := s3.NewClientFromSecret(req.GetSecrets())
		if err != nil {
			return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
		}
		meta := getMeta(bucketName, prefix, req.VolumeContext)
		mounter, err := mounter.New(meta, s3.Config)
		if err != nil {
			return nil, err
		}
		if err := mounter.Mount(stagingTargetPath, volumeID); err != nil {
			return nil, err
		}
	}

	notMnt, err = checkMount(targetPath)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
@@ -75,9 +75,17 @@ func (geesefs *geesefsMounter) MountDirect(target string, args []string) error {
		"-o", "allow_other",
		"--log-file", "/dev/stderr",
	}, args...)
	os.Setenv("AWS_ACCESS_KEY_ID", geesefs.accessKeyID)
	os.Setenv("AWS_SECRET_ACCESS_KEY", geesefs.secretAccessKey)
	return fuseMount(target, geesefsCmd, args)
	envs := []string{
		"AWS_ACCESS_KEY_ID=" + geesefs.accessKeyID,
		"AWS_SECRET_ACCESS_KEY=" + geesefs.secretAccessKey,
	}
	return fuseMount(target, geesefsCmd, args, envs)
}

type execCmd struct {
	Path             string
	Args             []string
	UncleanIsFailure bool
}

func (geesefs *geesefsMounter) Mount(target, volumeID string) error {

@@ -147,6 +155,11 @@ func (geesefs *geesefsMounter) Mount(target, volumeID string) error {
			Value: dbus.MakeVariant("GeeseFS mount for Kubernetes volume "+volumeID),
		},
		systemd.PropExecStart(args, false),
		systemd.Property{
			Name: "ExecStopPost",
			// force & lazy unmount to cleanup possibly dead mountpoints
			Value: dbus.MakeVariant([]execCmd{ execCmd{ "/bin/umount", []string{ "/bin/umount", "-f", "-l", target }, false } }),
		},
		systemd.Property{
			Name: "Environment",
			Value: dbus.MakeVariant([]string{ "AWS_ACCESS_KEY_ID="+geesefs.accessKeyID, "AWS_SECRET_ACCESS_KEY="+geesefs.secretAccessKey }),
@@ -57,9 +57,11 @@ func New(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
	}
}

func fuseMount(path string, command string, args []string) error {
func fuseMount(path string, command string, args []string, envs []string) error {
	cmd := exec.Command(command, args...)
	cmd.Stderr = os.Stderr
	// cmd.Environ() returns envs inherited from the current process
	cmd.Env = append(cmd.Environ(), envs...)
	glog.V(3).Infof("Mounting fuse with command: %s and args: %s", command, args)

	out, err := cmd.Output()
@@ -2,7 +2,6 @@ package mounter

import (
	"fmt"
	"os"
	"path"

	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"

@@ -47,7 +46,9 @@ func (rclone *rcloneMounter) Mount(target, volumeID string) error {
		args = append(args, fmt.Sprintf("--s3-region=%s", rclone.region))
	}
	args = append(args, rclone.meta.MountOptions...)
	os.Setenv("AWS_ACCESS_KEY_ID", rclone.accessKeyID)
	os.Setenv("AWS_SECRET_ACCESS_KEY", rclone.secretAccessKey)
	return fuseMount(target, rcloneCmd, args)
	envs := []string{
		"AWS_ACCESS_KEY_ID=" + rclone.accessKeyID,
		"AWS_SECRET_ACCESS_KEY=" + rclone.secretAccessKey,
	}
	return fuseMount(target, rcloneCmd, args, envs)
}
@@ -44,7 +44,7 @@ func (s3fs *s3fsMounter) Mount(target, volumeID string) error {
		args = append(args, "-o", fmt.Sprintf("endpoint=%s", s3fs.region))
	}
	args = append(args, s3fs.meta.MountOptions...)
	return fuseMount(target, s3fsCmd, args)
	return fuseMount(target, s3fsCmd, args, nil)
}

func writes3fsPass(pwFileContent string) error {
@@ -52,7 +52,7 @@ func NewClient(cfg *Config) (*s3Client, error) {
		endpoint = u.Hostname() + ":" + u.Port()
	}
	minioClient, err := minio.New(endpoint, &minio.Options{
		Creds:  credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, client.Config.Region),
		Creds:  credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, ""),
		Secure: ssl,
	})
	if err != nil {