Compare commits

...

59 commits

Author SHA1 Message Date
Vitaliy Filippov
25401592e1 Bump version to 0.38.3, following GeeseFS
- Make long directory listings faster (remove O(N^2))
- Fix long directory listings where some files could be skipped due to inode expiration
- Remove extra empty COPY requests when metadata is not changed
- Add --fsync-on-close option for synchronous s3fs-like mode
- Fix --disable-xattr error code
- Do not try to switch between STANDARD and COLD based on the object size
2023-09-28 11:45:36 +03:00
Vitaliy Filippov
8d1ad692e5 Add github pages helm chart URL 2023-09-13 16:53:02 +03:00
Vitaliy Filippov
16c6c0ee13
Merge pull request from kopytov/mime-types
Add /etc/mime.types to csi-s3 image
2023-08-30 16:40:06 +03:00
Vitaliy Filippov
227e1cf2dd Publish helm chart to github pages 2023-08-28 18:41:38 +03:00
Vitaliy Filippov
40086c1ffa Bump version to 0.37.4
- Update GeeseFS (now it also limits metadata memory usage)
- Return s3fs-fuse & rclone packages back because they are now fixed in Alpine
- Do not use hostNetwork: true for mounter... but it won't help much because GeeseFS is now started on host :)
- Publish helm chart to github pages
2023-08-28 18:40:53 +03:00
Dmitry Kopytov
6cfd3ebbb6 Add mailcap installation to Dockerfile 2023-08-25 12:20:47 +03:00
Vitaliy Filippov
195829887a Return s3fs-fuse & rclone back, fix (README) 2023-08-15 13:37:25 +03:00
Vitaliy Filippov
f7e3c21a87 Remove hostNetwork: true () 2023-07-27 19:08:03 +03:00
Vitaliy Filippov
06d059bfd1 Update descriptions in the Marketplace manifest 2023-07-26 16:25:29 +03:00
Vitaliy Filippov
b630faefa7 Bump version to 0.36.2
- Update GeeseFS
- Add CSIDriver and remove external-attacher
- Add missing mounts to Helm chart
- Allow to configure mounter in Helm chart
- Allow to configure kubelet path in Helm chart
- Add nodeSelector for provisioner in Helm chart
- Do not use os.Setenv to pass keys
- Fix minio client constructor argument
2023-07-19 16:07:29 +03:00
Vitaliy Filippov
4af9636d19 Add missing systemd-control and stage-dir to Helm chart (fixes ) 2023-07-19 15:59:36 +03:00
Vitaliy Filippov
44511523e2 Slightly improve property names 2023-07-06 11:36:11 +03:00
Vitaliy Filippov
dd0c0b68d5
Merge pull request from ksrt12/feat-nodeSelector
feat: add nodeSelector for provisioner
2023-06-28 20:15:18 +03:00
Stepan Kazakov
59a7605ad8
feat: add nodeSelector for provisioner 2023-06-26 00:09:58 +03:00
Vitaliy Filippov
5a3a517315 Note about deleting attacher 2023-06-20 20:45:59 +03:00
x.zhou
6b72154ebc deploy: add CSIDriver object and remove attacher 2023-06-20 20:40:17 +03:00
Vitaliy Filippov
a43867a307 Fix perms after merging 2023-06-20 14:03:48 +03:00
Vitaliy Filippov
e334aedd0c
Merge pull request from nuwang/add_mounter_and_optional_secrets
Add support for optional secrets
2023-06-20 13:19:38 +03:00
Vitaliy Filippov
9a04d5a6eb
Merge pull request from nuwang/add_configurable_mounter
Make mounter configurable
2023-06-20 13:18:26 +03:00
Vitaliy Filippov
874dedcd3b
Merge pull request from amonhuang/pvc-manual-options
add manual pvc mount options
2023-06-15 19:17:36 +03:00
Vitaliy Filippov
c7e066396b
Merge pull request from NiklasRosenstein/add-helm-kubeletPath-option
add a `kubeletPath` option to `values.yaml`
2023-06-15 19:11:30 +03:00
Vitaliy Filippov
8539ff0a48
Merge pull request from zjx20/fix/wrong-arg
Fix the wrong argument for credentials.NewStaticV4()
2023-06-15 16:01:47 +03:00
Niklas Rosenstein
0fb81f07e7
Update deploy/helm/values.yaml
Co-authored-by: Kashemir001 <14910998+Kashemir001@users.noreply.github.com>
2023-06-14 11:09:55 +02:00
x.zhou
37c35c788a fix compilation 2023-06-09 16:44:40 +08:00
x.zhou
680a649a21 Fix the wrong argument for credentials.NewStaticV4() 2023-06-09 16:10:33 +08:00
Vitaliy Filippov
519c4f0bd7
Merge pull request from zjx20/fix-env
Don't call os.Setenv()
2023-06-05 18:49:19 +03:00
x.zhou
8ea6111b0d Don't call os.Setenv() 2023-06-03 15:36:03 +08:00
Vitaliy Filippov
4e410df6e1 Bump version to 0.35.5 2023-05-23 14:59:44 +03:00
Vitaliy Filippov
7a415ae6ab Recheck and revive staged mount when mounting it to the real path 2023-05-23 14:58:01 +03:00
Vitaliy Filippov
96818e563a Cleanup mounts after stopping them using systemd 2023-05-23 14:40:53 +03:00
Niklas Rosenstein
5dbebd01bd add a kubeletPath option to values.yaml 2023-05-18 09:40:51 +00:00
Vitaliy Filippov
3b38d545ab Bump version to 0.35.4, following GeeseFS 2023-04-26 22:57:37 +03:00
Vitaliy Filippov
259c9ca561 Remove broken test workflow 2023-04-26 22:57:37 +03:00
Vitaliy Filippov
64a443a5e2 Bump version to 0.35.3 2023-04-26 12:04:40 +03:00
Vitaliy Filippov
701c86fa4d Filter out unsafe options 2023-04-26 12:04:40 +03:00
Vitaliy Filippov
ede57438c0 Cache downloaded Go modules in Docker build, use alpine 3.17 2023-04-26 12:04:31 +03:00
Vitaliy Filippov
2927c733fb Bump version to 0.35.2, following GeeseFS 2023-04-18 16:08:38 +03:00
Vitaliy Filippov
c31204b8e4 Bump version to 0.35.1, following GeeseFS 2023-04-05 14:15:34 +03:00
Vitaliy Filippov
8ac6bd58e4 Bump version to 0.35.0 2023-03-29 13:56:05 +03:00
Vitaliy Filippov
fe02df610f - Remove unneeded permissions from csi-s3 in k8s manifests 2023-03-27 18:16:16 +03:00
Vitaliy Filippov
c4031bcbc6
Merge pull request from Kirillovap/remove_excess_permissions
Remove excess permissions of csi-s3 account
2023-03-27 18:14:55 +03:00
kirillovap
5c78b9b69d Remove excess permissions of csi-s3 account 2023-03-27 15:05:14 +03:00
Vitaliy Filippov
f4d01e12c7 Fix stage-dir - it is different on some installations which leads to csi-s3 malfunction 2023-03-15 12:04:19 +03:00
Vitaliy Filippov
c6af9556d7 Bump version to 0.34.7
It's larger than GeeseFS version and may collide with it, but screw it,
we can invent a new versioning scheme later %)
2023-03-07 12:47:56 +03:00
Vitaliy Filippov
ecf1031dfc Implement mounting via stage directory
Previously, multiple containers using the same mounted volume resulted in multiple
FUSE processes. This behaviour broke parallel modifications from different containers,
consumed extra resources and, after mounting via systemd was introduced, made it
impossible to mount the same volume into multiple containers on the same host.

Now only one FUSE process is started per volume, per host.
2023-03-07 00:49:12 +03:00
Vitaliy Filippov
1305b20bae Bump version to 0.34.6 2023-03-04 13:09:49 +03:00
Vitaliy Filippov
2ad5d21714 Drop geesefs root privileges 2023-03-04 13:04:03 +03:00
Vitaliy Filippov
bfba08742c Implement support for running geesefs OUTSIDE of the container using systemd to not crash mountpoints when csi-s3 is upgraded or restarted 2023-03-03 00:44:05 +03:00
Vitaliy Filippov
e8d63dfc14 Add Nebius Israel container registry URL 2023-02-27 19:22:46 +03:00
Vitaliy Filippov
8bbd7ebaf0 Bump version to 0.34.4, following GeeseFS 2023-01-30 16:11:30 +03:00
Vitaliy Filippov
543704336f Bump version to 0.34.2, following GeeseFS 2022-12-21 16:15:47 +03:00
Vitaliy Filippov
63b1f45dba Add a note about static provisioning 2022-12-21 16:14:28 +03:00
Vitaliy Filippov
0a97f8d4ce Bump version to 0.34.1, following GeeseFS 2022-11-24 11:20:31 +03:00
Vitaliy Filippov
9dac91e1ec Remove NoExecute toleration for attacher, but add CriticalAddons back for provisioner and attacher 2022-11-17 01:21:59 +03:00
Vitaliy Filippov
514c0131dc
Merge pull request from NyrouxHide/master
Delete taint for provisioner
2022-11-17 01:17:26 +03:00
Nyroux
a1a001ce27 Delete taint for provisioner 2022-11-16 23:30:10 +03:00
amonhuang
f658121c77 add manual pvc mount options 2022-09-28 10:45:01 +08:00
Nuwan Goonasekera
2c85a614ea Make mounter configurable 2022-06-24 21:05:53 +05:30
Nuwan Goonasekera
a3fa9f3696 Make access key and secret key optional when using iam 2022-05-21 23:30:54 +05:30
29 changed files with 446 additions and 367 deletions

48
.github/workflows/pages.yml
View file

@ -0,0 +1,48 @@
name: Publish Helm chart
on:
push:
branches:
- master
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
contents: write
pages: write
id-token: write
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
group: "pages"
cancel-in-progress: false
jobs:
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup Pages
uses: actions/configure-pages@v3
- name: Publish Helm chart
uses: stefanprodan/helm-gh-pages@0ad2bb377311d61ac04ad9eb6f252fb68e207260 # v1.7.0
with:
# GitHub automatically creates a GITHUB_TOKEN secret to use in your workflow. You can use the GITHUB_TOKEN to authenticate in a workflow run.
# See https://docs.github.com/en/actions/reference/authentication-in-a-workflow#about-the-github_token-secret
token: ${{ secrets.GITHUB_TOKEN }}
branch: github-pages
charts_dir: deploy/helm
target_dir: charts
linting: off

View file

@ -1,16 +0,0 @@
name: Test
on:
push:
tags:
- "v*"
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Test
run: make test

View file

@@ -1,22 +1,18 @@
-FROM golang:1.16-alpine as gobuild
+FROM golang:1.19-alpine as gobuild
 WORKDIR /build
 ADD go.mod go.sum /build/
+RUN go mod download -x
 ADD cmd /build/cmd
 ADD pkg /build/pkg
-RUN go get -d -v ./...
 RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o ./s3driver ./cmd/s3driver
-FROM alpine:3.16
+FROM alpine:3.17
 LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
 LABEL description="csi-s3 slim image"
-# apk add temporarily broken:
-#ERROR: unable to select packages:
-#  so:libcrypto.so.3 (no such package):
-#    required by: s3fs-fuse-1.91-r1[so:libcrypto.so.3]
-#RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/testing s3fs-fuse rclone
+RUN apk add --no-cache fuse mailcap rclone
+RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community s3fs-fuse
 ADD https://github.com/yandex-cloud/geesefs/releases/latest/download/geesefs-linux-amd64 /usr/bin/geesefs
 RUN chmod 755 /usr/bin/geesefs

View file

@@ -14,9 +14,10 @@
 .PHONY: test build container push clean
 REGISTRY_NAME=cr.yandex/crp9ftr22d26age3hulg
+REGISTRY_NAME2=cr.il.nebius.cloud/crll7us9n6i5j3v4n92m
 IMAGE_NAME=csi-s3
 IMAGE_NAME2=yandex-cloud/csi-s3/csi-s3-driver
-VERSION ?= 0.34.0
+VERSION ?= 0.38.3
 IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(VERSION)
 TEST_IMAGE_TAG=$(IMAGE_NAME):test

View file

@@ -10,7 +10,19 @@ This is a Container Storage Interface ([CSI](https://github.com/container-storag
 * Kubernetes has to allow privileged containers
 * Docker daemon must allow shared mounts (systemd flag `MountFlags=shared`)
-### 1. Create a secret with your S3 credentials
+### Helm chart
+Helm chart is published at `https://yandex-cloud.github.io/k8s-csi-s3`:
+```
+helm repo add yandex-s3 https://yandex-cloud.github.io/k8s-csi-s3/charts
+helm install csi-s3 yandex-s3/csi-s3
+```
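For illustration, chart values like the following could be supplied via `-f values.yaml` (or `--set`) during install. This is a minimal sketch, not part of the diff above: the key names are the chart's documented values, the credentials are placeholders, and per the "optional secrets" change the access keys may be left empty when IAM-based authentication is used.

```yaml
# Illustrative values override for `helm install csi-s3 yandex-s3/csi-s3 -f values.yaml`.
# Key names follow the chart's documented values; credential values are placeholders.
secret:
  create: true
  accessKey: "YOUR_ACCESS_KEY"   # placeholder; may be left empty when using IAM auth
  secretKey: "YOUR_SECRET_KEY"   # placeholder; may be left empty when using IAM auth
  endpoint: "https://storage.yandexcloud.net"
storageClass:
  create: true
  name: csi-s3
  mounter: geesefs               # geesefs, s3fs or rclone
  mountOptions: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
  singleBucket: ""               # empty: a separate bucket per volume
  reclaimPolicy: Delete
kubeletPath: /var/lib/kubelet    # override if kubelet uses a non-default data directory
```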
+### Manual installation
+#### 1. Create a secret with your S3 credentials
 ```yaml
 apiVersion: v1
@@ -30,22 +42,30 @@ stringData:
 The region can be empty if you are using some other S3 compatible storage.
-### 2. Deploy the driver
+#### 2. Deploy the driver
 ```bash
 cd deploy/kubernetes
 kubectl create -f provisioner.yaml
-kubectl create -f attacher.yaml
+kubectl create -f driver.yaml
 kubectl create -f csi-s3.yaml
 ```
-### 3. Create the storage class
+If you're upgrading from a previous version which had `attacher.yaml` you
+can safely delete all resources created from that file:
+```
+wget https://raw.githubusercontent.com/yandex-cloud/k8s-csi-s3/v0.35.5/deploy/kubernetes/attacher.yaml
+kubectl delete -f attacher.yaml
+```
+#### 3. Create the storage class
 ```bash
 kubectl create -f examples/storageclass.yaml
 ```
-### 4. Test the S3 driver
+#### 4. Test the S3 driver
 1. Create a pvc using the new storage class:
@@ -74,8 +94,8 @@ kubectl create -f examples/storageclass.yaml
 ```bash
 $ kubectl exec -ti csi-s3-test-nginx bash
 $ mount | grep fuse
-s3fs on /var/lib/www/html type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
+pvc-035763df-0488-4941-9a34-f637292eb95c: on /usr/share/nginx/html/s3 type fuse.geesefs (rw,nosuid,nodev,relatime,user_id=65534,group_id=0,default_permissions,allow_other)
-$ touch /var/lib/www/html/hello_world
+$ touch /usr/share/nginx/html/s3/hello_world
 ```
 If something does not work as expected, check the troubleshooting section below.
@@ -94,11 +114,18 @@ metadata:
 provisioner: ru.yandex.s3.csi
 parameters:
   mounter: geesefs
+  options: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
   bucket: some-existing-bucket-name
 ```
 If the bucket is specified, it will still be created if it does not exist on the backend. Every volume will get its own prefix within the bucket which matches the volume ID. When deleting a volume, also just the prefix will be deleted.
+### Static Provisioning
+If you want to mount a pre-existing bucket or prefix within a pre-existing bucket and don't want csi-s3 to delete it when PV is deleted, you can use static provisioning.
+To do that you should omit `storageClassName` in the `PersistentVolumeClaim` and manually create a `PersistentVolume` with a matching `claimRef`, like in the following example: [deploy/kubernetes/examples/pvc-manual.yaml](deploy/kubernetes/examples/pvc-manual.yaml).
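A minimal sketch of such a pair might look as follows. It loosely mirrors deploy/kubernetes/examples/pvc-manual.yaml, but the resource names, namespace, access mode and capacity are illustrative assumptions, and the credential secret references the driver needs are omitted for brevity.

```yaml
# Illustrative sketch only; see deploy/kubernetes/examples/pvc-manual.yaml for the real example.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: manual-s3-pv                    # assumed name
spec:
  capacity:
    storage: 10Gi                       # assumed size; must match the PVC request
  accessModes:
    - ReadWriteMany                     # assumed access mode
  claimRef:                             # pre-binds this PV to the PVC below
    namespace: default
    name: manual-s3-pvc
  csi:
    driver: ru.yandex.s3.csi
    volumeHandle: manualbucket/path     # existing bucket (and optional prefix)
    volumeAttributes:
      capacity: 10Gi
      mounter: geesefs
      options: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
    # credential secret references (e.g. nodePublishSecretRef) omitted here;
    # see the repository example for the complete manifest
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: manual-s3-pvc
  namespace: default
spec:
  # storageClassName is omitted on purpose, as described above
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
```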
 ### Mounter
 We **strongly recommend** to use the default mounter which is [GeeseFS](https://github.com/yandex-cloud/geesefs).
@@ -118,6 +145,10 @@ You can check POSIX compatibility matrix here: https://github.com/yandex-cloud/g
 * Almost full POSIX compatibility
 * Good performance for both small and big files
 * Does not store file permissions and custom modification times
+* By default runs **outside** of the csi-s3 container using systemd, to not crash
+  mountpoints with "Transport endpoint is not connected" when csi-s3 is upgraded
+  or restarted. Add `--no-systemd` to `parameters.options` of the `StorageClass`
+  to disable this behaviour.
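As an illustration, a StorageClass that disables this systemd behaviour might look roughly like the following; the class name is arbitrary and the remaining options simply repeat the defaults shown earlier in the README.

```yaml
# Illustrative sketch; the class name is an assumption, the option list is an example.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-s3-no-systemd
provisioner: ru.yandex.s3.csi
parameters:
  mounter: geesefs
  # --no-systemd keeps GeeseFS inside the csi-s3 container instead of a host systemd unit
  options: "--no-systemd --memory-limit 1000 --dir-mode 0777 --file-mode 0666"
```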
 #### s3fs

View file

@@ -1,9 +1,9 @@
 ---
 apiVersion: v1
-appVersion: 0.34.0
+appVersion: 0.38.3
 description: "Container Storage Interface (CSI) driver for S3 volumes"
 name: csi-s3
-version: 0.34.0
+version: 0.38.3
 keywords:
   - s3
 home: https://github.com/yandex-cloud/k8s-csi-s3

View file

@@ -26,6 +26,7 @@ The following table lists all configuration parameters and their default values.
 | `storageClass.create` | Specifies whether the storage class should be created | true |
 | `storageClass.name` | Storage class name | csi-s3 |
 | `storageClass.singleBucket` | Use a single bucket for all dynamically provisioned persistent volumes | |
+| `storageClass.mounter` | Mounter to use. Either geesefs, s3fs or rclone. geesefs recommended | geesefs |
 | `storageClass.mountOptions` | GeeseFS mount options | `--memory-limit 1000 --dir-mode 0777 --file-mode 0666` |
 | `storageClass.reclaimPolicy` | Volume reclaim policy | Delete |
 | `storageClass.annotations` | Annotations for the storage class | |

View file

@ -1,10 +1,9 @@
helm_chart: helm_chart:
name: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3 name: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3
tag: 0.34.0 tag: 0.38.3
requirements: requirements:
k8s_version: ">=1.13" k8s_version: ">=1.13"
images: images:
- full: images.attacher
- full: images.registrar - full: images.registrar
- full: images.provisioner - full: images.provisioner
- full: images.csi - full: images.csi
@ -15,7 +14,7 @@ user_values:
ru: Создать класс хранения ru: Создать класс хранения
description: description:
en: Specifies whether the storage class should be created en: Specifies whether the storage class should be created
ru: 'Если "да", при установке будет создан класс хранения S3' ru: Выберите, чтобы создать новый S3-класс хранения при развёртывании приложения.
boolean_value: boolean_value:
default_value: true default_value: true
- name: secret.create - name: secret.create
@ -24,7 +23,7 @@ user_values:
ru: Создать секрет ru: Создать секрет
description: description:
en: Specifies whether the secret should be created en: Specifies whether the secret should be created
ru: 'Если "да", при установке будет создан секрет, иначе для класса хранения будет использован существующий' ru: Выберите, чтобы создать новый секрет для класса хранения при установке приложения, а не использовать существующий.
boolean_value: boolean_value:
default_value: true default_value: true
- name: secret.accessKey - name: secret.accessKey
@ -33,7 +32,7 @@ user_values:
ru: Идентификатор ключа S3 ru: Идентификатор ключа S3
description: description:
en: S3 Access Key ID en: S3 Access Key ID
ru: Идентификатор ключа S3 ru: Идентификатор ключа S3.
string_value: string_value:
default_value: "" default_value: ""
- name: secret.secretKey - name: secret.secretKey
@ -42,16 +41,16 @@ user_values:
ru: Секретный ключ S3 ru: Секретный ключ S3
description: description:
en: S3 Secret Key en: S3 Secret Key
ru: Секретный ключ S3 ru: Секретный ключ S3.
string_value: string_value:
default_value: "" default_value: ""
- name: storageClass.singleBucket - name: storageClass.singleBucket
title: title:
en: Single S3 bucket for volumes en: Single S3 bucket for volumes
ru: Общий S3 бакет для томов ru: Общий бакет S3 для томов
description: description:
en: Single S3 bucket to use for all dynamically provisioned persistent volumes en: Single S3 bucket to use for all dynamically provisioned persistent volumes
ru: Общий бакет S3, в котором будут создаваться все динамически распределяемые тома. Если пусто, под каждый том будет создаваться новый бакет ru: Общий бакет S3, в котором будут создаваться все динамически распределяемые тома. Если пусто, под каждый том будет создаваться новый бакет.
string_value: string_value:
default_value: "" default_value: ""
- name: secret.endpoint - name: secret.endpoint
@ -60,7 +59,7 @@ user_values:
ru: Адрес S3-сервиса ru: Адрес S3-сервиса
description: description:
en: S3 service endpoint to use en: S3 service endpoint to use
ru: Адрес S3-сервиса, который будет использоваться ru: Адрес S3-сервиса, который будет использоваться.
string_value: string_value:
default_value: "https://storage.yandexcloud.net" default_value: "https://storage.yandexcloud.net"
- name: storageClass.mountOptions - name: storageClass.mountOptions
@ -68,8 +67,8 @@ user_values:
en: GeeseFS mount options en: GeeseFS mount options
ru: Опции монтирования GeeseFS ru: Опции монтирования GeeseFS
description: description:
en: GeeseFS mount options to use. Consult GeeseFS (https://github.com/yandex-cloud/geesefs) help for the full option list en: GeeseFS mount options to use. Refer to `geesefs --help` command output for the whole list of options (https://github.com/yandex-cloud/geesefs).
ru: Опции монтирования GeeseFS. Смотрите справку GeeseFS (https://github.com/yandex-cloud/geesefs) для полного перечня опций ru: Опции монтирования GeeseFS. Полный перечень и описание опций смотрите в выводе команды `geesefs --help` (https://github.com/yandex-cloud/geesefs).
string_value: string_value:
default_value: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666" default_value: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
- name: storageClass.reclaimPolicy - name: storageClass.reclaimPolicy
@ -78,7 +77,7 @@ user_values:
ru: Политика очистки томов ru: Политика очистки томов
description: description:
en: Volume reclaim policy for the storage class (Retain or Delete) en: Volume reclaim policy for the storage class (Retain or Delete)
ru: Политика очистки PV, связанных с PVC (Retain - сохранять при удалении PVC, Delete - удалять при удалении PVC) ru: Выберите политику очистки томов PersistentVolume при удалении PersistentVolumeClaim. Retain — сохранять том, Delete — удалять том.
string_selector_value: string_selector_value:
default_value: Delete default_value: Delete
values: values:
@ -90,7 +89,7 @@ user_values:
ru: Название класса хранения ru: Название класса хранения
description: description:
en: Name of the storage class that will be created en: Name of the storage class that will be created
ru: Название класса хранения, который будет создан при установке ru: Название класса хранения, который будет создан при установке.
string_value: string_value:
default_value: csi-s3 default_value: csi-s3
- name: secret.name - name: secret.name
@ -99,15 +98,15 @@ user_values:
ru: Название секрета ru: Название секрета
description: description:
en: Name of the secret to create or use for the storage class en: Name of the secret to create or use for the storage class
ru: Название секрета, который будет создан или использован для класса хранения ru: Название секрета, который будет создан или использован для класса хранения.
string_value: string_value:
default_value: csi-s3-secret default_value: csi-s3-secret
- name: tolerations.all - name: tolerations.all
title: title:
en: Tolerate all taints en: Tolerate all taints
ru: Игнорировать все taint ru: Игнорировать все политики taint
description: description:
en: Tolerate all taints by the CSI-S3 node driver (mounter) en: Tolerate all taints by the CSI-S3 node driver (mounter)
ru: Игнорировать все taint-ы узлов кластера драйвером CSI-S3, монтирующим ФС на узлах ru: Выберите, чтобы драйвер CSI, который монтирует файловую систему на узлах, игнорировал все политики taint для узлов кластера.
boolean_value: boolean_value:
default_value: false default_value: false

View file

@ -8,22 +8,6 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
name: csi-s3 name: csi-s3
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
--- ---
kind: ClusterRoleBinding kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
@ -66,7 +50,6 @@ spec:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
serviceAccount: csi-s3 serviceAccount: csi-s3
hostNetwork: true
containers: containers:
- name: driver-registrar - name: driver-registrar
image: {{ .Values.images.registrar }} image: {{ .Values.images.registrar }}
@ -78,7 +61,7 @@ spec:
- name: ADDRESS - name: ADDRESS
value: /csi/csi.sock value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH - name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
- name: KUBE_NODE_NAME - name: KUBE_NODE_NAME
valueFrom: valueFrom:
fieldRef: fieldRef:
@ -110,24 +93,37 @@ spec:
volumeMounts: volumeMounts:
- name: plugin-dir - name: plugin-dir
mountPath: /csi mountPath: /csi
- name: stage-dir
mountPath: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
mountPropagation: "Bidirectional"
- name: pods-mount-dir - name: pods-mount-dir
mountPath: /var/lib/kubelet/pods mountPath: {{ .Values.kubeletPath }}/pods
mountPropagation: "Bidirectional" mountPropagation: "Bidirectional"
- name: fuse-device - name: fuse-device
mountPath: /dev/fuse mountPath: /dev/fuse
- name: systemd-control
mountPath: /run/systemd
volumes: volumes:
- name: registration-dir - name: registration-dir
hostPath: hostPath:
path: /var/lib/kubelet/plugins_registry/ path: {{ .Values.kubeletPath }}/plugins_registry/
type: DirectoryOrCreate type: DirectoryOrCreate
- name: plugin-dir - name: plugin-dir
hostPath: hostPath:
path: /var/lib/kubelet/plugins/ru.yandex.s3.csi path: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
type: DirectoryOrCreate
- name: stage-dir
hostPath:
path: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
type: DirectoryOrCreate type: DirectoryOrCreate
- name: pods-mount-dir - name: pods-mount-dir
hostPath: hostPath:
path: /var/lib/kubelet/pods path: {{ .Values.kubeletPath }}/pods
type: Directory type: Directory
- name: fuse-device - name: fuse-device
hostPath: hostPath:
path: /dev/fuse path: /dev/fuse
- name: systemd-control
hostPath:
path: /run/systemd
type: DirectoryOrCreate

View file

@ -0,0 +1,10 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: ru.yandex.s3.csi
spec:
attachRequired: false
podInfoOnMount: true
fsGroupPolicy: File # added in Kubernetes 1.19, this field is GA as of Kubernetes 1.23
volumeLifecycleModes: # added in Kubernetes 1.16, this field is beta
- Persistent

View file

@ -74,12 +74,13 @@ spec:
operator: Exists operator: Exists
- key: CriticalAddonsOnly - key: CriticalAddonsOnly
operator: Exists operator: Exists
- operator: Exists
effect: NoExecute
tolerationSeconds: 300
{{- with .Values.tolerations.controller }} {{- with .Values.tolerations.controller }}
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
containers: containers:
- name: csi-provisioner - name: csi-provisioner
image: {{ .Values.images.provisioner }} image: {{ .Values.images.provisioner }}
@ -88,11 +89,11 @@ spec:
- "--v=4" - "--v=4"
env: env:
- name: ADDRESS - name: ADDRESS
value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
imagePullPolicy: "IfNotPresent" imagePullPolicy: "IfNotPresent"
volumeMounts: volumeMounts:
- name: socket-dir - name: socket-dir
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
- name: csi-s3 - name: csi-s3
image: {{ .Values.images.csi }} image: {{ .Values.images.csi }}
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
@ -102,14 +103,14 @@ spec:
- "--v=4" - "--v=4"
env: env:
- name: CSI_ENDPOINT - name: CSI_ENDPOINT
value: unix:///var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock value: unix://{{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
- name: NODE_ID - name: NODE_ID
valueFrom: valueFrom:
fieldRef: fieldRef:
fieldPath: spec.nodeName fieldPath: spec.nodeName
volumeMounts: volumeMounts:
- name: socket-dir - name: socket-dir
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
volumes: volumes:
- name: socket-dir - name: socket-dir
emptyDir: {} emptyDir: {}

View file

@@ -5,7 +5,11 @@ metadata:
   namespace: {{ .Release.Namespace }}
   name: {{ .Values.secret.name }}
 stringData:
+  {{- if .Values.secret.accessKey }}
   accessKeyID: {{ .Values.secret.accessKey }}
+  {{- end }}
+  {{- if .Values.secret.secretKey }}
   secretAccessKey: {{ .Values.secret.secretKey }}
+  {{- end }}
   endpoint: {{ .Values.secret.endpoint }}
 {{- end -}}

View file

@@ -9,7 +9,7 @@ metadata:
 {{- end }}
 provisioner: ru.yandex.s3.csi
 parameters:
-  mounter: geesefs
+  mounter: "{{ .Values.storageClass.mounter }}"
   options: "{{ .Values.storageClass.mountOptions }}"
 {{- if .Values.storageClass.singleBucket }}
   bucket: "{{ .Values.storageClass.singleBucket }}"

View file

@ -1,13 +1,11 @@
--- ---
images: images:
# Source: quay.io/k8scsi/csi-attacher:v3.0.1
attacher: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-attacher:v3.0.1
# Source: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0 # Source: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
registrar: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-node-driver-registrar:v1.2.0 registrar: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-node-driver-registrar:v1.2.0
# Source: quay.io/k8scsi/csi-provisioner:v2.1.0 # Source: quay.io/k8scsi/csi-provisioner:v2.1.0
provisioner: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-provisioner:v2.1.0 provisioner: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-provisioner:v2.1.0
# Main image # Main image
csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3-driver:0.34.0 csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3-driver:0.38.3
storageClass: storageClass:
# Specifies whether the storage class should be created # Specifies whether the storage class should be created
@ -16,6 +14,8 @@ storageClass:
name: csi-s3 name: csi-s3
# Use a single bucket for all dynamically provisioned persistent volumes # Use a single bucket for all dynamically provisioned persistent volumes
singleBucket: "" singleBucket: ""
# mounter to use - either geesefs, s3fs or rclone (default geesefs)
mounter: geesefs
# GeeseFS mount options # GeeseFS mount options
mountOptions: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666" mountOptions: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
# Volume reclaim policy # Volume reclaim policy
@ -42,3 +42,7 @@ tolerations:
all: false all: false
node: [] node: []
controller: [] controller: []
nodeSelector: {}
kubeletPath: /var/lib/kubelet

View file

@ -1,99 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher-sa
namespace: {{ .Release.Namespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io
---
# needed for StatefulSet
kind: Service
apiVersion: v1
metadata:
name: csi-attacher-s3
namespace: {{ .Release.Namespace }}
labels:
app: csi-attacher-s3
spec:
selector:
app: csi-attacher-s3
ports:
- name: csi-s3-dummy
port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-attacher-s3
namespace: {{ .Release.Namespace }}
spec:
serviceName: "csi-attacher-s3"
replicas: 1
selector:
matchLabels:
app: csi-attacher-s3
template:
metadata:
labels:
app: csi-attacher-s3
spec:
serviceAccount: csi-attacher-sa
tolerations:
- key: node-role.kubernetes.io/master
operator: "Exists"
containers:
- name: csi-attacher
image: {{ .Values.images.attacher }}
args:
- "--v=4"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
type: DirectoryOrCreate

View file

@ -1,104 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io
---
# needed for StatefulSet
kind: Service
apiVersion: v1
metadata:
name: csi-attacher-s3
namespace: kube-system
labels:
app: csi-attacher-s3
spec:
selector:
app: csi-attacher-s3
ports:
- name: csi-s3-dummy
port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-attacher-s3
namespace: kube-system
spec:
serviceName: "csi-attacher-s3"
replicas: 1
selector:
matchLabels:
app: csi-attacher-s3
template:
metadata:
labels:
app: csi-attacher-s3
spec:
serviceAccount: csi-attacher-sa
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
- operator: Exists
effect: NoExecute
tolerationSeconds: 300
containers:
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v3.0.1
args:
- "--v=4"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
type: DirectoryOrCreate

View file

@ -8,22 +8,6 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
name: csi-s3 name: csi-s3
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
--- ---
kind: ClusterRoleBinding kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
@ -59,7 +43,6 @@ spec:
effect: NoExecute effect: NoExecute
tolerationSeconds: 300 tolerationSeconds: 300
serviceAccount: csi-s3 serviceAccount: csi-s3
hostNetwork: true
containers: containers:
- name: driver-registrar - name: driver-registrar
image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0 image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
@ -87,7 +70,7 @@ spec:
capabilities: capabilities:
add: ["SYS_ADMIN"] add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true allowPrivilegeEscalation: true
image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.34.0 image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
args: args:
- "--endpoint=$(CSI_ENDPOINT)" - "--endpoint=$(CSI_ENDPOINT)"
@ -103,11 +86,16 @@ spec:
volumeMounts: volumeMounts:
- name: plugin-dir - name: plugin-dir
mountPath: /csi mountPath: /csi
- name: stage-dir
mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
mountPropagation: "Bidirectional"
- name: pods-mount-dir - name: pods-mount-dir
mountPath: /var/lib/kubelet/pods mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional" mountPropagation: "Bidirectional"
- name: fuse-device - name: fuse-device
mountPath: /dev/fuse mountPath: /dev/fuse
- name: systemd-control
mountPath: /run/systemd
volumes: volumes:
- name: registration-dir - name: registration-dir
hostPath: hostPath:
@ -117,6 +105,10 @@ spec:
hostPath: hostPath:
path: /var/lib/kubelet/plugins/ru.yandex.s3.csi path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
type: DirectoryOrCreate type: DirectoryOrCreate
- name: stage-dir
hostPath:
path: /var/lib/kubelet/plugins/kubernetes.io/csi
type: DirectoryOrCreate
- name: pods-mount-dir - name: pods-mount-dir
hostPath: hostPath:
path: /var/lib/kubelet/pods path: /var/lib/kubelet/pods
@ -124,3 +116,7 @@ spec:
- name: fuse-device - name: fuse-device
hostPath: hostPath:
path: /dev/fuse path: /dev/fuse
- name: systemd-control
hostPath:
path: /run/systemd
type: DirectoryOrCreate

View file

@ -0,0 +1,7 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: ru.yandex.s3.csi
spec:
attachRequired: false
podInfoOnMount: true

View file

@@ -29,6 +29,7 @@ spec:
     volumeAttributes:
       capacity: 10Gi
       mounter: geesefs
+      options: --memory-limit 1000 --dir-mode 0777 --file-mode 0666
     volumeHandle: manualbucket/path
 ---
 apiVersion: v1

View file

@ -70,8 +70,10 @@ spec:
spec: spec:
serviceAccount: csi-provisioner-sa serviceAccount: csi-provisioner-sa
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
operator: "Exists" operator: Exists
- key: CriticalAddonsOnly
operator: Exists
containers: containers:
- name: csi-provisioner - name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v2.1.0 image: quay.io/k8scsi/csi-provisioner:v2.1.0
@ -86,7 +88,7 @@ spec:
- name: socket-dir - name: socket-dir
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
- name: csi-s3 - name: csi-s3
image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.34.0 image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
args: args:
- "--endpoint=$(CSI_ENDPOINT)" - "--endpoint=$(CSI_ENDPOINT)"

2
go.mod
View file

@@ -4,6 +4,8 @@ go 1.15
 require (
     github.com/container-storage-interface/spec v1.1.0
+    github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+    github.com/godbus/dbus/v5 v5.0.4 // indirect
     github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
     github.com/golang/protobuf v1.1.0 // indirect
     github.com/kubernetes-csi/csi-lib-utils v0.6.1 // indirect

4
go.sum
View file

@ -1,9 +1,13 @@
github.com/container-storage-interface/spec v1.1.0 h1:qPsTqtR1VUPvMPeK0UnCZMtXaKGyyLPG8gj/wG6VqMs= github.com/container-storage-interface/spec v1.1.0 h1:qPsTqtR1VUPvMPeK0UnCZMtXaKGyyLPG8gj/wG6VqMs=
github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc= github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=

View file

@@ -33,7 +33,7 @@ type driver struct {
 }
 var (
-    vendorVersion = "v1.2.0"
+    vendorVersion = "v1.34.7"
     driverName    = "ru.yandex.s3.csi"
 )

View file

@ -19,6 +19,7 @@ package driver
import ( import (
"fmt" "fmt"
"os" "os"
"os/exec"
"regexp" "regexp"
"strconv" "strconv"
@ -68,7 +69,6 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
volumeID := req.GetVolumeId() volumeID := req.GetVolumeId()
targetPath := req.GetTargetPath() targetPath := req.GetTargetPath()
stagingTargetPath := req.GetStagingTargetPath() stagingTargetPath := req.GetStagingTargetPath()
bucketName, prefix := volumeIDToBucketPrefix(volumeID)
// Check arguments // Check arguments
if req.GetVolumeCapability() == nil { if req.GetVolumeCapability() == nil {
@ -84,7 +84,28 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
return nil, status.Error(codes.InvalidArgument, "Target path missing in request") return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
} }
notMnt, err := checkMount(targetPath) notMnt, err := checkMount(stagingTargetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if notMnt {
// Staged mount is dead by some reason. Revive it
bucketName, prefix := volumeIDToBucketPrefix(volumeID)
s3, err := s3.NewClientFromSecret(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
meta := getMeta(bucketName, prefix, req.VolumeContext)
mounter, err := mounter.New(meta, s3.Config)
if err != nil {
return nil, err
}
if err := mounter.Mount(stagingTargetPath, volumeID); err != nil {
return nil, err
}
}
notMnt, err = checkMount(targetPath)
if err != nil { if err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
@ -100,18 +121,12 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
glog.V(4).Infof("target %v\nreadonly %v\nvolumeId %v\nattributes %v\nmountflags %v\n", glog.V(4).Infof("target %v\nreadonly %v\nvolumeId %v\nattributes %v\nmountflags %v\n",
targetPath, readOnly, volumeID, attrib, mountFlags) targetPath, readOnly, volumeID, attrib, mountFlags)
s3, err := s3.NewClientFromSecret(req.GetSecrets()) cmd := exec.Command("mount", "--bind", stagingTargetPath, targetPath)
cmd.Stderr = os.Stderr
glog.V(3).Infof("Binding volume %v from %v to %v", volumeID, stagingTargetPath, targetPath)
out, err := cmd.Output()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err) return nil, fmt.Errorf("Error running mount --bind %v %v: %s", stagingTargetPath, targetPath, out)
}
meta := getMeta(bucketName, prefix, req.VolumeContext)
mounter, err := mounter.New(meta, s3.Config)
if err != nil {
return nil, err
}
if err := mounter.Mount(stagingTargetPath, targetPath); err != nil {
return nil, err
} }
glog.V(4).Infof("s3: volume %s successfully mounted to %s", volumeID, targetPath) glog.V(4).Infof("s3: volume %s successfully mounted to %s", volumeID, targetPath)
@ -131,7 +146,7 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
return nil, status.Error(codes.InvalidArgument, "Target path missing in request") return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
} }
if err := mounter.FuseUnmount(targetPath); err != nil { if err := mounter.Unmount(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
glog.V(4).Infof("s3: volume %s has been unmounted.", volumeID) glog.V(4).Infof("s3: volume %s has been unmounted.", volumeID)
@ -174,7 +189,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err := mounter.Stage(stagingTargetPath); err != nil { if err := mounter.Mount(stagingTargetPath, volumeID); err != nil {
return nil, err return nil, err
} }
@ -193,6 +208,22 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
return nil, status.Error(codes.InvalidArgument, "Target path missing in request") return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
} }
proc, err := mounter.FindFuseMountProcess(stagingTargetPath)
if err != nil {
return nil, err
}
exists := false
if proc == nil {
exists, err = mounter.SystemdUnmount(volumeID)
if exists && err != nil {
return nil, err
}
}
if !exists {
err = mounter.FuseUnmount(stagingTargetPath)
}
glog.V(4).Infof("s3: volume %s has been unmounted from stage path %v.", volumeID, stagingTargetPath)
return &csi.NodeUnstageVolumeResponse{}, nil return &csi.NodeUnstageVolumeResponse{}, nil
} }

View file

@ -3,6 +3,12 @@ package mounter
import ( import (
"fmt" "fmt"
"os" "os"
"strings"
"time"
systemd "github.com/coreos/go-systemd/v22/dbus"
dbus "github.com/godbus/dbus/v5"
"github.com/golang/glog"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3" "github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
) )
@ -30,27 +36,170 @@ func newGeeseFSMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
}, nil }, nil
} }
func (geesefs *geesefsMounter) Stage(stageTarget string) error { func (geesefs *geesefsMounter) CopyBinary(from, to string) error {
st, err := os.Stat(from)
if err != nil {
return fmt.Errorf("Failed to stat %s: %v", from, err)
}
st2, err := os.Stat(to)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("Failed to stat %s: %v", to, err)
}
if err != nil || st2.Size() != st.Size() || st2.ModTime() != st.ModTime() {
if err == nil {
// remove the file first to not hit "text file busy" errors
err = os.Remove(to)
if err != nil {
return fmt.Errorf("Error removing %s to update it: %v", to, err)
}
}
bin, err := os.ReadFile(from)
if err != nil {
return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
}
err = os.WriteFile(to, bin, 0755)
if err != nil {
return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
}
err = os.Chtimes(to, st.ModTime(), st.ModTime())
if err != nil {
return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
}
}
return nil return nil
} }
func (geesefs *geesefsMounter) Unstage(stageTarget string) error { func (geesefs *geesefsMounter) MountDirect(target string, args []string) error {
return nil args = append([]string{
}
func (geesefs *geesefsMounter) Mount(source string, target string) error {
fullPath := fmt.Sprintf("%s:%s", geesefs.meta.BucketName, geesefs.meta.Prefix)
args := []string{
"--endpoint", geesefs.endpoint, "--endpoint", geesefs.endpoint,
"-o", "allow_other", "-o", "allow_other",
"--log-file", "/dev/stderr", "--log-file", "/dev/stderr",
}, args...)
envs := []string{
"AWS_ACCESS_KEY_ID=" + geesefs.accessKeyID,
"AWS_SECRET_ACCESS_KEY=" + geesefs.secretAccessKey,
} }
return fuseMount(target, geesefsCmd, args, envs)
}
type execCmd struct {
Path string
Args []string
UncleanIsFailure bool
}
func (geesefs *geesefsMounter) Mount(target, volumeID string) error {
fullPath := fmt.Sprintf("%s:%s", geesefs.meta.BucketName, geesefs.meta.Prefix)
var args []string
if geesefs.region != "" { if geesefs.region != "" {
args = append(args, "--region", geesefs.region) args = append(args, "--region", geesefs.region)
} }
args = append(args, geesefs.meta.MountOptions...) args = append(
args,
"--setuid", "65534", // nobody. drop root privileges
"--setgid", "65534", // nogroup
)
useSystemd := true
for i := 0; i < len(geesefs.meta.MountOptions); i++ {
opt := geesefs.meta.MountOptions[i]
if opt == "--no-systemd" {
useSystemd = false
} else if len(opt) > 0 && opt[0] == '-' {
// Remove unsafe options
s := 1
if len(opt) > 1 && opt[1] == '-' {
s++
}
key := opt[s:]
e := strings.Index(opt, "=")
if e >= 0 {
key = opt[s:e]
}
if key == "log-file" || key == "shared-config" || key == "cache" {
// Skip options accessing local FS
if e < 0 {
i++
}
} else if key != "" {
args = append(args, opt)
}
} else if len(opt) > 0 {
args = append(args, opt)
}
}
args = append(args, fullPath, target) args = append(args, fullPath, target)
os.Setenv("AWS_ACCESS_KEY_ID", geesefs.accessKeyID) // Try to start geesefs using systemd so it doesn't get killed when the container exits
os.Setenv("AWS_SECRET_ACCESS_KEY", geesefs.secretAccessKey) if !useSystemd {
return fuseMount(target, geesefsCmd, args) return geesefs.MountDirect(target, args)
}
conn, err := systemd.New()
if err != nil {
glog.Errorf("Failed to connect to systemd dbus service: %v, starting geesefs directly", err)
return geesefs.MountDirect(target, args)
}
defer conn.Close()
// systemd is present
if err = geesefs.CopyBinary("/usr/bin/geesefs", "/csi/geesefs"); err != nil {
return err
}
pluginDir := os.Getenv("PLUGIN_DIR")
if pluginDir == "" {
pluginDir = "/var/lib/kubelet/plugins/ru.yandex.s3.csi"
}
args = append([]string{pluginDir+"/geesefs", "-f", "-o", "allow_other", "--endpoint", geesefs.endpoint}, args...)
glog.Info("Starting geesefs using systemd: "+strings.Join(args, " "))
unitName := "geesefs-"+systemd.PathBusEscape(volumeID)+".service"
newProps := []systemd.Property{
systemd.Property{
Name: "Description",
Value: dbus.MakeVariant("GeeseFS mount for Kubernetes volume "+volumeID),
},
systemd.PropExecStart(args, false),
systemd.Property{
Name: "ExecStopPost",
// force & lazy unmount to cleanup possibly dead mountpoints
Value: dbus.MakeVariant([]execCmd{ execCmd{ "/bin/umount", []string{ "/bin/umount", "-f", "-l", target }, false } }),
},
systemd.Property{
Name: "Environment",
Value: dbus.MakeVariant([]string{ "AWS_ACCESS_KEY_ID="+geesefs.accessKeyID, "AWS_SECRET_ACCESS_KEY="+geesefs.secretAccessKey }),
},
systemd.Property{
Name: "CollectMode",
Value: dbus.MakeVariant("inactive-or-failed"),
},
}
unitProps, err := conn.GetAllProperties(unitName)
if err == nil {
// Unit already exists
if s, ok := unitProps["ActiveState"].(string); ok && (s == "active" || s == "activating" || s == "reloading") {
// Unit is already active
curPath := ""
prevExec, ok := unitProps["ExecStart"].([][]interface{})
if ok && len(prevExec) > 0 && len(prevExec[0]) >= 2 {
execArgs, ok := prevExec[0][1].([]string)
if ok && len(execArgs) >= 2 {
curPath = execArgs[len(execArgs)-1]
}
}
if curPath != target {
return fmt.Errorf(
"GeeseFS for volume %v is already mounted on host, but"+
" in a different directory. We want %v, but it's in %v",
volumeID, target, curPath,
)
}
// Already mounted at right location
return nil
} else {
// Stop and garbage collect the unit if automatic collection didn't work for some reason
conn.StopUnit(unitName, "replace", nil)
conn.ResetFailedUnit(unitName)
}
}
_, err = conn.StartTransientUnit(unitName, "replace", newProps, nil)
if err != nil {
return fmt.Errorf("Error starting systemd unit %s on host: %v", unitName, err)
}
return waitForMount(target, 10*time.Second)
} }

View file

@ -11,18 +11,18 @@ import (
"syscall" "syscall"
"time" "time"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3" systemd "github.com/coreos/go-systemd/v22/dbus"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/mitchellh/go-ps" "github.com/mitchellh/go-ps"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
) )
// Mounter interface which can be implemented // Mounter interface which can be implemented
// by the different mounter types // by the different mounter types
type Mounter interface { type Mounter interface {
Stage(stagePath string) error Mount(target, volumeID string) error
Unstage(stagePath string) error
Mount(source string, target string) error
} }
const ( const (
@ -57,9 +57,11 @@ func New(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
} }
} }
func fuseMount(path string, command string, args []string) error { func fuseMount(path string, command string, args []string, envs []string) error {
cmd := exec.Command(command, args...) cmd := exec.Command(command, args...)
cmd.Stderr = os.Stderr cmd.Stderr = os.Stderr
// cmd.Environ() returns envs inherited from the current process
cmd.Env = append(cmd.Environ(), envs...)
glog.V(3).Infof("Mounting fuse with command: %s and args: %s", command, args) glog.V(3).Infof("Mounting fuse with command: %s and args: %s", command, args)
out, err := cmd.Output() out, err := cmd.Output()
@ -70,12 +72,40 @@ func fuseMount(path string, command string, args []string) error {
return waitForMount(path, 10*time.Second) return waitForMount(path, 10*time.Second)
} }
func Unmount(path string) error {
if err := mount.New("").Unmount(path); err != nil {
return err
}
return nil
}
func SystemdUnmount(volumeID string) (bool, error) {
conn, err := systemd.New()
if err != nil {
glog.Errorf("Failed to connect to systemd dbus service: %v", err)
return false, err
}
defer conn.Close()
unitName := "geesefs-"+systemd.PathBusEscape(volumeID)+".service"
units, err := conn.ListUnitsByNames([]string{ unitName })
glog.Errorf("Got %v", units)
if err != nil {
glog.Errorf("Failed to list systemd unit by name %v: %v", unitName, err)
return false, err
}
if len(units) == 0 || units[0].ActiveState == "inactive" || units[0].ActiveState == "failed" {
return true, nil
}
_, err = conn.StopUnit(unitName, "replace", nil)
return true, err
}
func FuseUnmount(path string) error { func FuseUnmount(path string) error {
if err := mount.New("").Unmount(path); err != nil { if err := mount.New("").Unmount(path); err != nil {
return err return err
} }
// as fuse quits immediately, we will try to wait until the process is done // as fuse quits immediately, we will try to wait until the process is done
process, err := findFuseMountProcess(path) process, err := FindFuseMountProcess(path)
if err != nil { if err != nil {
glog.Errorf("Error getting PID of fuse mount: %s", err) glog.Errorf("Error getting PID of fuse mount: %s", err)
return nil return nil
@ -107,7 +137,7 @@ func waitForMount(path string, timeout time.Duration) error {
} }
} }
func findFuseMountProcess(path string) (*os.Process, error) { func FindFuseMountProcess(path string) (*os.Process, error) {
processes, err := ps.Processes() processes, err := ps.Processes()
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -2,7 +2,6 @@ package mounter
import ( import (
"fmt" "fmt"
"os"
"path" "path"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3" "github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
@ -31,15 +30,7 @@ func newRcloneMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
}, nil }, nil
} }
func (rclone *rcloneMounter) Stage(stageTarget string) error { func (rclone *rcloneMounter) Mount(target, volumeID string) error {
return nil
}
func (rclone *rcloneMounter) Unstage(stageTarget string) error {
return nil
}
func (rclone *rcloneMounter) Mount(source string, target string) error {
args := []string{ args := []string{
"mount", "mount",
fmt.Sprintf(":s3:%s", path.Join(rclone.meta.BucketName, rclone.meta.Prefix)), fmt.Sprintf(":s3:%s", path.Join(rclone.meta.BucketName, rclone.meta.Prefix)),
@ -55,7 +46,9 @@ func (rclone *rcloneMounter) Mount(source string, target string) error {
args = append(args, fmt.Sprintf("--s3-region=%s", rclone.region)) args = append(args, fmt.Sprintf("--s3-region=%s", rclone.region))
} }
args = append(args, rclone.meta.MountOptions...) args = append(args, rclone.meta.MountOptions...)
os.Setenv("AWS_ACCESS_KEY_ID", rclone.accessKeyID) envs := []string{
os.Setenv("AWS_SECRET_ACCESS_KEY", rclone.secretAccessKey) "AWS_ACCESS_KEY_ID=" + rclone.accessKeyID,
return fuseMount(target, rcloneCmd, args) "AWS_SECRET_ACCESS_KEY=" + rclone.secretAccessKey,
}
return fuseMount(target, rcloneCmd, args, envs)
} }

View file

@ -28,15 +28,7 @@ func newS3fsMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
}, nil }, nil
} }
func (s3fs *s3fsMounter) Stage(stageTarget string) error { func (s3fs *s3fsMounter) Mount(target, volumeID string) error {
return nil
}
func (s3fs *s3fsMounter) Unstage(stageTarget string) error {
return nil
}
func (s3fs *s3fsMounter) Mount(source string, target string) error {
if err := writes3fsPass(s3fs.pwFileContent); err != nil { if err := writes3fsPass(s3fs.pwFileContent); err != nil {
return err return err
} }
@ -52,7 +44,7 @@ func (s3fs *s3fsMounter) Mount(source string, target string) error {
args = append(args, "-o", fmt.Sprintf("endpoint=%s", s3fs.region)) args = append(args, "-o", fmt.Sprintf("endpoint=%s", s3fs.region))
} }
args = append(args, s3fs.meta.MountOptions...) args = append(args, s3fs.meta.MountOptions...)
return fuseMount(target, s3fsCmd, args) return fuseMount(target, s3fsCmd, args, nil)
} }
func writes3fsPass(pwFileContent string) error { func writes3fsPass(pwFileContent string) error {

View file

@@ -52,7 +52,7 @@ func NewClient(cfg *Config) (*s3Client, error) {
         endpoint = u.Hostname() + ":" + u.Port()
     }
     minioClient, err := minio.New(endpoint, &minio.Options{
-        Creds:  credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, client.Config.Region),
+        Creds:  credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, ""),
         Secure: ssl,
     })
     if err != nil {