Compare commits

...

41 commits

Author SHA1 Message Date
Vitaliy Filippov
25401592e1 Bump version to 0.38.3, following GeeseFS
- Make long directory listings faster (remove O(N^2))
- Fix long directory listings where some files could be skipped due to inode expiration
- Remove extra empty COPY requests when metadata is not changed
- Add --fsync-on-close option for synchronous s3fs-like mode
- Fix --disable-xattr error code
- Do not try to switch between STANDARD and COLD based on the object size
2023-09-28 11:45:36 +03:00
Vitaliy Filippov
8d1ad692e5 Add github pages helm chart URL 2023-09-13 16:53:02 +03:00
Vitaliy Filippov
16c6c0ee13
Merge pull request from kopytov/mime-types
Add /etc/mime.types to csi-s3 image
2023-08-30 16:40:06 +03:00
Vitaliy Filippov
227e1cf2dd Publish helm chart to github pages 2023-08-28 18:41:38 +03:00
Vitaliy Filippov
40086c1ffa Bump version to 0.37.4
- Update GeeseFS (now it also limits metadata memory usage)
- Return s3fs-fuse & rclone packages back because they are now fixed in Alpine
- Do not use hostNetwork: true for mounter... but it won't help much because GeeseFS is now started on host :)
- Publish helm chart to github pages
2023-08-28 18:40:53 +03:00
Dmitry Kopytov
6cfd3ebbb6 Add mailcap installation to Dockerfile 2023-08-25 12:20:47 +03:00
Vitaliy Filippov
195829887a Return s3fs-fuse & rclone back, fix (README) 2023-08-15 13:37:25 +03:00
Vitaliy Filippov
f7e3c21a87 Remove hostNetwork: true () 2023-07-27 19:08:03 +03:00
Vitaliy Filippov
06d059bfd1 Update descriptions in the Marketplace manifest 2023-07-26 16:25:29 +03:00
Vitaliy Filippov
b630faefa7 Bump version to 0.36.2
- Update GeeseFS
- Add CSIDriver and remove external-attacher
- Add missing mounts to Helm chart
- Allow to configure mounter in Helm chart
- Allow to configure kubelet path in Helm chart
- Add nodeSelector for provisioner in Helm chart
- Do not use os.Setenv to pass keys
- Fix minio client constructor argument
2023-07-19 16:07:29 +03:00
Vitaliy Filippov
4af9636d19 Add missing systemd-control and stage-dir to Helm chart (fixes ) 2023-07-19 15:59:36 +03:00
Vitaliy Filippov
44511523e2 Slightly improve property names 2023-07-06 11:36:11 +03:00
Vitaliy Filippov
dd0c0b68d5
Merge pull request from ksrt12/feat-nodeSelector
feat: add nodeSelector for provisioner
2023-06-28 20:15:18 +03:00
Stepan Kazakov
59a7605ad8
feat: add nodeSelector for provisioner 2023-06-26 00:09:58 +03:00
Vitaliy Filippov
5a3a517315 Note about deleting attacher 2023-06-20 20:45:59 +03:00
x.zhou
6b72154ebc deploy: add CSIDriver object and remove attacher 2023-06-20 20:40:17 +03:00
Vitaliy Filippov
a43867a307 Fix perms after merging 2023-06-20 14:03:48 +03:00
Vitaliy Filippov
e334aedd0c
Merge pull request from nuwang/add_mounter_and_optional_secrets
Add support for optional secrets
2023-06-20 13:19:38 +03:00
Vitaliy Filippov
9a04d5a6eb
Merge pull request from nuwang/add_configurable_mounter
Make mounter configurable
2023-06-20 13:18:26 +03:00
Vitaliy Filippov
874dedcd3b
Merge pull request from amonhuang/pvc-manual-options
add manual pvc mount options
2023-06-15 19:17:36 +03:00
Vitaliy Filippov
c7e066396b
Merge pull request from NiklasRosenstein/add-helm-kubeletPath-option
add a `kubeletPath` option to `values.yaml`
2023-06-15 19:11:30 +03:00
Vitaliy Filippov
8539ff0a48
Merge pull request from zjx20/fix/wrong-arg
Fix the wrong argument for credentials.NewStaticV4()
2023-06-15 16:01:47 +03:00
Niklas Rosenstein
0fb81f07e7
Update deploy/helm/values.yaml
Co-authored-by: Kashemir001 <14910998+Kashemir001@users.noreply.github.com>
2023-06-14 11:09:55 +02:00
x.zhou
37c35c788a fix compilation 2023-06-09 16:44:40 +08:00
x.zhou
680a649a21 Fix the wrong argument for credentials.NewStaticV4() 2023-06-09 16:10:33 +08:00
Vitaliy Filippov
519c4f0bd7
Merge pull request from zjx20/fix-env
Don't call os.Setenv()
2023-06-05 18:49:19 +03:00
x.zhou
8ea6111b0d Don't call os.Setenv() 2023-06-03 15:36:03 +08:00
Vitaliy Filippov
4e410df6e1 Bump version to 0.35.5 2023-05-23 14:59:44 +03:00
Vitaliy Filippov
7a415ae6ab Recheck and revive staged mount when mounting it to the real path 2023-05-23 14:58:01 +03:00
Vitaliy Filippov
96818e563a Cleanup mounts after stopping them using systemd 2023-05-23 14:40:53 +03:00
Niklas Rosenstein
5dbebd01bd add a kubeletPath option to values.yaml 2023-05-18 09:40:51 +00:00
Vitaliy Filippov
3b38d545ab Bump version to 0.35.4, following GeeseFS 2023-04-26 22:57:37 +03:00
Vitaliy Filippov
259c9ca561 Remove broken test workflow 2023-04-26 22:57:37 +03:00
Vitaliy Filippov
64a443a5e2 Bump version to 0.35.3 2023-04-26 12:04:40 +03:00
Vitaliy Filippov
701c86fa4d Filter out unsafe options 2023-04-26 12:04:40 +03:00
Vitaliy Filippov
ede57438c0 Cache downloaded Go modules in Docker build, use alpine 3.17 2023-04-26 12:04:31 +03:00
Vitaliy Filippov
2927c733fb Bump version to 0.35.2, following GeeseFS 2023-04-18 16:08:38 +03:00
Vitaliy Filippov
c31204b8e4 Bump version to 0.35.1, following GeeseFS 2023-04-05 14:15:34 +03:00
amonhuang
f658121c77 add manual pvc mount options 2022-09-28 10:45:01 +08:00
Nuwan Goonasekera
2c85a614ea Make mounter configurable 2022-06-24 21:05:53 +05:30
Nuwan Goonasekera
a3fa9f3696 Make access key and secret key optional when using iam 2022-05-21 23:30:54 +05:30
26 changed files with 230 additions and 284 deletions

.github/workflows/pages.yml

@ -0,0 +1,48 @@
name: Publish Helm chart
on:
push:
branches:
- master
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
contents: write
pages: write
id-token: write
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
group: "pages"
cancel-in-progress: false
jobs:
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup Pages
uses: actions/configure-pages@v3
- name: Publish Helm chart
uses: stefanprodan/helm-gh-pages@0ad2bb377311d61ac04ad9eb6f252fb68e207260 # v1.7.0
with:
# GitHub automatically creates a GITHUB_TOKEN secret to use in your workflow. You can use the GITHUB_TOKEN to authenticate in a workflow run.
# See https://docs.github.com/en/actions/reference/authentication-in-a-workflow#about-the-github_token-secret
token: ${{ secrets.GITHUB_TOKEN }}
branch: github-pages
charts_dir: deploy/helm
target_dir: charts
linting: off


@ -1,16 +0,0 @@
name: Test
on:
push:
tags:
- "v*"
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Test
run: make test


@ -1,22 +1,18 @@
FROM golang:1.16-alpine as gobuild
FROM golang:1.19-alpine as gobuild
WORKDIR /build
ADD go.mod go.sum /build/
RUN go mod download -x
ADD cmd /build/cmd
ADD pkg /build/pkg
RUN go get -d -v ./...
RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o ./s3driver ./cmd/s3driver
FROM alpine:3.16
FROM alpine:3.17
LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
LABEL description="csi-s3 slim image"
# apk add temporarily broken:
#ERROR: unable to select packages:
# so:libcrypto.so.3 (no such package):
# required by: s3fs-fuse-1.91-r1[so:libcrypto.so.3]
#RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/testing s3fs-fuse rclone
RUN apk add --no-cache fuse mailcap rclone
RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community s3fs-fuse
ADD https://github.com/yandex-cloud/geesefs/releases/latest/download/geesefs-linux-amd64 /usr/bin/geesefs
RUN chmod 755 /usr/bin/geesefs


@ -17,7 +17,7 @@ REGISTRY_NAME=cr.yandex/crp9ftr22d26age3hulg
REGISTRY_NAME2=cr.il.nebius.cloud/crll7us9n6i5j3v4n92m
IMAGE_NAME=csi-s3
IMAGE_NAME2=yandex-cloud/csi-s3/csi-s3-driver
VERSION ?= 0.35.0
VERSION ?= 0.38.3
IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(VERSION)
TEST_IMAGE_TAG=$(IMAGE_NAME):test


@ -10,7 +10,19 @@ This is a Container Storage Interface ([CSI](https://github.com/container-storag
* Kubernetes has to allow privileged containers
* Docker daemon must allow shared mounts (systemd flag `MountFlags=shared`)
### 1. Create a secret with your S3 credentials
### Helm chart
The Helm chart is published at `https://yandex-cloud.github.io/k8s-csi-s3`:
```
helm repo add yandex-s3 https://yandex-cloud.github.io/k8s-csi-s3/charts
helm install csi-s3 yandex-s3/csi-s3
```
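The chart's defaults can be overridden at install time. As an illustrative sketch (parameter names come from the chart's `values.yaml` shown further down this page; the concrete values are placeholders), a custom values file could look like this:
```yaml
# my-values.yaml -- placeholder values, adjust for your S3 service
secret:
  accessKey: "<S3_ACCESS_KEY_ID>"
  secretKey: "<S3_SECRET_ACCESS_KEY>"
  endpoint: https://storage.yandexcloud.net
storageClass:
  singleBucket: ""   # empty = provision a separate bucket per volume
```
It would then be applied with `helm install csi-s3 yandex-s3/csi-s3 -f my-values.yaml`.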
### Manual installation
#### 1. Create a secret with your S3 credentials
```yaml
apiVersion: v1
@ -30,22 +42,30 @@ stringData:
The region can be empty if you are using some other S3-compatible storage.
### 2. Deploy the driver
#### 2. Deploy the driver
```bash
cd deploy/kubernetes
kubectl create -f provisioner.yaml
kubectl create -f attacher.yaml
kubectl create -f driver.yaml
kubectl create -f csi-s3.yaml
```
### 3. Create the storage class
If you're upgrading from a previous version which had `attacher.yaml`, you
can safely delete all resources created from that file:
```
wget https://raw.githubusercontent.com/yandex-cloud/k8s-csi-s3/v0.35.5/deploy/kubernetes/attacher.yaml
kubectl delete -f attacher.yaml
```
#### 3. Create the storage class
```bash
kubectl create -f examples/storageclass.yaml
```
### 4. Test the S3 driver
#### 4. Test the S3 driver
1. Create a pvc using the new storage class:
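An illustrative PVC (not necessarily identical to the manifest bundled in `examples/`; the name, size and access mode below are placeholders) might look like:
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-s3-pvc
spec:
  accessModes:
    - ReadWriteMany   # illustrative; pick the mode your workload needs
  resources:
    requests:
      storage: 10Gi
  storageClassName: csi-s3
```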
@ -74,8 +94,8 @@ kubectl create -f examples/storageclass.yaml
```bash
$ kubectl exec -ti csi-s3-test-nginx bash
$ mount | grep fuse
s3fs on /var/lib/www/html type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
$ touch /var/lib/www/html/hello_world
pvc-035763df-0488-4941-9a34-f637292eb95c: on /usr/share/nginx/html/s3 type fuse.geesefs (rw,nosuid,nodev,relatime,user_id=65534,group_id=0,default_permissions,allow_other)
$ touch /usr/share/nginx/html/s3/hello_world
```
If something does not work as expected, check the troubleshooting section below.


@ -1,9 +1,9 @@
---
apiVersion: v1
appVersion: 0.35.0
appVersion: 0.38.3
description: "Container Storage Interface (CSI) driver for S3 volumes"
name: csi-s3
version: 0.35.0
version: 0.38.3
keywords:
- s3
home: https://github.com/yandex-cloud/k8s-csi-s3


@ -26,6 +26,7 @@ The following table lists all configuration parameters and their default values.
| `storageClass.create` | Specifies whether the storage class should be created | true |
| `storageClass.name` | Storage class name | csi-s3 |
| `storageClass.singleBucket` | Use a single bucket for all dynamically provisioned persistent volumes | |
| `storageClass.mounter` | Mounter to use: geesefs (recommended), s3fs or rclone (see the example below) | geesefs |
| `storageClass.mountOptions` | GeeseFS mount options | `--memory-limit 1000 --dir-mode 0777 --file-mode 0666` |
| `storageClass.reclaimPolicy` | Volume reclaim policy | Delete |
| `storageClass.annotations` | Annotations for the storage class | |
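For example, a minimal values override for the mounter-related parameters from the table above (a sketch; the option string is a placeholder and must match the chosen mounter):
```yaml
storageClass:
  mounter: geesefs
  mountOptions: "--memory-limit 2000 --dir-mode 0777 --file-mode 0666"
```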


@ -1,10 +1,9 @@
helm_chart:
name: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3
tag: 0.35.0
tag: 0.38.3
requirements:
k8s_version: ">=1.13"
images:
- full: images.attacher
- full: images.registrar
- full: images.provisioner
- full: images.csi
@ -15,7 +14,7 @@ user_values:
ru: Создать класс хранения
description:
en: Specifies whether the storage class should be created
ru: 'Если "да", при установке будет создан класс хранения S3'
ru: Выберите, чтобы создать новый S3-класс хранения при развёртывании приложения.
boolean_value:
default_value: true
- name: secret.create
@ -24,7 +23,7 @@ user_values:
ru: Создать секрет
description:
en: Specifies whether the secret should be created
ru: 'Если "да", при установке будет создан секрет, иначе для класса хранения будет использован существующий'
ru: Выберите, чтобы создать новый секрет для класса хранения при установке приложения, а не использовать существующий.
boolean_value:
default_value: true
- name: secret.accessKey
@ -33,7 +32,7 @@ user_values:
ru: Идентификатор ключа S3
description:
en: S3 Access Key ID
ru: Идентификатор ключа S3
ru: Идентификатор ключа S3.
string_value:
default_value: ""
- name: secret.secretKey
@ -42,16 +41,16 @@ user_values:
ru: Секретный ключ S3
description:
en: S3 Secret Key
ru: Секретный ключ S3
ru: Секретный ключ S3.
string_value:
default_value: ""
- name: storageClass.singleBucket
title:
en: Single S3 bucket for volumes
ru: Общий S3 бакет для томов
ru: Общий бакет S3 для томов
description:
en: Single S3 bucket to use for all dynamically provisioned persistent volumes
ru: Общий бакет S3, в котором будут создаваться все динамически распределяемые тома. Если пусто, под каждый том будет создаваться новый бакет
ru: Общий бакет S3, в котором будут создаваться все динамически распределяемые тома. Если пусто, под каждый том будет создаваться новый бакет.
string_value:
default_value: ""
- name: secret.endpoint
@ -60,7 +59,7 @@ user_values:
ru: Адрес S3-сервиса
description:
en: S3 service endpoint to use
ru: Адрес S3-сервиса, который будет использоваться
ru: Адрес S3-сервиса, который будет использоваться.
string_value:
default_value: "https://storage.yandexcloud.net"
- name: storageClass.mountOptions
@ -68,8 +67,8 @@ user_values:
en: GeeseFS mount options
ru: Опции монтирования GeeseFS
description:
en: GeeseFS mount options to use. Consult GeeseFS (https://github.com/yandex-cloud/geesefs) help for the full option list
ru: Опции монтирования GeeseFS. Смотрите справку GeeseFS (https://github.com/yandex-cloud/geesefs) для полного перечня опций
en: GeeseFS mount options to use. Refer to `geesefs --help` command output for the whole list of options (https://github.com/yandex-cloud/geesefs).
ru: Опции монтирования GeeseFS. Полный перечень и описание опций смотрите в выводе команды `geesefs --help` (https://github.com/yandex-cloud/geesefs).
string_value:
default_value: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
- name: storageClass.reclaimPolicy
@ -78,7 +77,7 @@ user_values:
ru: Политика очистки томов
description:
en: Volume reclaim policy for the storage class (Retain or Delete)
ru: Политика очистки PV, связанных с PVC (Retain - сохранять при удалении PVC, Delete - удалять при удалении PVC)
ru: Выберите политику очистки томов PersistentVolume при удалении PersistentVolumeClaim. Retain — сохранять том, Delete — удалять том.
string_selector_value:
default_value: Delete
values:
@ -90,7 +89,7 @@ user_values:
ru: Название класса хранения
description:
en: Name of the storage class that will be created
ru: Название класса хранения, который будет создан при установке
ru: Название класса хранения, который будет создан при установке.
string_value:
default_value: csi-s3
- name: secret.name
@ -99,15 +98,15 @@ user_values:
ru: Название секрета
description:
en: Name of the secret to create or use for the storage class
ru: Название секрета, который будет создан или использован для класса хранения
ru: Название секрета, который будет создан или использован для класса хранения.
string_value:
default_value: csi-s3-secret
- name: tolerations.all
title:
en: Tolerate all taints
ru: Игнорировать все taint
ru: Игнорировать все политики taint
description:
en: Tolerate all taints by the CSI-S3 node driver (mounter)
ru: Игнорировать все taint-ы узлов кластера драйвером CSI-S3, монтирующим ФС на узлах
ru: Выберите, чтобы драйвер CSI, который монтирует файловую систему на узлах, игнорировал все политики taint для узлов кластера.
boolean_value:
default_value: false


@ -50,7 +50,6 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccount: csi-s3
hostNetwork: true
containers:
- name: driver-registrar
image: {{ .Values.images.registrar }}
@ -62,7 +61,7 @@ spec:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
@ -94,24 +93,37 @@ spec:
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: stage-dir
mountPath: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
mountPropagation: "Bidirectional"
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPath: {{ .Values.kubeletPath }}/pods
mountPropagation: "Bidirectional"
- name: fuse-device
mountPath: /dev/fuse
- name: systemd-control
mountPath: /run/systemd
volumes:
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
path: {{ .Values.kubeletPath }}/plugins_registry/
type: DirectoryOrCreate
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
path: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
type: DirectoryOrCreate
- name: stage-dir
hostPath:
path: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
path: {{ .Values.kubeletPath }}/pods
type: Directory
- name: fuse-device
hostPath:
path: /dev/fuse
- name: systemd-control
hostPath:
path: /run/systemd
type: DirectoryOrCreate


@ -0,0 +1,10 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: ru.yandex.s3.csi
spec:
attachRequired: false
podInfoOnMount: true
fsGroupPolicy: File # added in Kubernetes 1.19, this field is GA as of Kubernetes 1.23
volumeLifecycleModes: # added in Kubernetes 1.16, this field is beta
- Persistent


@ -77,6 +77,10 @@ spec:
{{- with .Values.tolerations.controller }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: csi-provisioner
image: {{ .Values.images.provisioner }}
@ -85,11 +89,11 @@ spec:
- "--v=4"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
- name: csi-s3
image: {{ .Values.images.csi }}
imagePullPolicy: IfNotPresent
@ -99,14 +103,14 @@ spec:
- "--v=4"
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
value: unix://{{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
volumes:
- name: socket-dir
emptyDir: {}


@ -5,7 +5,11 @@ metadata:
namespace: {{ .Release.Namespace }}
name: {{ .Values.secret.name }}
stringData:
{{- if .Values.secret.accessKey }}
accessKeyID: {{ .Values.secret.accessKey }}
{{- end }}
{{- if .Values.secret.secretKey }}
secretAccessKey: {{ .Values.secret.secretKey }}
{{- end }}
endpoint: {{ .Values.secret.endpoint }}
{{- end -}}
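With these conditionals, leaving the key fields empty omits them from the generated secret, which matches the "Make access key and secret key optional when using iam" change in this compare. A hedged example of such values (the endpoint default is taken from the chart):
```yaml
secret:
  create: true
  name: csi-s3-secret
  accessKey: ""   # omitted from the secret; credentials come from IAM instead
  secretKey: ""
  endpoint: https://storage.yandexcloud.net
```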


@ -9,7 +9,7 @@ metadata:
{{- end }}
provisioner: ru.yandex.s3.csi
parameters:
mounter: geesefs
mounter: "{{ .Values.storageClass.mounter }}"
options: "{{ .Values.storageClass.mountOptions }}"
{{- if .Values.storageClass.singleBucket }}
bucket: "{{ .Values.storageClass.singleBucket }}"
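Rendered with the chart defaults, this template would produce roughly the following StorageClass (a sketch; fields outside this hunk are omitted or assumed):
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-s3
provisioner: ru.yandex.s3.csi
parameters:
  mounter: "geesefs"
  options: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
reclaimPolicy: Delete
```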


@ -1,13 +1,11 @@
---
images:
# Source: quay.io/k8scsi/csi-attacher:v3.0.1
attacher: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-attacher:v3.0.1
# Source: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
registrar: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-node-driver-registrar:v1.2.0
# Source: quay.io/k8scsi/csi-provisioner:v2.1.0
provisioner: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-provisioner:v2.1.0
# Main image
csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3-driver:0.35.0
csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3-driver:0.38.3
storageClass:
# Specifies whether the storage class should be created
@ -16,6 +14,8 @@ storageClass:
name: csi-s3
# Use a single bucket for all dynamically provisioned persistent volumes
singleBucket: ""
# mounter to use - either geesefs, s3fs or rclone (default geesefs)
mounter: geesefs
# GeeseFS mount options
mountOptions: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
# Volume reclaim policy
@ -42,3 +42,7 @@ tolerations:
all: false
node: []
controller: []
nodeSelector: {}
kubeletPath: /var/lib/kubelet
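The new `kubeletPath` and `nodeSelector` values make the kubelet root directory and the provisioner placement configurable. A hedged example for a distribution that keeps kubelet state outside `/var/lib/kubelet` (the path and label below are placeholders):
```yaml
kubeletPath: /data/kubelet   # placeholder: use your distribution's kubelet root
nodeSelector:
  kubernetes.io/os: linux    # any node label selecting where the provisioner runs
```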


@ -1,101 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher-sa
namespace: {{ .Release.Namespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io
---
# needed for StatefulSet
kind: Service
apiVersion: v1
metadata:
name: csi-attacher-s3
namespace: {{ .Release.Namespace }}
labels:
app: csi-attacher-s3
spec:
selector:
app: csi-attacher-s3
ports:
- name: csi-s3-dummy
port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-attacher-s3
namespace: {{ .Release.Namespace }}
spec:
serviceName: "csi-attacher-s3"
replicas: 1
selector:
matchLabels:
app: csi-attacher-s3
template:
metadata:
labels:
app: csi-attacher-s3
spec:
serviceAccount: csi-attacher-sa
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
containers:
- name: csi-attacher
image: {{ .Values.images.attacher }}
args:
- "--v=4"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
type: DirectoryOrCreate


@ -1,101 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io
---
# needed for StatefulSet
kind: Service
apiVersion: v1
metadata:
name: csi-attacher-s3
namespace: kube-system
labels:
app: csi-attacher-s3
spec:
selector:
app: csi-attacher-s3
ports:
- name: csi-s3-dummy
port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-attacher-s3
namespace: kube-system
spec:
serviceName: "csi-attacher-s3"
replicas: 1
selector:
matchLabels:
app: csi-attacher-s3
template:
metadata:
labels:
app: csi-attacher-s3
spec:
serviceAccount: csi-attacher-sa
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
containers:
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v3.0.1
args:
- "--v=4"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
type: DirectoryOrCreate


@ -43,7 +43,6 @@ spec:
effect: NoExecute
tolerationSeconds: 300
serviceAccount: csi-s3
hostNetwork: true
containers:
- name: driver-registrar
image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
@ -71,7 +70,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.35.0
image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
imagePullPolicy: IfNotPresent
args:
- "--endpoint=$(CSI_ENDPOINT)"


@ -0,0 +1,7 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: ru.yandex.s3.csi
spec:
attachRequired: false
podInfoOnMount: true


@ -29,6 +29,7 @@ spec:
volumeAttributes:
capacity: 10Gi
mounter: geesefs
options: --memory-limit 1000 --dir-mode 0777 --file-mode 0666
volumeHandle: manualbucket/path
---
apiVersion: v1


@ -88,7 +88,7 @@ spec:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
- name: csi-s3
image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.35.0
image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
imagePullPolicy: IfNotPresent
args:
- "--endpoint=$(CSI_ENDPOINT)"


@ -84,7 +84,28 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
notMnt, err := checkMount(targetPath)
notMnt, err := checkMount(stagingTargetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if notMnt {
// Staged mount is dead by some reason. Revive it
bucketName, prefix := volumeIDToBucketPrefix(volumeID)
s3, err := s3.NewClientFromSecret(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
meta := getMeta(bucketName, prefix, req.VolumeContext)
mounter, err := mounter.New(meta, s3.Config)
if err != nil {
return nil, err
}
if err := mounter.Mount(stagingTargetPath, volumeID); err != nil {
return nil, err
}
}
notMnt, err = checkMount(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}


@ -3,6 +3,7 @@ package mounter
import (
"fmt"
"os"
"strings"
"time"
systemd "github.com/coreos/go-systemd/v22/dbus"
@ -74,9 +75,17 @@ func (geesefs *geesefsMounter) MountDirect(target string, args []string) error {
"-o", "allow_other",
"--log-file", "/dev/stderr",
}, args...)
os.Setenv("AWS_ACCESS_KEY_ID", geesefs.accessKeyID)
os.Setenv("AWS_SECRET_ACCESS_KEY", geesefs.secretAccessKey)
return fuseMount(target, geesefsCmd, args)
envs := []string{
"AWS_ACCESS_KEY_ID=" + geesefs.accessKeyID,
"AWS_SECRET_ACCESS_KEY=" + geesefs.secretAccessKey,
}
return fuseMount(target, geesefsCmd, args, envs)
}
type execCmd struct {
Path string
Args []string
UncleanIsFailure bool
}
func (geesefs *geesefsMounter) Mount(target, volumeID string) error {
@ -92,10 +101,30 @@ func (geesefs *geesefsMounter) Mount(target, volumeID string) error {
)
useSystemd := true
for i := 0; i < len(geesefs.meta.MountOptions); i++ {
if geesefs.meta.MountOptions[i] == "--no-systemd" {
opt := geesefs.meta.MountOptions[i]
if opt == "--no-systemd" {
useSystemd = false
} else {
args = append(args, geesefs.meta.MountOptions[i])
} else if len(opt) > 0 && opt[0] == '-' {
// Remove unsafe options
s := 1
if len(opt) > 1 && opt[1] == '-' {
s++
}
key := opt[s:]
e := strings.Index(opt, "=")
if e >= 0 {
key = opt[s:e]
}
if key == "log-file" || key == "shared-config" || key == "cache" {
// Skip options accessing local FS
if e < 0 {
i++
}
} else if key != "" {
args = append(args, opt)
}
} else if len(opt) > 0 {
args = append(args, opt)
}
}
args = append(args, fullPath, target)
@ -118,6 +147,7 @@ func (geesefs *geesefsMounter) Mount(target, volumeID string) error {
pluginDir = "/var/lib/kubelet/plugins/ru.yandex.s3.csi"
}
args = append([]string{pluginDir+"/geesefs", "-f", "-o", "allow_other", "--endpoint", geesefs.endpoint}, args...)
glog.Info("Starting geesefs using systemd: "+strings.Join(args, " "))
unitName := "geesefs-"+systemd.PathBusEscape(volumeID)+".service"
newProps := []systemd.Property{
systemd.Property{
@ -125,6 +155,11 @@ func (geesefs *geesefsMounter) Mount(target, volumeID string) error {
Value: dbus.MakeVariant("GeeseFS mount for Kubernetes volume "+volumeID),
},
systemd.PropExecStart(args, false),
systemd.Property{
Name: "ExecStopPost",
// force & lazy unmount to cleanup possibly dead mountpoints
Value: dbus.MakeVariant([]execCmd{ execCmd{ "/bin/umount", []string{ "/bin/umount", "-f", "-l", target }, false } }),
},
systemd.Property{
Name: "Environment",
Value: dbus.MakeVariant([]string{ "AWS_ACCESS_KEY_ID="+geesefs.accessKeyID, "AWS_SECRET_ACCESS_KEY="+geesefs.secretAccessKey }),


@ -57,9 +57,11 @@ func New(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
}
}
func fuseMount(path string, command string, args []string) error {
func fuseMount(path string, command string, args []string, envs []string) error {
cmd := exec.Command(command, args...)
cmd.Stderr = os.Stderr
// cmd.Environ() (available since Go 1.19) returns the environment inherited from the current process
cmd.Env = append(cmd.Environ(), envs...)
glog.V(3).Infof("Mounting fuse with command: %s and args: %s", command, args)
out, err := cmd.Output()


@ -2,7 +2,6 @@ package mounter
import (
"fmt"
"os"
"path"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
@ -47,7 +46,9 @@ func (rclone *rcloneMounter) Mount(target, volumeID string) error {
args = append(args, fmt.Sprintf("--s3-region=%s", rclone.region))
}
args = append(args, rclone.meta.MountOptions...)
os.Setenv("AWS_ACCESS_KEY_ID", rclone.accessKeyID)
os.Setenv("AWS_SECRET_ACCESS_KEY", rclone.secretAccessKey)
return fuseMount(target, rcloneCmd, args)
envs := []string{
"AWS_ACCESS_KEY_ID=" + rclone.accessKeyID,
"AWS_SECRET_ACCESS_KEY=" + rclone.secretAccessKey,
}
return fuseMount(target, rcloneCmd, args, envs)
}


@ -44,7 +44,7 @@ func (s3fs *s3fsMounter) Mount(target, volumeID string) error {
args = append(args, "-o", fmt.Sprintf("endpoint=%s", s3fs.region))
}
args = append(args, s3fs.meta.MountOptions...)
return fuseMount(target, s3fsCmd, args)
return fuseMount(target, s3fsCmd, args, nil)
}
func writes3fsPass(pwFileContent string) error {


@ -52,7 +52,7 @@ func NewClient(cfg *Config) (*s3Client, error) {
endpoint = u.Hostname() + ":" + u.Port()
}
minioClient, err := minio.New(endpoint, &minio.Options{
Creds: credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, client.Config.Region),
Creds: credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, ""),
Secure: ssl,
})
if err != nil {