Compare commits

60 commits:

25401592e1, 8d1ad692e5, 16c6c0ee13, 227e1cf2dd, 40086c1ffa, 6cfd3ebbb6, 195829887a, f7e3c21a87, 06d059bfd1, b630faefa7, 4af9636d19, 44511523e2, dd0c0b68d5, 59a7605ad8, 5a3a517315, 6b72154ebc, a43867a307, e334aedd0c, 9a04d5a6eb, 874dedcd3b, c7e066396b, 8539ff0a48, 0fb81f07e7, 37c35c788a, 680a649a21, 519c4f0bd7, 8ea6111b0d, 4e410df6e1, 7a415ae6ab, 96818e563a, 5dbebd01bd, 3b38d545ab, 259c9ca561, 64a443a5e2, 701c86fa4d, ede57438c0, 2927c733fb, c31204b8e4, 8ac6bd58e4, fe02df610f, c4031bcbc6, 5c78b9b69d, f4d01e12c7, c6af9556d7, ecf1031dfc, 1305b20bae, 2ad5d21714, bfba08742c, e8d63dfc14, 8bbd7ebaf0, 543704336f, 63b1f45dba, 0a97f8d4ce, 9dac91e1ec, 514c0131dc, a1a001ce27, ffed042f5c, f658121c77, 2c85a614ea, a3fa9f3696
29 changed files with 446 additions and 367 deletions
Changed paths: `.github/workflows`, `Dockerfile`, `Makefile`, `README.md`, `deploy`, `go.mod`, `go.sum`, `pkg`
**`.github/workflows/pages.yml`** (vendored, new file, +48)

```yaml
name: Publish Helm chart

on:
  push:
    branches:
      - master

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
  contents: write
  pages: write
  id-token: write

# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
  group: "pages"
  cancel-in-progress: false

jobs:

  deploy:

    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3

      - name: Setup Pages
        uses: actions/configure-pages@v3

      - name: Publish Helm chart
        uses: stefanprodan/helm-gh-pages@0ad2bb377311d61ac04ad9eb6f252fb68e207260 # v1.7.0
        with:
          # GitHub automatically creates a GITHUB_TOKEN secret to use in your workflow. You can use the GITHUB_TOKEN to authenticate in a workflow run.
          # See https://docs.github.com/en/actions/reference/authentication-in-a-workflow#about-the-github_token-secret
          token: ${{ secrets.GITHUB_TOKEN }}
          branch: github-pages
          charts_dir: deploy/helm
          target_dir: charts
          linting: off
```
**`.github/workflows/test.yml`** (vendored, deleted, −16)

```yaml
name: Test

on:
  push:
    tags:
      - "v*"

jobs:

  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Test
        run: make test
```
**`Dockerfile`** (14 changes)

```diff
@@ -1,22 +1,18 @@
-FROM golang:1.16-alpine as gobuild
+FROM golang:1.19-alpine as gobuild
 
 WORKDIR /build
 ADD go.mod go.sum /build/
 RUN go mod download -x
 ADD cmd /build/cmd
 ADD pkg /build/pkg
 
 RUN go get -d -v ./...
 RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o ./s3driver ./cmd/s3driver
 
-FROM alpine:3.16
+FROM alpine:3.17
 LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
 LABEL description="csi-s3 slim image"
 
 # apk add temporarily broken:
 #ERROR: unable to select packages:
 #  so:libcrypto.so.3 (no such package):
 #    required by: s3fs-fuse-1.91-r1[so:libcrypto.so.3]
 #RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/testing s3fs-fuse rclone
 RUN apk add --no-cache fuse mailcap rclone
 RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community s3fs-fuse
 
 ADD https://github.com/yandex-cloud/geesefs/releases/latest/download/geesefs-linux-amd64 /usr/bin/geesefs
 RUN chmod 755 /usr/bin/geesefs
```
**`Makefile`** (3 changes)

```diff
@@ -14,9 +14,10 @@
 .PHONY: test build container push clean
 
 REGISTRY_NAME=cr.yandex/crp9ftr22d26age3hulg
+REGISTRY_NAME2=cr.il.nebius.cloud/crll7us9n6i5j3v4n92m
 IMAGE_NAME=csi-s3
 IMAGE_NAME2=yandex-cloud/csi-s3/csi-s3-driver
-VERSION ?= 0.32.0
+VERSION ?= 0.38.3
 IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(VERSION)
 TEST_IMAGE_TAG=$(IMAGE_NAME):test
 
```
**`README.md`** (45 changes)

````diff
@@ -10,7 +10,19 @@ This is a Container Storage Interface ([CSI](https://github.com/container-storag
 * Kubernetes has to allow privileged containers
 * Docker daemon must allow shared mounts (systemd flag `MountFlags=shared`)
 
-### 1. Create a secret with your S3 credentials
+### Helm chart
+
+Helm chart is published at `https://yandex-cloud.github.io/k8s-csi-s3`:
+
+```
+helm repo add yandex-s3 https://yandex-cloud.github.io/k8s-csi-s3/charts
+
+helm install csi-s3 yandex-s3/csi-s3
+```
+
+### Manual installation
+
+#### 1. Create a secret with your S3 credentials
 
 ```yaml
 apiVersion: v1
@@ -30,22 +42,30 @@ stringData:
 
 The region can be empty if you are using some other S3 compatible storage.
 
-### 2. Deploy the driver
+#### 2. Deploy the driver
 
 ```bash
 cd deploy/kubernetes
 kubectl create -f provisioner.yaml
-kubectl create -f attacher.yaml
+kubectl create -f driver.yaml
 kubectl create -f csi-s3.yaml
 ```
 
-### 3. Create the storage class
+If you're upgrading from a previous version which had `attacher.yaml` you
+can safely delete all resources created from that file:
+
+```
+wget https://raw.githubusercontent.com/yandex-cloud/k8s-csi-s3/v0.35.5/deploy/kubernetes/attacher.yaml
+kubectl delete -f attacher.yaml
+```
+
+#### 3. Create the storage class
 
 ```bash
 kubectl create -f examples/storageclass.yaml
 ```
 
-### 4. Test the S3 driver
+#### 4. Test the S3 driver
 
 1. Create a pvc using the new storage class:
 
@@ -74,8 +94,8 @@ kubectl create -f examples/storageclass.yaml
 ```bash
 $ kubectl exec -ti csi-s3-test-nginx bash
 $ mount | grep fuse
-s3fs on /var/lib/www/html type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
-$ touch /var/lib/www/html/hello_world
+pvc-035763df-0488-4941-9a34-f637292eb95c: on /usr/share/nginx/html/s3 type fuse.geesefs (rw,nosuid,nodev,relatime,user_id=65534,group_id=0,default_permissions,allow_other)
+$ touch /usr/share/nginx/html/s3/hello_world
 ```
 
 If something does not work as expected, check the troubleshooting section below.
````
````diff
@@ -94,11 +114,18 @@ metadata:
 provisioner: ru.yandex.s3.csi
 parameters:
   mounter: geesefs
+  options: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
   bucket: some-existing-bucket-name
 ```
 
 If the bucket is specified, it will still be created if it does not exist on the backend. Every volume will get its own prefix within the bucket which matches the volume ID. When deleting a volume, also just the prefix will be deleted.
 
+### Static Provisioning
+
+If you want to mount a pre-existing bucket or prefix within a pre-existing bucket and don't want csi-s3 to delete it when PV is deleted, you can use static provisioning.
+
+To do that you should omit `storageClassName` in the `PersistentVolumeClaim` and manually create a `PersistentVolume` with a matching `claimRef`, like in the following example: [deploy/kubernetes/examples/pvc-manual.yaml](deploy/kubernetes/examples/pvc-manual.yaml).
+
````
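To make the `claimRef` pattern concrete, here is a minimal sketch of a statically provisioned PV/PVC pair, modelled on the `pvc-manual.yaml` fragment that appears later on this page. The PV name, namespace, and claim name are illustrative assumptions, and credential secret references are omitted; consult the full example file for the authoritative layout:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: manualbucket          # illustrative name
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  claimRef:                   # must match the PVC below
    namespace: default
    name: csi-s3-manual-pvc
  csi:
    driver: ru.yandex.s3.csi
    volumeHandle: manualbucket/path   # pre-existing bucket/prefix
    volumeAttributes:
      capacity: 10Gi
      mounter: geesefs
      options: --memory-limit 1000 --dir-mode 0777 --file-mode 0666
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-s3-manual-pvc
  namespace: default
spec:
  # storageClassName is omitted on purpose, per the README above
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
```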
````diff
 ### Mounter
 
 We **strongly recommend** to use the default mounter which is [GeeseFS](https://github.com/yandex-cloud/geesefs).
@@ -118,6 +145,10 @@ You can check POSIX compatibility matrix here: https://github.com/yandex-cloud/g
 * Almost full POSIX compatibility
 * Good performance for both small and big files
 * Does not store file permissions and custom modification times
+* By default runs **outside** of the csi-s3 container using systemd, to not crash
+  mountpoints with "Transport endpoint is not connected" when csi-s3 is upgraded
+  or restarted. Add `--no-systemd` to `parameters.options` of the `StorageClass`
+  to disable this behaviour.
 
 #### s3fs
````
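For instance, a StorageClass that opts out of the systemd-based mounting would look roughly like this — a sketch derived from the StorageClass example above, where only the `--no-systemd` flag and the class name are additions of this illustration:

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: csi-s3-no-systemd      # illustrative name
provisioner: ru.yandex.s3.csi
parameters:
  mounter: geesefs
  # --no-systemd keeps GeeseFS running inside the csi-s3 container
  options: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666 --no-systemd"
```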
**Chart metadata** (likely `deploy/helm/csi-s3/Chart.yaml`):

```diff
@@ -1,9 +1,9 @@
 ---
 apiVersion: v1
-appVersion: 0.32.0
+appVersion: 0.38.3
 description: "Container Storage Interface (CSI) driver for S3 volumes"
 name: csi-s3
-version: 0.32.0
+version: 0.38.3
 keywords:
   - s3
 home: https://github.com/yandex-cloud/k8s-csi-s3
```
**Chart configuration table** (likely `deploy/helm/csi-s3/README.md`):

```diff
@@ -26,6 +26,7 @@ The following table lists all configuration parameters and their default values.
 | `storageClass.create` | Specifies whether the storage class should be created | true |
 | `storageClass.name` | Storage class name | csi-s3 |
+| `storageClass.singleBucket` | Use a single bucket for all dynamically provisioned persistent volumes | |
 | `storageClass.mounter` | Mounter to use. Either geesefs, s3fs or rclone. geesefs recommended | geesefs |
 | `storageClass.mountOptions` | GeeseFS mount options | `--memory-limit 1000 --dir-mode 0777 --file-mode 0666` |
 | `storageClass.reclaimPolicy` | Volume reclaim policy | Delete |
 | `storageClass.annotations` | Annotations for the storage class | |
```
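As a usage sketch for the new `storageClass.singleBucket` parameter (the bucket name below is an illustrative assumption), every dynamically provisioned volume becomes a prefix inside one shared bucket:

```yaml
# my-values.yaml — pass to helm with: helm install csi-s3 yandex-s3/csi-s3 -f my-values.yaml
storageClass:
  singleBucket: my-shared-bucket   # all dynamic volumes become prefixes in this bucket
  mounter: geesefs
  mountOptions: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
```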
**Marketplace chart metadata** (user-facing parameter descriptions; the ru/en strings are file content):

```diff
@@ -1,10 +1,9 @@
 helm_chart:
   name: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3
-  tag: 0.32.0
+  tag: 0.38.3
 requirements:
   k8s_version: ">=1.13"
 images:
-  - full: images.attacher
   - full: images.registrar
   - full: images.provisioner
   - full: images.csi
@@ -15,7 +14,7 @@ user_values:
       ru: Создать класс хранения
     description:
       en: Specifies whether the storage class should be created
-      ru: 'Если "да", при установке будет создан класс хранения S3'
+      ru: Выберите, чтобы создать новый S3-класс хранения при развёртывании приложения.
     boolean_value:
       default_value: true
   - name: secret.create
@@ -24,7 +23,7 @@ user_values:
       ru: Создать секрет
     description:
       en: Specifies whether the secret should be created
-      ru: 'Если "да", при установке будет создан секрет, иначе для класса хранения будет использован существующий'
+      ru: Выберите, чтобы создать новый секрет для класса хранения при установке приложения, а не использовать существующий.
     boolean_value:
       default_value: true
   - name: secret.accessKey
@@ -33,7 +32,7 @@ user_values:
       ru: Идентификатор ключа S3
     description:
       en: S3 Access Key ID
-      ru: Идентификатор ключа S3
+      ru: Идентификатор ключа S3.
     string_value:
       default_value: ""
   - name: secret.secretKey
@@ -42,16 +41,16 @@ user_values:
       ru: Секретный ключ S3
     description:
       en: S3 Secret Key
-      ru: Секретный ключ S3
+      ru: Секретный ключ S3.
     string_value:
       default_value: ""
   - name: storageClass.singleBucket
     title:
       en: Single S3 bucket for volumes
-      ru: Общий S3 бакет для томов
+      ru: Общий бакет S3 для томов
     description:
       en: Single S3 bucket to use for all dynamically provisioned persistent volumes
-      ru: Общий бакет S3, в котором будут создаваться все динамически распределяемые тома. Если пусто, под каждый том будет создаваться новый бакет
+      ru: Общий бакет S3, в котором будут создаваться все динамически распределяемые тома. Если пусто, под каждый том будет создаваться новый бакет.
     string_value:
       default_value: ""
   - name: secret.endpoint
@@ -60,7 +59,7 @@ user_values:
       ru: Адрес S3-сервиса
     description:
       en: S3 service endpoint to use
-      ru: Адрес S3-сервиса, который будет использоваться
+      ru: Адрес S3-сервиса, который будет использоваться.
     string_value:
       default_value: "https://storage.yandexcloud.net"
   - name: storageClass.mountOptions
@@ -68,8 +67,8 @@ user_values:
       en: GeeseFS mount options
       ru: Опции монтирования GeeseFS
     description:
-      en: GeeseFS mount options to use. Consult GeeseFS (https://github.com/yandex-cloud/geesefs) help for the full option list
-      ru: Опции монтирования GeeseFS. Смотрите справку GeeseFS (https://github.com/yandex-cloud/geesefs) для полного перечня опций
+      en: GeeseFS mount options to use. Refer to `geesefs --help` command output for the whole list of options (https://github.com/yandex-cloud/geesefs).
+      ru: Опции монтирования GeeseFS. Полный перечень и описание опций смотрите в выводе команды `geesefs --help` (https://github.com/yandex-cloud/geesefs).
     string_value:
       default_value: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
   - name: storageClass.reclaimPolicy
@@ -78,7 +77,7 @@ user_values:
       ru: Политика очистки томов
     description:
       en: Volume reclaim policy for the storage class (Retain or Delete)
-      ru: Политика очистки PV, связанных с PVC (Retain - сохранять при удалении PVC, Delete - удалять при удалении PVC)
+      ru: Выберите политику очистки томов PersistentVolume при удалении PersistentVolumeClaim. Retain — сохранять том, Delete — удалять том.
     string_selector_value:
       default_value: Delete
       values:
@@ -90,7 +89,7 @@ user_values:
       ru: Название класса хранения
     description:
       en: Name of the storage class that will be created
-      ru: Название класса хранения, который будет создан при установке
+      ru: Название класса хранения, который будет создан при установке.
     string_value:
       default_value: csi-s3
   - name: secret.name
@@ -99,15 +98,15 @@ user_values:
       ru: Название секрета
     description:
       en: Name of the secret to create or use for the storage class
-      ru: Название секрета, который будет создан или использован для класса хранения
+      ru: Название секрета, который будет создан или использован для класса хранения.
     string_value:
       default_value: csi-s3-secret
   - name: tolerations.all
     title:
       en: Tolerate all taints
-      ru: Игнорировать все taint-ы
+      ru: Игнорировать все политики taint
     description:
       en: Tolerate all taints by the CSI-S3 node driver (mounter)
-      ru: Игнорировать все taint-ы узлов кластера драйвером CSI-S3, монтирующим ФС на узлах
+      ru: Выберите, чтобы драйвер CSI, который монтирует файловую систему на узлах, игнорировал все политики taint для узлов кластера.
     boolean_value:
      default_value: false
```
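The `secret.create`/`secret.name` pair described above composes as follows. A hedged values sketch, assuming a pre-existing secret (the name is illustrative) that already contains the `accessKeyID`, `secretAccessKey` and `endpoint` keys shown in the README:

```yaml
secret:
  create: false             # reuse an existing secret instead of creating one
  name: my-s3-credentials   # assumed to exist with accessKeyID/secretAccessKey/endpoint keys
storageClass:
  create: true
```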
**Node driver template** (likely `deploy/helm/csi-s3/templates/csi-s3.yaml`):

```diff
@@ -8,22 +8,6 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: csi-s3
-rules:
-  - apiGroups: [""]
-    resources: ["secrets"]
-    verbs: ["get", "list"]
-  - apiGroups: [""]
-    resources: ["nodes"]
-    verbs: ["get", "list", "update"]
-  - apiGroups: [""]
-    resources: ["namespaces"]
-    verbs: ["get", "list"]
-  - apiGroups: [""]
-    resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch", "update"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["volumeattachments"]
-    verbs: ["get", "list", "watch", "update"]
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
@@ -66,7 +50,6 @@ spec:
         {{- toYaml . | nindent 8 }}
       {{- end }}
       serviceAccount: csi-s3
-      hostNetwork: true
       containers:
         - name: driver-registrar
           image: {{ .Values.images.registrar }}
@@ -78,7 +61,7 @@ spec:
             - name: ADDRESS
               value: /csi/csi.sock
             - name: DRIVER_REG_SOCK_PATH
-              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
+              value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
             - name: KUBE_NODE_NAME
               valueFrom:
                 fieldRef:
@@ -110,24 +93,37 @@ spec:
           volumeMounts:
             - name: plugin-dir
               mountPath: /csi
+            - name: stage-dir
+              mountPath: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
+              mountPropagation: "Bidirectional"
             - name: pods-mount-dir
-              mountPath: /var/lib/kubelet/pods
+              mountPath: {{ .Values.kubeletPath }}/pods
               mountPropagation: "Bidirectional"
             - name: fuse-device
               mountPath: /dev/fuse
+            - name: systemd-control
+              mountPath: /run/systemd
       volumes:
         - name: registration-dir
          hostPath:
-            path: /var/lib/kubelet/plugins_registry/
+            path: {{ .Values.kubeletPath }}/plugins_registry/
            type: DirectoryOrCreate
        - name: plugin-dir
          hostPath:
-            path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
+            path: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
            type: DirectoryOrCreate
+        - name: stage-dir
+          hostPath:
+            path: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
+            type: DirectoryOrCreate
        - name: pods-mount-dir
          hostPath:
-            path: /var/lib/kubelet/pods
+            path: {{ .Values.kubeletPath }}/pods
            type: Directory
        - name: fuse-device
          hostPath:
            path: /dev/fuse
+        - name: systemd-control
+          hostPath:
+            path: /run/systemd
+            type: DirectoryOrCreate
```
**`deploy/helm/csi-s3/templates/driver.yaml`** (new file, +10)

```yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: ru.yandex.s3.csi
spec:
  attachRequired: false
  podInfoOnMount: true
  fsGroupPolicy: File # added in Kubernetes 1.19, this field is GA as of Kubernetes 1.23
  volumeLifecycleModes: # added in Kubernetes 1.16, this field is beta
    - Persistent
```
**Provisioner template** (likely `deploy/helm/csi-s3/templates/provisioner.yaml`):

```diff
@@ -74,12 +74,13 @@ spec:
           operator: Exists
         - key: CriticalAddonsOnly
           operator: Exists
         - operator: Exists
           effect: NoExecute
           tolerationSeconds: 300
       {{- with .Values.tolerations.controller }}
         {{- toYaml . | nindent 8 }}
       {{- end }}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       containers:
         - name: csi-provisioner
           image: {{ .Values.images.provisioner }}
@@ -88,11 +89,11 @@ spec:
             - "--v=4"
           env:
             - name: ADDRESS
-              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
+              value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
           imagePullPolicy: "IfNotPresent"
           volumeMounts:
             - name: socket-dir
-              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
+              mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
         - name: csi-s3
           image: {{ .Values.images.csi }}
           imagePullPolicy: IfNotPresent
@@ -102,14 +103,14 @@ spec:
             - "--v=4"
           env:
             - name: CSI_ENDPOINT
-              value: unix:///var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
+              value: unix://{{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
             - name: NODE_ID
               valueFrom:
                 fieldRef:
                   fieldPath: spec.nodeName
           volumeMounts:
             - name: socket-dir
-              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
+              mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
       volumes:
         - name: socket-dir
           emptyDir: {}
```
**Secret template** (likely `deploy/helm/csi-s3/templates/secret.yaml`):

```diff
@@ -5,7 +5,11 @@ metadata:
   namespace: {{ .Release.Namespace }}
   name: {{ .Values.secret.name }}
 stringData:
+  {{- if .Values.secret.accessKey }}
   accessKeyID: {{ .Values.secret.accessKey }}
+  {{- end }}
+  {{- if .Values.secret.secretKey }}
   secretAccessKey: {{ .Values.secret.secretKey }}
+  {{- end }}
   endpoint: {{ .Values.secret.endpoint }}
 {{- end -}}
```
**StorageClass template** (likely `deploy/helm/csi-s3/templates/storageclass.yaml`):

```diff
@@ -9,7 +9,7 @@ metadata:
 {{- end }}
 provisioner: ru.yandex.s3.csi
 parameters:
-  mounter: geesefs
+  mounter: "{{ .Values.storageClass.mounter }}"
   options: "{{ .Values.storageClass.mountOptions }}"
 {{- if .Values.storageClass.singleBucket }}
   bucket: "{{ .Values.storageClass.singleBucket }}"
```
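With the chart defaults, this template renders to roughly the following manifest — a sketch assuming `storageClass.name: csi-s3`, the default mounter and mount options, and the default `Delete` reclaim policy from the configuration table above:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-s3
provisioner: ru.yandex.s3.csi
parameters:
  mounter: "geesefs"
  options: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
reclaimPolicy: Delete
```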
**Chart values** (likely `deploy/helm/csi-s3/values.yaml`):

```diff
@@ -1,13 +1,11 @@
 ---
 images:
-  # Source: quay.io/k8scsi/csi-attacher:v3.0.1
-  attacher: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-attacher:v3.0.1
   # Source: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
   registrar: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-node-driver-registrar:v1.2.0
   # Source: quay.io/k8scsi/csi-provisioner:v2.1.0
   provisioner: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-provisioner:v2.1.0
   # Main image
-  csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3-driver:0.32.0
+  csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3-driver:0.38.3
 
 storageClass:
   # Specifies whether the storage class should be created
@@ -16,6 +14,8 @@ storageClass:
   name: csi-s3
   # Use a single bucket for all dynamically provisioned persistent volumes
   singleBucket: ""
+  # mounter to use - either geesefs, s3fs or rclone (default geesefs)
+  mounter: geesefs
   # GeeseFS mount options
   mountOptions: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
   # Volume reclaim policy
@@ -42,3 +42,7 @@ tolerations:
   all: false
   node: []
   controller: []
+
+nodeSelector: {}
+
+kubeletPath: /var/lib/kubelet
```
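The new `kubeletPath` value feeds every kubelet-related host path in the templates above, so clusters whose kubelet state directory is not `/var/lib/kubelet` only need a single override. A hedged sketch (the microk8s path below is an assumed example, not something taken from this page):

```yaml
# values override, passed e.g. via: helm install csi-s3 yandex-s3/csi-s3 -f override.yaml
kubeletPath: /var/snap/microk8s/common/var/lib/kubelet  # assumed microk8s layout; verify for your distro
```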
**Attacher template, deleted** (likely `deploy/helm/csi-s3/templates/attacher.yaml`, −99):

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-attacher-sa
  namespace: {{ .Release.Namespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-attacher-runner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role
subjects:
  - kind: ServiceAccount
    name: csi-attacher-sa
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: external-attacher-runner
  apiGroup: rbac.authorization.k8s.io
---
# needed for StatefulSet
kind: Service
apiVersion: v1
metadata:
  name: csi-attacher-s3
  namespace: {{ .Release.Namespace }}
  labels:
    app: csi-attacher-s3
spec:
  selector:
    app: csi-attacher-s3
  ports:
    - name: csi-s3-dummy
      port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-attacher-s3
  namespace: {{ .Release.Namespace }}
spec:
  serviceName: "csi-attacher-s3"
  replicas: 1
  selector:
    matchLabels:
      app: csi-attacher-s3
  template:
    metadata:
      labels:
        app: csi-attacher-s3
    spec:
      serviceAccount: csi-attacher-sa
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: "Exists"
      containers:
        - name: csi-attacher
          image: {{ .Values.images.attacher }}
          args:
            - "--v=4"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
            type: DirectoryOrCreate
```
**`deploy/kubernetes/attacher.yaml`** (deleted, −104):

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-attacher-sa
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-attacher-runner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role
subjects:
  - kind: ServiceAccount
    name: csi-attacher-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: external-attacher-runner
  apiGroup: rbac.authorization.k8s.io
---
# needed for StatefulSet
kind: Service
apiVersion: v1
metadata:
  name: csi-attacher-s3
  namespace: kube-system
  labels:
    app: csi-attacher-s3
spec:
  selector:
    app: csi-attacher-s3
  ports:
    - name: csi-s3-dummy
      port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-attacher-s3
  namespace: kube-system
spec:
  serviceName: "csi-attacher-s3"
  replicas: 1
  selector:
    matchLabels:
      app: csi-attacher-s3
  template:
    metadata:
      labels:
        app: csi-attacher-s3
    spec:
      serviceAccount: csi-attacher-sa
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
        - key: CriticalAddonsOnly
          operator: Exists
        - operator: Exists
          effect: NoExecute
          tolerationSeconds: 300
      containers:
        - name: csi-attacher
          image: quay.io/k8scsi/csi-attacher:v3.0.1
          args:
            - "--v=4"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
            type: DirectoryOrCreate
```
**Node driver manifest** (likely `deploy/kubernetes/csi-s3.yaml`):

```diff
@@ -8,22 +8,6 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: csi-s3
-rules:
-  - apiGroups: [""]
-    resources: ["secrets"]
-    verbs: ["get", "list"]
-  - apiGroups: [""]
-    resources: ["nodes"]
-    verbs: ["get", "list", "update"]
-  - apiGroups: [""]
-    resources: ["namespaces"]
-    verbs: ["get", "list"]
-  - apiGroups: [""]
-    resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch", "update"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["volumeattachments"]
-    verbs: ["get", "list", "watch", "update"]
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
@@ -59,7 +43,6 @@ spec:
           effect: NoExecute
           tolerationSeconds: 300
       serviceAccount: csi-s3
-      hostNetwork: true
       containers:
         - name: driver-registrar
           image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
@@ -87,7 +70,7 @@ spec:
             capabilities:
               add: ["SYS_ADMIN"]
             allowPrivilegeEscalation: true
-          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.32.0
+          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
           imagePullPolicy: IfNotPresent
           args:
             - "--endpoint=$(CSI_ENDPOINT)"
@@ -103,11 +86,16 @@ spec:
           volumeMounts:
             - name: plugin-dir
               mountPath: /csi
+            - name: stage-dir
+              mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
+              mountPropagation: "Bidirectional"
             - name: pods-mount-dir
               mountPath: /var/lib/kubelet/pods
               mountPropagation: "Bidirectional"
             - name: fuse-device
               mountPath: /dev/fuse
+            - name: systemd-control
+              mountPath: /run/systemd
       volumes:
         - name: registration-dir
           hostPath:
@@ -117,6 +105,10 @@ spec:
           hostPath:
             path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
             type: DirectoryOrCreate
+        - name: stage-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins/kubernetes.io/csi
+            type: DirectoryOrCreate
         - name: pods-mount-dir
           hostPath:
             path: /var/lib/kubelet/pods
@@ -124,3 +116,7 @@ spec:
         - name: fuse-device
           hostPath:
             path: /dev/fuse
+        - name: systemd-control
+          hostPath:
+            path: /run/systemd
+            type: DirectoryOrCreate
```
**`deploy/kubernetes/driver.yaml`** (new file, +7)

```yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: ru.yandex.s3.csi
spec:
  attachRequired: false
  podInfoOnMount: true
```
**Static provisioning example** (likely `deploy/kubernetes/examples/pvc-manual.yaml`):

```diff
@@ -29,6 +29,7 @@ spec:
     volumeAttributes:
       capacity: 10Gi
       mounter: geesefs
+      options: --memory-limit 1000 --dir-mode 0777 --file-mode 0666
     volumeHandle: manualbucket/path
 ---
 apiVersion: v1
```
**Provisioner manifest** (likely `deploy/kubernetes/provisioner.yaml`):

```diff
@@ -70,8 +70,10 @@ spec:
     spec:
       serviceAccount: csi-provisioner-sa
       tolerations:
-        - key: node-role.kubernetes.io/master
-          operator: "Exists"
+        - key: node-role.kubernetes.io/master
+          operator: Exists
+        - key: CriticalAddonsOnly
+          operator: Exists
       containers:
         - name: csi-provisioner
           image: quay.io/k8scsi/csi-provisioner:v2.1.0
@@ -86,7 +88,7 @@ spec:
             - name: socket-dir
               mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
         - name: csi-s3
-          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.32.0
+          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
           imagePullPolicy: IfNotPresent
           args:
             - "--endpoint=$(CSI_ENDPOINT)"
```
**`go.mod`** (2 changes)

```diff
@@ -4,6 +4,8 @@ go 1.15
 
 require (
 	github.com/container-storage-interface/spec v1.1.0
+	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+	github.com/godbus/dbus/v5 v5.0.4 // indirect
 	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
 	github.com/golang/protobuf v1.1.0 // indirect
 	github.com/kubernetes-csi/csi-lib-utils v0.6.1 // indirect
```
**`go.sum`** (4 changes)

```diff
@@ -1,9 +1,13 @@
 github.com/container-storage-interface/spec v1.1.0 h1:qPsTqtR1VUPvMPeK0UnCZMtXaKGyyLPG8gj/wG6VqMs=
 github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
```
**Driver version** (likely `pkg/driver/driver.go`):

```diff
@@ -33,7 +33,7 @@ type driver struct {
 }
 
 var (
-	vendorVersion = "v1.2.0"
+	vendorVersion = "v1.34.7"
 	driverName    = "ru.yandex.s3.csi"
 )
```
**Node server** (likely `pkg/driver/nodeserver.go`):

```diff
@@ -19,6 +19,7 @@ package driver
 import (
 	"fmt"
 	"os"
+	"os/exec"
 	"regexp"
 	"strconv"
 
@@ -68,7 +69,6 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	volumeID := req.GetVolumeId()
 	targetPath := req.GetTargetPath()
 	stagingTargetPath := req.GetStagingTargetPath()
-	bucketName, prefix := volumeIDToBucketPrefix(volumeID)
 
 	// Check arguments
 	if req.GetVolumeCapability() == nil {
@@ -84,7 +84,28 @@
 		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
 	}
 
-	notMnt, err := checkMount(targetPath)
+	notMnt, err := checkMount(stagingTargetPath)
 	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
+	if notMnt {
+		// Staged mount is dead by some reason. Revive it
+		bucketName, prefix := volumeIDToBucketPrefix(volumeID)
+		s3, err := s3.NewClientFromSecret(req.GetSecrets())
+		if err != nil {
+			return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
+		}
+		meta := getMeta(bucketName, prefix, req.VolumeContext)
+		mounter, err := mounter.New(meta, s3.Config)
+		if err != nil {
+			return nil, err
+		}
+		if err := mounter.Mount(stagingTargetPath, volumeID); err != nil {
+			return nil, err
+		}
+	}
+
+	notMnt, err = checkMount(targetPath)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
@@ -100,18 +121,12 @@
 	glog.V(4).Infof("target %v\nreadonly %v\nvolumeId %v\nattributes %v\nmountflags %v\n",
 		targetPath, readOnly, volumeID, attrib, mountFlags)
 
-	s3, err := s3.NewClientFromSecret(req.GetSecrets())
+	cmd := exec.Command("mount", "--bind", stagingTargetPath, targetPath)
+	cmd.Stderr = os.Stderr
+	glog.V(3).Infof("Binding volume %v from %v to %v", volumeID, stagingTargetPath, targetPath)
+	out, err := cmd.Output()
 	if err != nil {
-		return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
-	}
-
-	meta := getMeta(bucketName, prefix, req.VolumeContext)
-	mounter, err := mounter.New(meta, s3.Config)
-	if err != nil {
-		return nil, err
-	}
-	if err := mounter.Mount(stagingTargetPath, targetPath); err != nil {
-		return nil, err
+		return nil, fmt.Errorf("Error running mount --bind %v %v: %s", stagingTargetPath, targetPath, out)
 	}
 
 	glog.V(4).Infof("s3: volume %s successfully mounted to %s", volumeID, targetPath)
@@ -131,7 +146,7 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
 		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
 	}
 
-	if err := mounter.FuseUnmount(targetPath); err != nil {
+	if err := mounter.Unmount(targetPath); err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 	glog.V(4).Infof("s3: volume %s has been unmounted.", volumeID)
@@ -174,7 +189,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 	if err != nil {
 		return nil, err
 	}
-	if err := mounter.Stage(stagingTargetPath); err != nil {
+	if err := mounter.Mount(stagingTargetPath, volumeID); err != nil {
 		return nil, err
 	}
 
@@ -193,6 +208,22 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
 		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
 	}
 
+	proc, err := mounter.FindFuseMountProcess(stagingTargetPath)
+	if err != nil {
+		return nil, err
+	}
+	exists := false
+	if proc == nil {
+		exists, err = mounter.SystemdUnmount(volumeID)
+		if exists && err != nil {
+			return nil, err
+		}
+	}
+	if !exists {
+		err = mounter.FuseUnmount(stagingTargetPath)
+	}
+	glog.V(4).Infof("s3: volume %s has been unmounted from stage path %v.", volumeID, stagingTargetPath)
+
 	return &csi.NodeUnstageVolumeResponse{}, nil
 }
```
**GeeseFS mounter** (likely `pkg/mounter/geesefs.go`):

```diff
@@ -3,6 +3,12 @@ package mounter
 import (
 	"fmt"
 	"os"
+	"strings"
+	"time"
+
+	systemd "github.com/coreos/go-systemd/v22/dbus"
+	dbus "github.com/godbus/dbus/v5"
+	"github.com/golang/glog"
 
 	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
 )
@@ -30,27 +36,170 @@ func newGeeseFSMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
 	}, nil
 }
 
-func (geesefs *geesefsMounter) Stage(stageTarget string) error {
+func (geesefs *geesefsMounter) CopyBinary(from, to string) error {
+	st, err := os.Stat(from)
+	if err != nil {
+		return fmt.Errorf("Failed to stat %s: %v", from, err)
+	}
+	st2, err := os.Stat(to)
+	if err != nil && !os.IsNotExist(err) {
+		return fmt.Errorf("Failed to stat %s: %v", to, err)
+	}
+	if err != nil || st2.Size() != st.Size() || st2.ModTime() != st.ModTime() {
+		if err == nil {
+			// remove the file first to not hit "text file busy" errors
+			err = os.Remove(to)
+			if err != nil {
+				return fmt.Errorf("Error removing %s to update it: %v", to, err)
+			}
+		}
+		bin, err := os.ReadFile(from)
+		if err != nil {
+			return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
+		}
+		err = os.WriteFile(to, bin, 0755)
+		if err != nil {
+			return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
+		}
+		err = os.Chtimes(to, st.ModTime(), st.ModTime())
+		if err != nil {
+			return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
+		}
+	}
 	return nil
 }
 
-func (geesefs *geesefsMounter) Unstage(stageTarget string) error {
-	return nil
-}
-
-func (geesefs *geesefsMounter) Mount(source string, target string) error {
-	fullPath := fmt.Sprintf("%s:%s", geesefs.meta.BucketName, geesefs.meta.Prefix)
-	args := []string{
+func (geesefs *geesefsMounter) MountDirect(target string, args []string) error {
+	args = append([]string{
 		"--endpoint", geesefs.endpoint,
 		"-o", "allow_other",
 		"--log-file", "/dev/stderr",
+	}, args...)
+	envs := []string{
+		"AWS_ACCESS_KEY_ID=" + geesefs.accessKeyID,
+		"AWS_SECRET_ACCESS_KEY=" + geesefs.secretAccessKey,
 	}
+	return fuseMount(target, geesefsCmd, args, envs)
+}
+
+type execCmd struct {
+	Path             string
+	Args             []string
+	UncleanIsFailure bool
+}
+
+func (geesefs *geesefsMounter) Mount(target, volumeID string) error {
+	fullPath := fmt.Sprintf("%s:%s", geesefs.meta.BucketName, geesefs.meta.Prefix)
+	var args []string
 	if geesefs.region != "" {
 		args = append(args, "--region", geesefs.region)
 	}
-	args = append(args, geesefs.meta.MountOptions...)
+	args = append(
+		args,
+		"--setuid", "65534", // nobody. drop root privileges
+		"--setgid", "65534", // nogroup
+	)
+	useSystemd := true
+	for i := 0; i < len(geesefs.meta.MountOptions); i++ {
+		opt := geesefs.meta.MountOptions[i]
+		if opt == "--no-systemd" {
+			useSystemd = false
+		} else if len(opt) > 0 && opt[0] == '-' {
+			// Remove unsafe options
+			s := 1
+			if len(opt) > 1 && opt[1] == '-' {
+				s++
+			}
+			key := opt[s:]
+			e := strings.Index(opt, "=")
+			if e >= 0 {
+				key = opt[s:e]
+			}
+			if key == "log-file" || key == "shared-config" || key == "cache" {
+				// Skip options accessing local FS
+				if e < 0 {
+					i++
+				}
+			} else if key != "" {
+				args = append(args, opt)
+			}
+		} else if len(opt) > 0 {
+			args = append(args, opt)
+		}
+	}
 	args = append(args, fullPath, target)
-	os.Setenv("AWS_ACCESS_KEY_ID", geesefs.accessKeyID)
-	os.Setenv("AWS_SECRET_ACCESS_KEY", geesefs.secretAccessKey)
-	return fuseMount(target, geesefsCmd, args)
+	// Try to start geesefs using systemd so it doesn't get killed when the container exits
+	if !useSystemd {
+		return geesefs.MountDirect(target, args)
+	}
+	conn, err := systemd.New()
+	if err != nil {
+		glog.Errorf("Failed to connect to systemd dbus service: %v, starting geesefs directly", err)
+		return geesefs.MountDirect(target, args)
+	}
+	defer conn.Close()
+	// systemd is present
+	if err = geesefs.CopyBinary("/usr/bin/geesefs", "/csi/geesefs"); err != nil {
+		return err
+	}
+	pluginDir := os.Getenv("PLUGIN_DIR")
+	if pluginDir == "" {
+		pluginDir = "/var/lib/kubelet/plugins/ru.yandex.s3.csi"
+	}
+	args = append([]string{pluginDir+"/geesefs", "-f", "-o", "allow_other", "--endpoint", geesefs.endpoint}, args...)
+	glog.Info("Starting geesefs using systemd: "+strings.Join(args, " "))
+	unitName := "geesefs-"+systemd.PathBusEscape(volumeID)+".service"
+	newProps := []systemd.Property{
+		systemd.Property{
+			Name:  "Description",
+			Value: dbus.MakeVariant("GeeseFS mount for Kubernetes volume "+volumeID),
+		},
+		systemd.PropExecStart(args, false),
+		systemd.Property{
+			Name: "ExecStopPost",
+			// force & lazy unmount to cleanup possibly dead mountpoints
+			Value: dbus.MakeVariant([]execCmd{ execCmd{ "/bin/umount", []string{ "/bin/umount", "-f", "-l", target }, false } }),
+		},
+		systemd.Property{
+			Name:  "Environment",
+			Value: dbus.MakeVariant([]string{ "AWS_ACCESS_KEY_ID="+geesefs.accessKeyID, "AWS_SECRET_ACCESS_KEY="+geesefs.secretAccessKey }),
+		},
+		systemd.Property{
+			Name:  "CollectMode",
+			Value: dbus.MakeVariant("inactive-or-failed"),
+		},
+	}
+	unitProps, err := conn.GetAllProperties(unitName)
+	if err == nil {
+		// Unit already exists
+		if s, ok := unitProps["ActiveState"].(string); ok && (s == "active" || s == "activating" || s == "reloading") {
+			// Unit is already active
+			curPath := ""
+			prevExec, ok := unitProps["ExecStart"].([][]interface{})
+			if ok && len(prevExec) > 0 && len(prevExec[0]) >= 2 {
+				execArgs, ok := prevExec[0][1].([]string)
+				if ok && len(execArgs) >= 2 {
+					curPath = execArgs[len(execArgs)-1]
+				}
+			}
+			if curPath != target {
+				return fmt.Errorf(
+					"GeeseFS for volume %v is already mounted on host, but"+
+						" in a different directory. We want %v, but it's in %v",
+					volumeID, target, curPath,
+				)
+			}
+			// Already mounted at right location
+			return nil
+		} else {
+			// Stop and garbage collect the unit if automatic collection didn't work for some reason
+			conn.StopUnit(unitName, "replace", nil)
+			conn.ResetFailedUnit(unitName)
+		}
+	}
+	_, err = conn.StartTransientUnit(unitName, "replace", newProps, nil)
+	if err != nil {
+		return fmt.Errorf("Error starting systemd unit %s on host: %v", unitName, err)
+	}
+	return waitForMount(target, 10*time.Second)
 }
```
**Mounter interface and helpers** (likely `pkg/mounter/mounter.go`):

```diff
@@ -11,18 +11,18 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
+	systemd "github.com/coreos/go-systemd/v22/dbus"
 	"github.com/golang/glog"
 	"github.com/mitchellh/go-ps"
 	"k8s.io/kubernetes/pkg/util/mount"
+
+	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
 )
 
 // Mounter interface which can be implemented
 // by the different mounter types
 type Mounter interface {
-	Stage(stagePath string) error
-	Unstage(stagePath string) error
-	Mount(source string, target string) error
+	Mount(target, volumeID string) error
 }
 
 const (
@@ -57,9 +57,11 @@ func New(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
 	}
 }
 
-func fuseMount(path string, command string, args []string) error {
+func fuseMount(path string, command string, args []string, envs []string) error {
 	cmd := exec.Command(command, args...)
 	cmd.Stderr = os.Stderr
+	// cmd.Environ() returns envs inherited from the current process
+	cmd.Env = append(cmd.Environ(), envs...)
 	glog.V(3).Infof("Mounting fuse with command: %s and args: %s", command, args)
 
 	out, err := cmd.Output()
@@ -70,12 +72,40 @@ func fuseMount(path string, command string, args []string) error {
 	return waitForMount(path, 10*time.Second)
 }
 
+func Unmount(path string) error {
+	if err := mount.New("").Unmount(path); err != nil {
+		return err
+	}
+	return nil
+}
+
+func SystemdUnmount(volumeID string) (bool, error) {
+	conn, err := systemd.New()
+	if err != nil {
+		glog.Errorf("Failed to connect to systemd dbus service: %v", err)
+		return false, err
+	}
+	defer conn.Close()
+	unitName := "geesefs-"+systemd.PathBusEscape(volumeID)+".service"
+	units, err := conn.ListUnitsByNames([]string{ unitName })
+	glog.Errorf("Got %v", units)
+	if err != nil {
+		glog.Errorf("Failed to list systemd unit by name %v: %v", unitName, err)
+		return false, err
+	}
+	if len(units) == 0 || units[0].ActiveState == "inactive" || units[0].ActiveState == "failed" {
+		return true, nil
+	}
+	_, err = conn.StopUnit(unitName, "replace", nil)
+	return true, err
+}
+
 func FuseUnmount(path string) error {
 	if err := mount.New("").Unmount(path); err != nil {
 		return err
 	}
 	// as fuse quits immediately, we will try to wait until the process is done
-	process, err := findFuseMountProcess(path)
+	process, err := FindFuseMountProcess(path)
 	if err != nil {
 		glog.Errorf("Error getting PID of fuse mount: %s", err)
 		return nil
@@ -107,7 +137,7 @@ func waitForMount(path string, timeout time.Duration) error {
 	}
 }
 
-func findFuseMountProcess(path string) (*os.Process, error) {
+func FindFuseMountProcess(path string) (*os.Process, error) {
 	processes, err := ps.Processes()
 	if err != nil {
 		return nil, err
```
**Rclone mounter** (likely `pkg/mounter/rclone.go`):

```diff
@@ -2,7 +2,6 @@ package mounter
 
 import (
 	"fmt"
-	"os"
 	"path"
 
 	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
@@ -31,15 +30,7 @@ func newRcloneMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
 	}, nil
 }
 
-func (rclone *rcloneMounter) Stage(stageTarget string) error {
-	return nil
-}
-
-func (rclone *rcloneMounter) Unstage(stageTarget string) error {
-	return nil
-}
-
-func (rclone *rcloneMounter) Mount(source string, target string) error {
+func (rclone *rcloneMounter) Mount(target, volumeID string) error {
 	args := []string{
 		"mount",
 		fmt.Sprintf(":s3:%s", path.Join(rclone.meta.BucketName, rclone.meta.Prefix)),
@@ -55,7 +46,9 @@ func (rclone *rcloneMounter) Mount(source string, target string) error {
 		args = append(args, fmt.Sprintf("--s3-region=%s", rclone.region))
 	}
 	args = append(args, rclone.meta.MountOptions...)
-	os.Setenv("AWS_ACCESS_KEY_ID", rclone.accessKeyID)
-	os.Setenv("AWS_SECRET_ACCESS_KEY", rclone.secretAccessKey)
-	return fuseMount(target, rcloneCmd, args)
+	envs := []string{
+		"AWS_ACCESS_KEY_ID=" + rclone.accessKeyID,
+		"AWS_SECRET_ACCESS_KEY=" + rclone.secretAccessKey,
+	}
+	return fuseMount(target, rcloneCmd, args, envs)
 }
```
**s3fs mounter** (likely `pkg/mounter/s3fs.go`):

```diff
@@ -28,15 +28,7 @@ func newS3fsMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
 	}, nil
 }
 
-func (s3fs *s3fsMounter) Stage(stageTarget string) error {
-	return nil
-}
-
-func (s3fs *s3fsMounter) Unstage(stageTarget string) error {
-	return nil
-}
-
-func (s3fs *s3fsMounter) Mount(source string, target string) error {
+func (s3fs *s3fsMounter) Mount(target, volumeID string) error {
 	if err := writes3fsPass(s3fs.pwFileContent); err != nil {
 		return err
 	}
@@ -52,7 +44,7 @@ func (s3fs *s3fsMounter) Mount(source string, target string) error {
 		args = append(args, "-o", fmt.Sprintf("endpoint=%s", s3fs.region))
 	}
 	args = append(args, s3fs.meta.MountOptions...)
-	return fuseMount(target, s3fsCmd, args)
+	return fuseMount(target, s3fsCmd, args, nil)
 }
 
 func writes3fsPass(pwFileContent string) error {
```
**S3 client** (likely `pkg/s3/client.go`). The third argument of minio-go's `credentials.NewStaticV4` is the session token, not the region, so passing `client.Config.Region` there was a bug; this change passes an empty token instead:

```diff
@@ -52,7 +52,7 @@ func NewClient(cfg *Config) (*s3Client, error) {
 		endpoint = u.Hostname() + ":" + u.Port()
 	}
 	minioClient, err := minio.New(endpoint, &minio.Options{
-		Creds:  credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, client.Config.Region),
+		Creds:  credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, ""),
 		Secure: ssl,
 	})
 	if err != nil {
```