Compare commits
93 commits
Commits in this comparison:

25401592e1, 8d1ad692e5, 16c6c0ee13, 227e1cf2dd, 40086c1ffa, 6cfd3ebbb6, 195829887a, f7e3c21a87, 06d059bfd1, b630faefa7, 4af9636d19, 44511523e2, dd0c0b68d5, 59a7605ad8, 5a3a517315, 6b72154ebc, a43867a307, e334aedd0c, 9a04d5a6eb, 874dedcd3b, c7e066396b, 8539ff0a48, 0fb81f07e7, 37c35c788a, 680a649a21, 519c4f0bd7, 8ea6111b0d, 4e410df6e1, 7a415ae6ab, 96818e563a, 5dbebd01bd, 3b38d545ab, 259c9ca561, 64a443a5e2, 701c86fa4d, ede57438c0, 2927c733fb, c31204b8e4, 8ac6bd58e4, fe02df610f, c4031bcbc6, 5c78b9b69d, f4d01e12c7, c6af9556d7, ecf1031dfc, 1305b20bae, 2ad5d21714, bfba08742c, e8d63dfc14, 8bbd7ebaf0, 543704336f, 63b1f45dba, 0a97f8d4ce, 9dac91e1ec, 514c0131dc, a1a001ce27, ffed042f5c, 93f87186f5, 9640a35d35, f658121c77, 35a528e1c1, 6a44360e2f, 1e535a36f4, dc1eb30b0d, 52b29e1725, 2c85a614ea, ec05cec252, 63fe3703bb, 683d7d5e4e, f1cf91c00c, 2ccacb0de0, a3fa9f3696, fd5d022ca8, f0c07b1704, f3ff4f75c3, 6331bb840b, cb4717e4c8, 42970751f0, cc8baed8ef, 52d6241f46, 4ef8d3d1ec, 714641b44c, fc9567cb05, 1dedbefa6c, afcf283bba, e0f07e8971, 3925b5595a, e03074f1da, 4efd543afc, cedcc6a6fa, 8c959a926a, f889300e78, 7b422cfcbb
33 changed files with 942 additions and 266 deletions
Changed paths: .github/workflows, Dockerfile, Makefile, README.md, deploy, go.mod, go.sum, pkg/driver, pkg/mounter, pkg/s3, test
.github/workflows/go.yml (vendored, 25 lines deleted)

@@ -1,25 +0,0 @@
-name: Go
-
-on:
-  push:
-    branches: [ master ]
-  pull_request:
-    branches: [ master ]
-
-jobs:
-
-  build:
-    runs-on: ubuntu-latest
-    steps:
-    - uses: actions/checkout@v2
-
-    - name: Set up Go
-      uses: actions/setup-go@v2
-      with:
-        go-version: 1.16
-
-    - name: Build
-      run: go build -v ./...
-
-    - name: Test
-      run: make test
.github/workflows/pages.yml (vendored, new file, 48 lines)

@@ -0,0 +1,48 @@
+name: Publish Helm chart
+
+on:
+  push:
+    branches:
+      - master
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
+permissions:
+  contents: write
+  pages: write
+  id-token: write
+
+# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
+# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
+concurrency:
+  group: "pages"
+  cancel-in-progress: false
+
+jobs:
+
+  deploy:
+
+    environment:
+      name: github-pages
+      url: ${{ steps.deployment.outputs.page_url }}
+
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Setup Pages
+        uses: actions/configure-pages@v3
+
+      - name: Publish Helm chart
+        uses: stefanprodan/helm-gh-pages@0ad2bb377311d61ac04ad9eb6f252fb68e207260 # v1.7.0
+        with:
+          # GitHub automatically creates a GITHUB_TOKEN secret to use in your workflow. You can use the GITHUB_TOKEN to authenticate in a workflow run.
+          # See https://docs.github.com/en/actions/reference/authentication-in-a-workflow#about-the-github_token-secret
+          token: ${{ secrets.GITHUB_TOKEN }}
+          branch: github-pages
+          charts_dir: deploy/helm
+          target_dir: charts
+          linting: off
Dockerfile

@@ -1,18 +1,18 @@
-FROM golang:1.16-alpine as gobuild
+FROM golang:1.19-alpine as gobuild
 
 WORKDIR /build
-ADD . /build
-
-RUN go get -d -v ./...
+ADD go.mod go.sum /build/
+RUN go mod download -x
+ADD cmd /build/cmd
+ADD pkg /build/pkg
 RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o ./s3driver ./cmd/s3driver
 
-FROM debian:buster-slim
+FROM alpine:3.17
 LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
 LABEL description="csi-s3 slim image"
 
-RUN apt-get update && \
-    apt-get install -y ca-certificates && \
-    rm -rf /var/lib/apt/lists/*
+RUN apk add --no-cache fuse mailcap rclone
+RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community s3fs-fuse
 
 ADD https://github.com/yandex-cloud/geesefs/releases/latest/download/geesefs-linux-amd64 /usr/bin/geesefs
 RUN chmod 755 /usr/bin/geesefs
Makefile (13 lines changed)

@@ -13,10 +13,11 @@
 # limitations under the License.
 .PHONY: test build container push clean
 
-PROJECT_DIR=/app
 REGISTRY_NAME=cr.yandex/crp9ftr22d26age3hulg
+REGISTRY_NAME2=cr.il.nebius.cloud/crll7us9n6i5j3v4n92m
 IMAGE_NAME=csi-s3
-VERSION ?= 0.26.2
+IMAGE_NAME2=yandex-cloud/csi-s3/csi-s3-driver
+VERSION ?= 0.38.3
 IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(VERSION)
 TEST_IMAGE_TAG=$(IMAGE_NAME):test
 

@@ -24,13 +25,17 @@ build:
 	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/s3driver ./cmd/s3driver
 test:
 	docker build -t $(TEST_IMAGE_TAG) -f test/Dockerfile .
-	docker run --rm --privileged -v $(PWD):$(PROJECT_DIR) --device /dev/fuse $(TEST_IMAGE_TAG)
+	docker run --rm --privileged -v $(PWD):/build --device /dev/fuse $(TEST_IMAGE_TAG)
 container:
-	docker build -t $(IMAGE_TAG) -f cmd/s3driver/Dockerfile .
+	docker build -t $(IMAGE_TAG) .
 push: container
 	docker tag $(IMAGE_TAG) $(REGISTRY_NAME)/$(IMAGE_NAME):latest
+	docker tag $(IMAGE_TAG) $(REGISTRY_NAME)/$(IMAGE_NAME2):$(VERSION)
+	docker tag $(IMAGE_TAG) $(REGISTRY_NAME)/$(IMAGE_NAME2):latest
 	docker push $(IMAGE_TAG)
 	docker push $(REGISTRY_NAME)/$(IMAGE_NAME)
+	docker push $(REGISTRY_NAME)/$(IMAGE_NAME2)
+	docker push $(REGISTRY_NAME)/$(IMAGE_NAME2):$(VERSION)
 clean:
 	go clean -r -x
 	-rm -rf _output
README.md (69 lines changed)

@@ -6,11 +6,23 @@ This is a Container Storage Interface ([CSI](https://github.com/container-storag
 
 ### Requirements
 
-* Kubernetes 1.13+ (CSI v1.0.0 compatibility)
+* Kubernetes 1.17+
 * Kubernetes has to allow privileged containers
 * Docker daemon must allow shared mounts (systemd flag `MountFlags=shared`)
 
-### 1. Create a secret with your S3 credentials
+### Helm chart
+
+Helm chart is published at `https://yandex-cloud.github.io/k8s-csi-s3`:
+
+```
+helm repo add yandex-s3 https://yandex-cloud.github.io/k8s-csi-s3/charts
+
+helm install csi-s3 yandex-s3/csi-s3
+```
+
+### Manual installation
+
+#### 1. Create a secret with your S3 credentials
 
 ```yaml
 apiVersion: v1

@@ -30,22 +42,30 @@ stringData:
 
 The region can be empty if you are using some other S3 compatible storage.
 
-### 2. Deploy the driver
+#### 2. Deploy the driver
 
 ```bash
 cd deploy/kubernetes
 kubectl create -f provisioner.yaml
-kubectl create -f attacher.yaml
+kubectl create -f driver.yaml
 kubectl create -f csi-s3.yaml
 ```
 
-### 3. Create the storage class
+If you're upgrading from a previous version which had `attacher.yaml` you
+can safely delete all resources created from that file:
+
+```
+wget https://raw.githubusercontent.com/yandex-cloud/k8s-csi-s3/v0.35.5/deploy/kubernetes/attacher.yaml
+kubectl delete -f attacher.yaml
+```
+
+#### 3. Create the storage class
 
 ```bash
 kubectl create -f examples/storageclass.yaml
 ```
 
-### 4. Test the S3 driver
+#### 4. Test the S3 driver
 
 1. Create a pvc using the new storage class:
 

@@ -74,8 +94,8 @@ kubectl create -f examples/storageclass.yaml
 ```bash
 $ kubectl exec -ti csi-s3-test-nginx bash
 $ mount | grep fuse
-s3fs on /var/lib/www/html type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
-$ touch /var/lib/www/html/hello_world
+pvc-035763df-0488-4941-9a34-f637292eb95c: on /usr/share/nginx/html/s3 type fuse.geesefs (rw,nosuid,nodev,relatime,user_id=65534,group_id=0,default_permissions,allow_other)
+$ touch /usr/share/nginx/html/s3/hello_world
 ```
 
 If something does not work as expected, check the troubleshooting section below.

@@ -94,43 +114,54 @@ metadata:
 provisioner: ru.yandex.s3.csi
 parameters:
   mounter: geesefs
+  options: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
   bucket: some-existing-bucket-name
 ```
 
 If the bucket is specified, it will still be created if it does not exist on the backend. Every volume will get its own prefix within the bucket which matches the volume ID. When deleting a volume, also just the prefix will be deleted.
 
+### Static Provisioning
+
+If you want to mount a pre-existing bucket or prefix within a pre-existing bucket and don't want csi-s3 to delete it when PV is deleted, you can use static provisioning.
+
+To do that you should omit `storageClassName` in the `PersistentVolumeClaim` and manually create a `PersistentVolume` with a matching `claimRef`, like in the following example: [deploy/kubernetes/examples/pvc-manual.yaml](deploy/kubernetes/examples/pvc-manual.yaml)
+
 ### Mounter
 
-As S3 is not a real file system there are some limitations to consider here. Depending on what mounter you are using, you will have different levels of POSIX compability. Also depending on what S3 storage backend you are using there are not always [consistency guarantees](https://github.com/gaul/are-we-consistent-yet#observed-consistency).
+We **strongly recommend** to use the default mounter which is [GeeseFS](https://github.com/yandex-cloud/geesefs).
 
-The driver can be configured to use one of these mounters to mount buckets:
-
-* [geesefs](https://github.com/yandex-cloud/geesefs) (recommended and default)
-* [s3fs](https://github.com/s3fs-fuse/s3fs-fuse)
-* [rclone](https://rclone.org/commands/rclone_mount)
+However there is also support for two other backends: [s3fs](https://github.com/s3fs-fuse/s3fs-fuse) and [rclone](https://rclone.org/commands/rclone_mount).
 
 The mounter can be set as a parameter in the storage class. You can also create multiple storage classes for each mounter if you like.
 
-Characteristics of different mounters (for more detailed information consult their own documentation):
+As S3 is not a real file system there are some limitations to consider here.
+Depending on what mounter you are using, you will have different levels of POSIX compability.
+Also depending on what S3 storage backend you are using there are not always [consistency guarantees](https://github.com/gaul/are-we-consistent-yet#observed-consistency).
+
+You can check POSIX compatibility matrix here: https://github.com/yandex-cloud/geesefs#posix-compatibility-matrix.
 
 #### GeeseFS
 
 * Almost full POSIX compatibility
 * Good performance for both small and big files
 * Files can be viewed normally with any S3 client
+* Does not store file permissions and custom modification times
+* By default runs **outside** of the csi-s3 container using systemd, to not crash
+  mountpoints with "Transport endpoint is not connected" when csi-s3 is upgraded
+  or restarted. Add `--no-systemd` to `parameters.options` of the `StorageClass`
+  to disable this behaviour.
 
 #### s3fs
 
 * Almost full POSIX compatibility
 * Good performance for big files, poor performance for small files
 * Files can be viewed normally with any S3 client
+* Very slow for directories with a large number of files
 
 #### rclone
 
-* Less POSIX compatible than s3fs
+* Poor POSIX compatibility
 * Bad performance for big files, okayish performance for small files
 * Files can be viewed normally with any S3 client
 * Doesn't create directory objects like s3fs or GeeseFS
+* May hang :-)
 
 ## Troubleshooting
 
deploy/helm/csi-s3/Chart.yaml (new file, 12 lines)

@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+appVersion: 0.38.3
+description: "Container Storage Interface (CSI) driver for S3 volumes"
+name: csi-s3
+version: 0.38.3
+keywords:
+  - s3
+home: https://github.com/yandex-cloud/k8s-csi-s3
+sources:
+  - https://github.com/yandex-cloud/k8s-csi-s3/deploy/helm
+icon: https://raw.githubusercontent.com/yandex-cloud/geesefs/master/doc/geesefs.png
deploy/helm/csi-s3/README.md (new file, 40 lines)

@@ -0,0 +1,40 @@
+# Helm chart for csi-s3
+
+This chart adds S3 volume support to your cluster.
+
+## Install chart
+
+- Helm 2.x: `helm install [--set secret.accessKey=... --set secret.secretKey=... ...] --namespace kube-system --name csi-s3 .`
+- Helm 3.x: `helm install [--set secret.accessKey=... --set secret.secretKey=... ...] --namespace kube-system csi-s3 .`
+
+After installation succeeds, you can get a status of Chart: `helm status csi-s3`.
+
+## Delete Chart
+
+- Helm 2.x: `helm delete --purge csi-s3`
+- Helm 3.x: `helm uninstall csi-s3 --namespace kube-system`
+
+## Configuration
+
+By default, this chart creates a secret and a storage class. You should at least set `secret.accessKey` and `secret.secretKey`
+to your [Yandex Object Storage](https://cloud.yandex.com/en-ru/services/storage) keys for it to work.
+
+The following table lists all configuration parameters and their default values.
+
+| Parameter                    | Description                                                             | Default                                                 |
+| ---------------------------- | ----------------------------------------------------------------------- | ------------------------------------------------------- |
+| `storageClass.create`        | Specifies whether the storage class should be created                   | true                                                    |
+| `storageClass.name`          | Storage class name                                                      | csi-s3                                                  |
+| `storageClass.singleBucket`  | Use a single bucket for all dynamically provisioned persistent volumes  |                                                         |
+| `storageClass.mounter`       | Mounter to use. Either geesefs, s3fs or rclone. geesefs recommended     | geesefs                                                 |
+| `storageClass.mountOptions`  | GeeseFS mount options                                                   | `--memory-limit 1000 --dir-mode 0777 --file-mode 0666`  |
+| `storageClass.reclaimPolicy` | Volume reclaim policy                                                   | Delete                                                  |
+| `storageClass.annotations`   | Annotations for the storage class                                       |                                                         |
+| `secret.create`              | Specifies whether the secret should be created                          | true                                                    |
+| `secret.name`                | Name of the secret                                                      | csi-s3-secret                                           |
+| `secret.accessKey`           | S3 Access Key                                                           |                                                         |
+| `secret.secretKey`           | S3 Secret Key                                                           |                                                         |
+| `secret.endpoint`            | Endpoint                                                                | https://storage.yandexcloud.net                         |
+| `tolerations.all`            | Tolerate all taints by the CSI-S3 node driver (mounter)                 | false                                                   |
+| `tolerations.node`           | Custom tolerations for the CSI-S3 node driver (mounter)                 | []                                                      |
+| `tolerations.controller`     | Custom tolerations for the CSI-S3 controller (provisioner)              | []                                                      |
deploy/helm/csi-s3/manifest.yaml (new file, 112 lines)

@@ -0,0 +1,112 @@
+helm_chart:
+  name: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3
+  tag: 0.38.3
+requirements:
+  k8s_version: ">=1.13"
+images:
+  - full: images.registrar
+  - full: images.provisioner
+  - full: images.csi
+user_values:
+  - name: storageClass.create
+    title:
+      en: Create storage class
+      ru: Создать класс хранения
+    description:
+      en: Specifies whether the storage class should be created
+      ru: Выберите, чтобы создать новый S3-класс хранения при развёртывании приложения.
+    boolean_value:
+      default_value: true
+  - name: secret.create
+    title:
+      en: Create secret
+      ru: Создать секрет
+    description:
+      en: Specifies whether the secret should be created
+      ru: Выберите, чтобы создать новый секрет для класса хранения при установке приложения, а не использовать существующий.
+    boolean_value:
+      default_value: true
+  - name: secret.accessKey
+    title:
+      en: S3 Access Key ID
+      ru: Идентификатор ключа S3
+    description:
+      en: S3 Access Key ID
+      ru: Идентификатор ключа S3.
+    string_value:
+      default_value: ""
+  - name: secret.secretKey
+    title:
+      en: S3 Secret Key
+      ru: Секретный ключ S3
+    description:
+      en: S3 Secret Key
+      ru: Секретный ключ S3.
+    string_value:
+      default_value: ""
+  - name: storageClass.singleBucket
+    title:
+      en: Single S3 bucket for volumes
+      ru: Общий бакет S3 для томов
+    description:
+      en: Single S3 bucket to use for all dynamically provisioned persistent volumes
+      ru: Общий бакет S3, в котором будут создаваться все динамически распределяемые тома. Если пусто, под каждый том будет создаваться новый бакет.
+    string_value:
+      default_value: ""
+  - name: secret.endpoint
+    title:
+      en: S3 endpoint
+      ru: Адрес S3-сервиса
+    description:
+      en: S3 service endpoint to use
+      ru: Адрес S3-сервиса, который будет использоваться.
+    string_value:
+      default_value: "https://storage.yandexcloud.net"
+  - name: storageClass.mountOptions
+    title:
+      en: GeeseFS mount options
+      ru: Опции монтирования GeeseFS
+    description:
+      en: GeeseFS mount options to use. Refer to `geesefs --help` command output for the whole list of options (https://github.com/yandex-cloud/geesefs).
+      ru: Опции монтирования GeeseFS. Полный перечень и описание опций смотрите в выводе команды `geesefs --help` (https://github.com/yandex-cloud/geesefs).
+    string_value:
+      default_value: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
+  - name: storageClass.reclaimPolicy
+    title:
+      en: Volume reclaim policy
+      ru: Политика очистки томов
+    description:
+      en: Volume reclaim policy for the storage class (Retain or Delete)
+      ru: Выберите политику очистки томов PersistentVolume при удалении PersistentVolumeClaim. Retain — сохранять том, Delete — удалять том.
+    string_selector_value:
+      default_value: Delete
+      values:
+        - Delete
+        - Retain
+  - name: storageClass.name
+    title:
+      en: Storage class name
+      ru: Название класса хранения
+    description:
+      en: Name of the storage class that will be created
+      ru: Название класса хранения, который будет создан при установке.
+    string_value:
+      default_value: csi-s3
+  - name: secret.name
+    title:
+      en: Name of the secret
+      ru: Название секрета
+    description:
+      en: Name of the secret to create or use for the storage class
+      ru: Название секрета, который будет создан или использован для класса хранения.
+    string_value:
+      default_value: csi-s3-secret
+  - name: tolerations.all
+    title:
+      en: Tolerate all taints
+      ru: Игнорировать все политики taint
+    description:
+      en: Tolerate all taints by the CSI-S3 node driver (mounter)
+      ru: Выберите, чтобы драйвер CSI, который монтирует файловую систему на узлах, игнорировал все политики taint для узлов кластера.
+    boolean_value:
+      default_value: false
deploy/helm/csi-s3/templates/csi-s3.yaml (new file, 129 lines)

@@ -0,0 +1,129 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: csi-s3
+  namespace: {{ .Release.Namespace }}
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-s3
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-s3
+subjects:
+  - kind: ServiceAccount
+    name: csi-s3
+    namespace: {{ .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: csi-s3
+  apiGroup: rbac.authorization.k8s.io
+---
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: csi-s3
+  namespace: {{ .Release.Namespace }}
+spec:
+  selector:
+    matchLabels:
+      app: csi-s3
+  template:
+    metadata:
+      labels:
+        app: csi-s3
+    spec:
+      tolerations:
+{{- if .Values.tolerations.all }}
+        - operator: Exists
+{{- else }}
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - operator: Exists
+          effect: NoExecute
+          tolerationSeconds: 300
+{{- end }}
+{{- with .Values.tolerations.node }}
+{{- toYaml . | nindent 8 }}
+{{- end }}
+      serviceAccount: csi-s3
+      containers:
+        - name: driver-registrar
+          image: {{ .Values.images.registrar }}
+          args:
+            - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
+            - "--v=4"
+            - "--csi-address=$(ADDRESS)"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+            - name: DRIVER_REG_SOCK_PATH
+              value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
+            - name: KUBE_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          volumeMounts:
+            - name: plugin-dir
+              mountPath: /csi
+            - name: registration-dir
+              mountPath: /registration/
+        - name: csi-s3
+          securityContext:
+            privileged: true
+            capabilities:
+              add: ["SYS_ADMIN"]
+            allowPrivilegeEscalation: true
+          image: {{ .Values.images.csi }}
+          imagePullPolicy: IfNotPresent
+          args:
+            - "--endpoint=$(CSI_ENDPOINT)"
+            - "--nodeid=$(NODE_ID)"
+            - "--v=4"
+          env:
+            - name: CSI_ENDPOINT
+              value: unix:///csi/csi.sock
+            - name: NODE_ID
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          volumeMounts:
+            - name: plugin-dir
+              mountPath: /csi
+            - name: stage-dir
+              mountPath: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
+              mountPropagation: "Bidirectional"
+            - name: pods-mount-dir
+              mountPath: {{ .Values.kubeletPath }}/pods
+              mountPropagation: "Bidirectional"
+            - name: fuse-device
+              mountPath: /dev/fuse
+            - name: systemd-control
+              mountPath: /run/systemd
+      volumes:
+        - name: registration-dir
+          hostPath:
+            path: {{ .Values.kubeletPath }}/plugins_registry/
+            type: DirectoryOrCreate
+        - name: plugin-dir
+          hostPath:
+            path: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
+            type: DirectoryOrCreate
+        - name: stage-dir
+          hostPath:
+            path: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
+            type: DirectoryOrCreate
+        - name: pods-mount-dir
+          hostPath:
+            path: {{ .Values.kubeletPath }}/pods
+            type: Directory
+        - name: fuse-device
+          hostPath:
+            path: /dev/fuse
+        - name: systemd-control
+          hostPath:
+            path: /run/systemd
+            type: DirectoryOrCreate
deploy/helm/csi-s3/templates/driver.yaml (new file, 10 lines)

@@ -0,0 +1,10 @@
+apiVersion: storage.k8s.io/v1
+kind: CSIDriver
+metadata:
+  name: ru.yandex.s3.csi
+spec:
+  attachRequired: false
+  podInfoOnMount: true
+  fsGroupPolicy: File # added in Kubernetes 1.19, this field is GA as of Kubernetes 1.23
+  volumeLifecycleModes: # added in Kubernetes 1.16, this field is beta
+    - Persistent
deploy/helm/csi-s3/templates/provisioner.yaml (new file, 116 lines)

@@ -0,0 +1,116 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: csi-provisioner-sa
+  namespace: {{ .Release.Namespace }}
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: external-provisioner-runner
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: csi-provisioner-sa
+    namespace: {{ .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: csi-provisioner-s3
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: csi-provisioner-s3
+spec:
+  selector:
+    app: csi-provisioner-s3
+  ports:
+    - name: csi-s3-dummy
+      port: 65535
+---
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+  name: csi-provisioner-s3
+  namespace: {{ .Release.Namespace }}
+spec:
+  serviceName: "csi-provisioner-s3"
+  replicas: 1
+  selector:
+    matchLabels:
+      app: csi-provisioner-s3
+  template:
+    metadata:
+      labels:
+        app: csi-provisioner-s3
+    spec:
+      serviceAccount: csi-provisioner-sa
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          operator: Exists
+        - key: CriticalAddonsOnly
+          operator: Exists
+{{- with .Values.tolerations.controller }}
+{{- toYaml . | nindent 8 }}
+{{- end }}
+{{- with .Values.nodeSelector }}
+      nodeSelector:
+{{- toYaml . | nindent 8 }}
+{{- end }}
+      containers:
+        - name: csi-provisioner
+          image: {{ .Values.images.provisioner }}
+          args:
+            - "--csi-address=$(ADDRESS)"
+            - "--v=4"
+          env:
+            - name: ADDRESS
+              value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
+          imagePullPolicy: "IfNotPresent"
+          volumeMounts:
+            - name: socket-dir
+              mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
+        - name: csi-s3
+          image: {{ .Values.images.csi }}
+          imagePullPolicy: IfNotPresent
+          args:
+            - "--endpoint=$(CSI_ENDPOINT)"
+            - "--nodeid=$(NODE_ID)"
+            - "--v=4"
+          env:
+            - name: CSI_ENDPOINT
+              value: unix://{{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
+            - name: NODE_ID
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          volumeMounts:
+            - name: socket-dir
+              mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
+      volumes:
+        - name: socket-dir
+          emptyDir: {}
deploy/helm/csi-s3/templates/secret.yaml (new file, 15 lines)

@@ -0,0 +1,15 @@
+{{- if .Values.secret.create -}}
+apiVersion: v1
+kind: Secret
+metadata:
+  namespace: {{ .Release.Namespace }}
+  name: {{ .Values.secret.name }}
+stringData:
+{{- if .Values.secret.accessKey }}
+  accessKeyID: {{ .Values.secret.accessKey }}
+{{- end }}
+{{- if .Values.secret.secretKey }}
+  secretAccessKey: {{ .Values.secret.secretKey }}
+{{- end }}
+  endpoint: {{ .Values.secret.endpoint }}
+{{- end -}}
deploy/helm/csi-s3/templates/storageclass.yaml (new file, 26 lines)

@@ -0,0 +1,26 @@
+{{- if .Values.storageClass.create -}}
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: {{ .Values.storageClass.name }}
+{{- if .Values.storageClass.annotations }}
+  annotations:
+{{ toYaml .Values.storageClass.annotations | indent 4 }}
+{{- end }}
+provisioner: ru.yandex.s3.csi
+parameters:
+  mounter: "{{ .Values.storageClass.mounter }}"
+  options: "{{ .Values.storageClass.mountOptions }}"
+{{- if .Values.storageClass.singleBucket }}
+  bucket: "{{ .Values.storageClass.singleBucket }}"
+{{- end }}
+  csi.storage.k8s.io/provisioner-secret-name: {{ .Values.secret.name }}
+  csi.storage.k8s.io/provisioner-secret-namespace: {{ .Release.Namespace }}
+  csi.storage.k8s.io/controller-publish-secret-name: {{ .Values.secret.name }}
+  csi.storage.k8s.io/controller-publish-secret-namespace: {{ .Release.Namespace }}
+  csi.storage.k8s.io/node-stage-secret-name: {{ .Values.secret.name }}
+  csi.storage.k8s.io/node-stage-secret-namespace: {{ .Release.Namespace }}
+  csi.storage.k8s.io/node-publish-secret-name: {{ .Values.secret.name }}
+  csi.storage.k8s.io/node-publish-secret-namespace: {{ .Release.Namespace }}
+reclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
+{{- end -}}
deploy/helm/csi-s3/values.yaml (new file, 48 lines)

@@ -0,0 +1,48 @@
+---
+images:
+  # Source: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
+  registrar: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-node-driver-registrar:v1.2.0
+  # Source: quay.io/k8scsi/csi-provisioner:v2.1.0
+  provisioner: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-provisioner:v2.1.0
+  # Main image
+  csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3-driver:0.38.3
+
+storageClass:
+  # Specifies whether the storage class should be created
+  create: true
+  # Name
+  name: csi-s3
+  # Use a single bucket for all dynamically provisioned persistent volumes
+  singleBucket: ""
+  # mounter to use - either geesefs, s3fs or rclone (default geesefs)
+  mounter: geesefs
+  # GeeseFS mount options
+  mountOptions: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
+  # Volume reclaim policy
+  reclaimPolicy: Delete
+  # Annotations for the storage class
+  # Example:
+  # annotations:
+  #   storageclass.kubernetes.io/is-default-class: "true"
+  annotations: {}
+
+secret:
+  # Specifies whether the secret should be created
+  create: true
+  # Name of the secret
+  name: csi-s3-secret
+  # S3 Access Key
+  accessKey: ""
+  # S3 Secret Key
+  secretKey: ""
+  # Endpoint
+  endpoint: https://storage.yandexcloud.net
+
+tolerations:
+  all: false
+  node: []
+  controller: []
+
+nodeSelector: {}
+
+kubeletPath: /var/lib/kubelet
deploy/kubernetes/attacher.yaml (deleted, 93 lines)

@@ -1,93 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: csi-attacher-sa
-  namespace: kube-system
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: external-attacher-runner
-rules:
-  - apiGroups: [""]
-    resources: ["secrets"]
-    verbs: ["get", "list"]
-  - apiGroups: [""]
-    resources: ["events"]
-    verbs: ["get", "list", "watch", "update"]
-  - apiGroups: [""]
-    resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch", "update"]
-  - apiGroups: [""]
-    resources: ["nodes"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["volumeattachments"]
-    verbs: ["get", "list", "watch", "update", "patch"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: csi-attacher-role
-subjects:
-  - kind: ServiceAccount
-    name: csi-attacher-sa
-    namespace: kube-system
-roleRef:
-  kind: ClusterRole
-  name: external-attacher-runner
-  apiGroup: rbac.authorization.k8s.io
----
-# needed for StatefulSet
-kind: Service
-apiVersion: v1
-metadata:
-  name: csi-attacher-s3
-  namespace: kube-system
-  labels:
-    app: csi-attacher-s3
-spec:
-  selector:
-    app: csi-attacher-s3
-  ports:
-    - name: csi-s3-dummy
-      port: 65535
----
-kind: StatefulSet
-apiVersion: apps/v1
-metadata:
-  name: csi-attacher-s3
-  namespace: kube-system
-spec:
-  serviceName: "csi-attacher-s3"
-  replicas: 1
-  selector:
-    matchLabels:
-      app: csi-attacher-s3
-  template:
-    metadata:
-      labels:
-        app: csi-attacher-s3
-    spec:
-      serviceAccount: csi-attacher-sa
-      tolerations:
-        - key: node-role.kubernetes.io/master
-          operator: "Exists"
-      containers:
-        - name: csi-attacher
-          image: quay.io/k8scsi/csi-attacher:v2.2.0
-          args:
-            - "--v=4"
-            - "--csi-address=$(ADDRESS)"
-          env:
-            - name: ADDRESS
-              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
-          imagePullPolicy: "IfNotPresent"
-          volumeMounts:
-            - name: socket-dir
-              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
-      volumes:
-        - name: socket-dir
-          hostPath:
-            path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
-            type: DirectoryOrCreate
deploy/kubernetes/csi-s3.yaml

@@ -8,22 +8,6 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: csi-s3
-rules:
-  - apiGroups: [""]
-    resources: ["secrets"]
-    verbs: ["get", "list"]
-  - apiGroups: [""]
-    resources: ["nodes"]
-    verbs: ["get", "list", "update"]
-  - apiGroups: [""]
-    resources: ["namespaces"]
-    verbs: ["get", "list"]
-  - apiGroups: [""]
-    resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch", "update"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["volumeattachments"]
-    verbs: ["get", "list", "watch", "update"]
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1

@@ -52,8 +36,13 @@ spec:
       labels:
         app: csi-s3
     spec:
+      tolerations:
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - operator: Exists
+          effect: NoExecute
+          tolerationSeconds: 300
       serviceAccount: csi-s3
       hostNetwork: true
       containers:
        - name: driver-registrar
          image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0

@@ -81,7 +70,7 @@ spec:
             capabilities:
               add: ["SYS_ADMIN"]
             allowPrivilegeEscalation: true
-          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.26.2
+          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
           imagePullPolicy: IfNotPresent
           args:
             - "--endpoint=$(CSI_ENDPOINT)"

@@ -97,11 +86,16 @@ spec:
           volumeMounts:
             - name: plugin-dir
              mountPath: /csi
+            - name: stage-dir
+              mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
+              mountPropagation: "Bidirectional"
             - name: pods-mount-dir
               mountPath: /var/lib/kubelet/pods
               mountPropagation: "Bidirectional"
            - name: fuse-device
              mountPath: /dev/fuse
+            - name: systemd-control
+              mountPath: /run/systemd
       volumes:
         - name: registration-dir
           hostPath:

@@ -111,6 +105,10 @@ spec:
          hostPath:
            path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
            type: DirectoryOrCreate
+        - name: stage-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins/kubernetes.io/csi
+            type: DirectoryOrCreate
         - name: pods-mount-dir
           hostPath:
             path: /var/lib/kubelet/pods

@@ -118,3 +116,7 @@ spec:
         - name: fuse-device
           hostPath:
             path: /dev/fuse
+        - name: systemd-control
+          hostPath:
+            path: /run/systemd
+            type: DirectoryOrCreate
deploy/kubernetes/driver.yaml (new file, 7 lines)

@@ -0,0 +1,7 @@
+apiVersion: storage.k8s.io/v1
+kind: CSIDriver
+metadata:
+  name: ru.yandex.s3.csi
+spec:
+  attachRequired: false
+  podInfoOnMount: true
deploy/kubernetes/examples/pvc-manual.yaml

@@ -5,7 +5,7 @@
 apiVersion: v1
 kind: PersistentVolume
 metadata:
-  name: manualbucket/path
+  name: manualbucket-with-path
 spec:
   storageClassName: csi-s3
   capacity:

@@ -14,7 +14,7 @@ spec:
     - ReadWriteMany
   claimRef:
     namespace: default
-    name: manualclaim
+    name: csi-s3-manual-pvc
   csi:
     driver: ru.yandex.s3.csi
     controllerPublishSecretRef:

@@ -29,12 +29,13 @@ spec:
     volumeAttributes:
       capacity: 10Gi
       mounter: geesefs
+      options: --memory-limit 1000 --dir-mode 0777 --file-mode 0666
     volumeHandle: manualbucket/path
 ---
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
-  name: csi-s3-pvc
+  name: csi-s3-manual-pvc
 spec:
   # Empty storage class disables dynamic provisioning
   storageClassName: ""
deploy/kubernetes/examples/pvc.yaml

@@ -8,7 +8,7 @@ metadata:
   namespace: default
 spec:
   accessModes:
-    - ReadWriteOnce
+    - ReadWriteMany
   resources:
     requests:
       storage: 5Gi
deploy/kubernetes/provisioner.yaml

@@ -70,8 +70,10 @@ spec:
     spec:
       serviceAccount: csi-provisioner-sa
       tolerations:
-        - key: node-role.kubernetes.io/master
-          operator: "Exists"
+        - key: node-role.kubernetes.io/master
+          operator: Exists
+        - key: CriticalAddonsOnly
+          operator: Exists
       containers:
         - name: csi-provisioner
           image: quay.io/k8scsi/csi-provisioner:v2.1.0

@@ -86,7 +88,7 @@ spec:
             - name: socket-dir
               mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
         - name: csi-s3
-          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.26.2
+          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
           imagePullPolicy: IfNotPresent
           args:
             - "--endpoint=$(CSI_ENDPOINT)"
go.mod (2 lines added)

@@ -4,6 +4,8 @@ go 1.15
 
 require (
 	github.com/container-storage-interface/spec v1.1.0
+	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+	github.com/godbus/dbus/v5 v5.0.4 // indirect
 	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
 	github.com/golang/protobuf v1.1.0 // indirect
 	github.com/kubernetes-csi/csi-lib-utils v0.6.1 // indirect
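The two modules added here (`github.com/coreos/go-systemd/v22` and `github.com/godbus/dbus/v5`) are the foundation for the systemd-based GeeseFS mounting introduced further down in `pkg/mounter`. A minimal, self-contained sketch of starting a transient systemd unit with these libraries — the unit name and command are illustrative, not taken from this changeset:

```go
package main

import (
	"fmt"

	systemd "github.com/coreos/go-systemd/v22/dbus"
	godbus "github.com/godbus/dbus/v5"
)

func main() {
	// Connect to the host's systemd over D-Bus. This needs /run/systemd
	// to be reachable, which is why the DaemonSet mounts it into the pod.
	conn, err := systemd.New()
	if err != nil {
		fmt.Println("no systemd available:", err)
		return
	}
	defer conn.Close()

	// A transient unit outlives the calling process, so a FUSE mount
	// started this way survives restarts of the container that created it.
	unitName := "demo-" + systemd.PathBusEscape("example-volume") + ".service"
	props := []systemd.Property{
		{Name: "Description", Value: godbus.MakeVariant("demo transient unit")},
		systemd.PropExecStart([]string{"/bin/sleep", "60"}, false),
	}
	if _, err := conn.StartTransientUnit(unitName, "replace", props, nil); err != nil {
		fmt.Println("failed to start unit:", err)
	}
}
```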
go.sum (4 lines added)

@@ -1,9 +1,13 @@
 github.com/container-storage-interface/spec v1.1.0 h1:qPsTqtR1VUPvMPeK0UnCZMtXaKGyyLPG8gj/wG6VqMs=
 github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
pkg/driver/controllerserver.go

@@ -47,8 +47,8 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 	prefix := ""
 
 	// check if bucket name is overridden
-	if nameOverride, ok := params[mounter.BucketKey]; ok {
-		bucketName = nameOverride
+	if params[mounter.BucketKey] != "" {
+		bucketName = params[mounter.BucketKey]
 		prefix = volumeID
 		volumeID = path.Join(bucketName, prefix)
 	}

@@ -128,7 +128,7 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
 	var deleteErr error
 	if prefix == "" {
 		// prefix is empty, we delete the whole bucket
-		if err := client.RemoveBucket(bucketName); err != nil {
+		if err := client.RemoveBucket(bucketName); err != nil && err.Error() != "The specified bucket does not exist" {
 			deleteErr = err
 		}
 		glog.V(4).Infof("Bucket %s removed", bucketName)

@@ -170,9 +170,8 @@ func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req
 		return nil, status.Error(codes.NotFound, fmt.Sprintf("bucket of volume with id %s does not exist", req.GetVolumeId()))
 	}
 
-	// We currently only support RWO
 	supportedAccessMode := &csi.VolumeCapability_AccessMode{
-		Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
+		Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
 	}
 
 	for _, capability := range req.VolumeCapabilities {

@@ -211,7 +210,7 @@ func sanitizeVolumeID(volumeID string) string {
 func volumeIDToBucketPrefix(volumeID string) (string, string) {
 	// if the volumeID has a slash in it, this volume is
 	// stored under a certain prefix within the bucket.
-	splitVolumeID := strings.Split(volumeID, "/")
+	splitVolumeID := strings.SplitN(volumeID, "/", 2)
 	if len(splitVolumeID) > 1 {
 		return splitVolumeID[0], splitVolumeID[1]
 	}
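A note on the `SplitN` fix above: with plain `strings.Split`, a volume ID like `bucket/a/b` yields three parts, so taking `[0]` and `[1]` would silently drop everything in the prefix after its first slash. A standalone sketch (illustrative, not part of this changeset):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	volumeID := "manualbucket/deep/nested/prefix"

	// strings.Split breaks the prefix apart: picking [0] and [1]
	// recovers only the first path segment after the bucket.
	parts := strings.Split(volumeID, "/")
	fmt.Println(parts[0], parts[1]) // manualbucket deep

	// strings.SplitN with limit 2 keeps the whole remainder intact,
	// which is what volumeIDToBucketPrefix needs for nested prefixes.
	if split := strings.SplitN(volumeID, "/", 2); len(split) > 1 {
		fmt.Println(split[0], split[1]) // manualbucket deep/nested/prefix
	}
}
```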
pkg/driver/driver.go

@@ -33,7 +33,7 @@ type driver struct {
 }
 
 var (
-	vendorVersion = "v1.2.0"
+	vendorVersion = "v1.34.7"
 	driverName    = "ru.yandex.s3.csi"
 )
 

@@ -75,7 +75,7 @@ func (s3 *driver) Run() {
 	// Initialize default library driver
 
 	s3.driver.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME})
-	s3.driver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER})
+	s3.driver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER})
 
 	// Create GRPC servers
 	s3.ids = s3.newIdentityServer(s3.driver)
test (Ginkgo sanity suite)

@@ -5,7 +5,6 @@ import (
 	"os"
 
 	"github.com/yandex-cloud/k8s-csi-s3/pkg/driver"
-	"github.com/yandex-cloud/k8s-csi-s3/pkg/mounter"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 

@@ -67,7 +66,7 @@ var _ = Describe("S3Driver", func() {
 		})
 	})
 
-	Context("s3fs", func() {
+	/* Context("s3fs", func() {
 		socket := "/tmp/csi-s3fs.sock"
 		csiEndpoint := "unix://" + socket
 		if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {

@@ -120,5 +119,5 @@ var _ = Describe("S3Driver", func() {
 		}
 		sanity.GinkgoTest(sanityCfg)
 	})
-	})
+	})*/
 })
pkg/driver/nodeserver.go

@@ -19,6 +19,7 @@ package driver
 import (
 	"fmt"
 	"os"
+	"os/exec"
 	"regexp"
 	"strconv"
 

@@ -68,7 +69,6 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	volumeID := req.GetVolumeId()
 	targetPath := req.GetTargetPath()
 	stagingTargetPath := req.GetStagingTargetPath()
-	bucketName, prefix := volumeIDToBucketPrefix(volumeID)
 
 	// Check arguments
 	if req.GetVolumeCapability() == nil {

@@ -84,7 +84,28 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
 	}
 
-	notMnt, err := checkMount(targetPath)
+	notMnt, err := checkMount(stagingTargetPath)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+	if notMnt {
+		// Staged mount is dead by some reason. Revive it
+		bucketName, prefix := volumeIDToBucketPrefix(volumeID)
+		s3, err := s3.NewClientFromSecret(req.GetSecrets())
+		if err != nil {
+			return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
+		}
+		meta := getMeta(bucketName, prefix, req.VolumeContext)
+		mounter, err := mounter.New(meta, s3.Config)
+		if err != nil {
+			return nil, err
+		}
+		if err := mounter.Mount(stagingTargetPath, volumeID); err != nil {
+			return nil, err
+		}
+	}
+
+	notMnt, err = checkMount(targetPath)
 	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}

@@ -100,18 +121,12 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	glog.V(4).Infof("target %v\nreadonly %v\nvolumeId %v\nattributes %v\nmountflags %v\n",
 		targetPath, readOnly, volumeID, attrib, mountFlags)
 
-	s3, err := s3.NewClientFromSecret(req.GetSecrets())
+	cmd := exec.Command("mount", "--bind", stagingTargetPath, targetPath)
+	cmd.Stderr = os.Stderr
+	glog.V(3).Infof("Binding volume %v from %v to %v", volumeID, stagingTargetPath, targetPath)
+	out, err := cmd.Output()
 	if err != nil {
-		return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
-	}
-
-	meta := getMeta(bucketName, prefix, req.VolumeContext)
-	mounter, err := mounter.New(meta, s3.Config)
-	if err != nil {
-		return nil, err
-	}
-	if err := mounter.Mount(stagingTargetPath, targetPath); err != nil {
-		return nil, err
+		return nil, fmt.Errorf("Error running mount --bind %v %v: %s", stagingTargetPath, targetPath, out)
 	}
 
 	glog.V(4).Infof("s3: volume %s successfully mounted to %s", volumeID, targetPath)

@@ -131,7 +146,7 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
 		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
 	}
 
-	if err := mounter.FuseUnmount(targetPath); err != nil {
+	if err := mounter.Unmount(targetPath); err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 	glog.V(4).Infof("s3: volume %s has been unmounted.", volumeID)

@@ -174,7 +189,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 	if err != nil {
 		return nil, err
 	}
-	if err := mounter.Stage(stagingTargetPath); err != nil {
+	if err := mounter.Mount(stagingTargetPath, volumeID); err != nil {
 		return nil, err
 	}
 

@@ -193,6 +208,22 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
 		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
 	}
 
+	proc, err := mounter.FindFuseMountProcess(stagingTargetPath)
+	if err != nil {
+		return nil, err
+	}
+	exists := false
+	if proc == nil {
+		exists, err = mounter.SystemdUnmount(volumeID)
+		if exists && err != nil {
+			return nil, err
+		}
+	}
+	if !exists {
+		err = mounter.FuseUnmount(stagingTargetPath)
+	}
 	glog.V(4).Infof("s3: volume %s has been unmounted from stage path %v.", volumeID, stagingTargetPath)
 
 	return &csi.NodeUnstageVolumeResponse{}, nil
 }
 
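The rewritten publish path follows the CSI staging model: `NodeStageVolume` performs the (expensive) FUSE mount once per node at the staging path, and each `NodePublishVolume` merely bind-mounts that directory into the pod, reviving the staged mount first if it died. A minimal sketch of the same bind-mount call — the paths are hypothetical, and the command needs root privileges, which the driver has in its privileged container:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// bindMount mirrors the pattern used in NodePublishVolume: one FUSE
// mount at the staging path, then a cheap bind mount per pod target.
func bindMount(stagingPath, targetPath string) error {
	cmd := exec.Command("mount", "--bind", stagingPath, targetPath)
	cmd.Stderr = os.Stderr
	if out, err := cmd.Output(); err != nil {
		return fmt.Errorf("error running mount --bind %v %v: %s", stagingPath, targetPath, out)
	}
	return nil
}

func main() {
	// Hypothetical staging and target paths for illustration.
	err := bindMount(
		"/var/lib/kubelet/plugins/kubernetes.io/csi/staging/pvc-1",
		"/var/lib/kubelet/pods/some-pod-uid/volumes/pvc-1",
	)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```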
pkg/mounter/geesefs.go

@@ -3,6 +3,12 @@ package mounter
 import (
 	"fmt"
 	"os"
+	"strings"
+	"time"
+
+	systemd "github.com/coreos/go-systemd/v22/dbus"
+	dbus "github.com/godbus/dbus/v5"
+	"github.com/golang/glog"
 
 	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
 )

@@ -30,27 +36,170 @@ func newGeeseFSMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
 	}, nil
 }
 
-func (geesefs *geesefsMounter) Stage(stageTarget string) error {
+func (geesefs *geesefsMounter) CopyBinary(from, to string) error {
+	st, err := os.Stat(from)
+	if err != nil {
+		return fmt.Errorf("Failed to stat %s: %v", from, err)
+	}
+	st2, err := os.Stat(to)
+	if err != nil && !os.IsNotExist(err) {
+		return fmt.Errorf("Failed to stat %s: %v", to, err)
+	}
+	if err != nil || st2.Size() != st.Size() || st2.ModTime() != st.ModTime() {
+		if err == nil {
+			// remove the file first to not hit "text file busy" errors
+			err = os.Remove(to)
+			if err != nil {
+				return fmt.Errorf("Error removing %s to update it: %v", to, err)
+			}
+		}
+		bin, err := os.ReadFile(from)
+		if err != nil {
+			return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
+		}
+		err = os.WriteFile(to, bin, 0755)
+		if err != nil {
+			return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
+		}
+		err = os.Chtimes(to, st.ModTime(), st.ModTime())
+		if err != nil {
+			return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
+		}
+	}
 	return nil
 }
 
 func (geesefs *geesefsMounter) Unstage(stageTarget string) error {
 	return nil
 }
 
-func (geesefs *geesefsMounter) Mount(source string, target string) error {
-	fullPath := fmt.Sprintf("%s:%s", geesefs.meta.BucketName, geesefs.meta.Prefix)
-	args := []string{
+func (geesefs *geesefsMounter) MountDirect(target string, args []string) error {
+	args = append([]string{
 		"--endpoint", geesefs.endpoint,
 		"-o", "allow_other",
 		"--log-file", "/dev/stderr",
-	}
+	}, args...)
+	envs := []string{
+		"AWS_ACCESS_KEY_ID=" + geesefs.accessKeyID,
+		"AWS_SECRET_ACCESS_KEY=" + geesefs.secretAccessKey,
+	}
+	return fuseMount(target, geesefsCmd, args, envs)
+}
+
+type execCmd struct {
+	Path             string
+	Args             []string
+	UncleanIsFailure bool
+}
+
+func (geesefs *geesefsMounter) Mount(target, volumeID string) error {
+	fullPath := fmt.Sprintf("%s:%s", geesefs.meta.BucketName, geesefs.meta.Prefix)
+	var args []string
 	if geesefs.region != "" {
 		args = append(args, "--region", geesefs.region)
 	}
 	args = append(args, geesefs.meta.MountOptions...)
+	args = append(
+		args,
+		"--setuid", "65534", // nobody. drop root privileges
+		"--setgid", "65534", // nogroup
+	)
+	useSystemd := true
+	for i := 0; i < len(geesefs.meta.MountOptions); i++ {
+		opt := geesefs.meta.MountOptions[i]
+		if opt == "--no-systemd" {
+			useSystemd = false
+		} else if len(opt) > 0 && opt[0] == '-' {
+			// Remove unsafe options
+			s := 1
+			if len(opt) > 1 && opt[1] == '-' {
+				s++
+			}
+			key := opt[s:]
+			e := strings.Index(opt, "=")
+			if e >= 0 {
+				key = opt[s:e]
+			}
+			if key == "log-file" || key == "shared-config" || key == "cache" {
+				// Skip options accessing local FS
+				if e < 0 {
+					i++
+				}
+			} else if key != "" {
+				args = append(args, opt)
+			}
+		} else if len(opt) > 0 {
+			args = append(args, opt)
+		}
+	}
 	args = append(args, fullPath, target)
-	os.Setenv("AWS_ACCESS_KEY_ID", geesefs.accessKeyID)
-	os.Setenv("AWS_SECRET_ACCESS_KEY", geesefs.secretAccessKey)
-	return fuseMount(target, geesefsCmd, args)
+	// Try to start geesefs using systemd so it doesn't get killed when the container exits
+	if !useSystemd {
+		return geesefs.MountDirect(target, args)
+	}
+	conn, err := systemd.New()
+	if err != nil {
+		glog.Errorf("Failed to connect to systemd dbus service: %v, starting geesefs directly", err)
+		return geesefs.MountDirect(target, args)
+	}
+	defer conn.Close()
+	// systemd is present
+	if err = geesefs.CopyBinary("/usr/bin/geesefs", "/csi/geesefs"); err != nil {
+		return err
+	}
+	pluginDir := os.Getenv("PLUGIN_DIR")
+	if pluginDir == "" {
+		pluginDir = "/var/lib/kubelet/plugins/ru.yandex.s3.csi"
+	}
+	args = append([]string{pluginDir + "/geesefs", "-f", "-o", "allow_other", "--endpoint", geesefs.endpoint}, args...)
+	glog.Info("Starting geesefs using systemd: " + strings.Join(args, " "))
+	unitName := "geesefs-" + systemd.PathBusEscape(volumeID) + ".service"
+	newProps := []systemd.Property{
+		systemd.Property{
+			Name:  "Description",
+			Value: dbus.MakeVariant("GeeseFS mount for Kubernetes volume " + volumeID),
+		},
+		systemd.PropExecStart(args, false),
+		systemd.Property{
+			Name: "ExecStopPost",
+			// force & lazy unmount to cleanup possibly dead mountpoints
+			Value: dbus.MakeVariant([]execCmd{execCmd{"/bin/umount", []string{"/bin/umount", "-f", "-l", target}, false}}),
+		},
+		systemd.Property{
+			Name:  "Environment",
+			Value: dbus.MakeVariant([]string{"AWS_ACCESS_KEY_ID=" + geesefs.accessKeyID, "AWS_SECRET_ACCESS_KEY=" + geesefs.secretAccessKey}),
+		},
+		systemd.Property{
+			Name:  "CollectMode",
+			Value: dbus.MakeVariant("inactive-or-failed"),
+		},
+	}
+	unitProps, err := conn.GetAllProperties(unitName)
+	if err == nil {
+		// Unit already exists
+		if s, ok := unitProps["ActiveState"].(string); ok && (s == "active" || s == "activating" || s == "reloading") {
+			// Unit is already active
+			curPath := ""
+			prevExec, ok := unitProps["ExecStart"].([][]interface{})
+			if ok && len(prevExec) > 0 && len(prevExec[0]) >= 2 {
+				execArgs, ok := prevExec[0][1].([]string)
+				if ok && len(execArgs) >= 2 {
+					curPath = execArgs[len(execArgs)-1]
+				}
+			}
+			if curPath != target {
+				return fmt.Errorf(
+					"GeeseFS for volume %v is already mounted on host, but"+
+						" in a different directory. We want %v, but it's in %v",
+					volumeID, target, curPath,
+				)
+			}
+			// Already mounted at right location
+			return nil
+		} else {
+			// Stop and garbage collect the unit if automatic collection didn't work for some reason
+			conn.StopUnit(unitName, "replace", nil)
+			conn.ResetFailedUnit(unitName)
+		}
+	}
+	_, err = conn.StartTransientUnit(unitName, "replace", newProps, nil)
+	if err != nil {
+		return fmt.Errorf("Error starting systemd unit %s on host: %v", unitName, err)
+	}
+	return waitForMount(target, 10*time.Second)
 }
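To make the option-sanitizing loop in `Mount` concrete: it strips options that would point GeeseFS at the container's local filesystem (`--log-file`, `--shared-config`, `--cache`), skips their value token when passed separately, and detects `--no-systemd`. A standalone sketch of the same logic with a sample input (illustrative, not part of this changeset):

```go
package main

import (
	"fmt"
	"strings"
)

// sanitize replays the filtering from geesefsMounter.Mount.
func sanitize(opts []string) (args []string, useSystemd bool) {
	useSystemd = true
	for i := 0; i < len(opts); i++ {
		opt := opts[i]
		if opt == "--no-systemd" {
			useSystemd = false
		} else if len(opt) > 0 && opt[0] == '-' {
			s := 1
			if len(opt) > 1 && opt[1] == '-' {
				s++
			}
			key := opt[s:]
			e := strings.Index(opt, "=")
			if e >= 0 {
				key = opt[s:e]
			}
			if key == "log-file" || key == "shared-config" || key == "cache" {
				// Skip the value too when it was given as a separate token.
				if e < 0 {
					i++
				}
			} else if key != "" {
				args = append(args, opt)
			}
		} else if len(opt) > 0 {
			args = append(args, opt)
		}
	}
	return args, useSystemd
}

func main() {
	args, useSystemd := sanitize([]string{
		"--memory-limit", "1000", "--cache", "/tmp/cache", "--no-systemd",
	})
	fmt.Println(args, useSystemd) // [--memory-limit 1000] false
}
```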
@@ -11,18 +11,18 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
+	systemd "github.com/coreos/go-systemd/v22/dbus"
 	"github.com/golang/glog"
 	"github.com/mitchellh/go-ps"
 	"k8s.io/kubernetes/pkg/util/mount"
+
+	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
 )
 
 // Mounter interface which can be implemented
 // by the different mounter types
 type Mounter interface {
-	Stage(stagePath string) error
-	Unstage(stagePath string) error
-	Mount(source string, target string) error
+	Mount(target, volumeID string) error
 }
 
 const (
@@ -57,9 +57,11 @@ func New(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
 	}
 }
 
-func fuseMount(path string, command string, args []string) error {
+func fuseMount(path string, command string, args []string, envs []string) error {
 	cmd := exec.Command(command, args...)
 	cmd.Stderr = os.Stderr
+	// cmd.Environ() returns envs inherited from the current process
+	cmd.Env = append(cmd.Environ(), envs...)
 	glog.V(3).Infof("Mounting fuse with command: %s and args: %s", command, args)
 
 	out, err := cmd.Output()
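One likely motivation for threading envs through fuseMount instead of the old os.Setenv calls: setting process-wide variables from concurrent mounts with different credentials races, while scoping them to the child process does not. Note that os/exec.Cmd.Environ only exists since Go 1.19; on older toolchains the same effect comes from os.Environ(), as in this standalone sketch (the variable value is a placeholder):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("env")
	// Inherit the parent environment, then add per-process secrets.
	// os.Environ() works on any Go version; cmd.Environ() needs Go 1.19+.
	cmd.Env = append(os.Environ(), "AWS_ACCESS_KEY_ID=example")
	out, err := cmd.Output()
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // the child sees the extra variable
}
```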
@@ -70,12 +72,40 @@ func fuseMount(path string, command string, args []string) error {
 	return waitForMount(path, 10*time.Second)
 }
 
+func Unmount(path string) error {
+	if err := mount.New("").Unmount(path); err != nil {
+		return err
+	}
+	return nil
+}
+
+func SystemdUnmount(volumeID string) (bool, error) {
+	conn, err := systemd.New()
+	if err != nil {
+		glog.Errorf("Failed to connect to systemd dbus service: %v", err)
+		return false, err
+	}
+	defer conn.Close()
+	unitName := "geesefs-"+systemd.PathBusEscape(volumeID)+".service"
+	units, err := conn.ListUnitsByNames([]string{ unitName })
+	glog.Errorf("Got %v", units)
+	if err != nil {
+		glog.Errorf("Failed to list systemd unit by name %v: %v", unitName, err)
+		return false, err
+	}
+	if len(units) == 0 || units[0].ActiveState == "inactive" || units[0].ActiveState == "failed" {
+		return true, nil
+	}
+	_, err = conn.StopUnit(unitName, "replace", nil)
+	return true, err
+}
+
 func FuseUnmount(path string) error {
 	if err := mount.New("").Unmount(path); err != nil {
 		return err
 	}
 	// as fuse quits immediately, we will try to wait until the process is done
-	process, err := findFuseMountProcess(path)
+	process, err := FindFuseMountProcess(path)
 	if err != nil {
 		glog.Errorf("Error getting PID of fuse mount: %s", err)
 		return nil
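SystemdUnmount's boolean result tells the caller whether systemd could be consulted at all, which suggests a fallback pattern along these lines (an illustrative caller sketch, not code from this changeset):

```go
package main

import (
	"log"

	"github.com/yandex-cloud/k8s-csi-s3/pkg/mounter"
)

// unmountVolume is illustrative: prefer the systemd-managed unit,
// fall back to a direct FUSE unmount when systemd is unreachable.
func unmountVolume(volumeID, targetPath string) error {
	handled, err := mounter.SystemdUnmount(volumeID)
	if handled {
		return err // systemd answered; its result stands
	}
	return mounter.FuseUnmount(targetPath)
}

func main() {
	if err := unmountVolume("example-volume", "/mnt/example"); err != nil {
		log.Fatal(err)
	}
}
```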
@@ -107,7 +137,7 @@ func waitForMount(path string, timeout time.Duration) error {
 	}
 }
 
-func findFuseMountProcess(path string) (*os.Process, error) {
+func FindFuseMountProcess(path string) (*os.Process, error) {
 	processes, err := ps.Processes()
 	if err != nil {
 		return nil, err
@@ -2,7 +2,6 @@ package mounter
 
 import (
 	"fmt"
-	"os"
 	"path"
 
 	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
@@ -31,15 +30,7 @@ func newRcloneMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
 	}, nil
 }
 
-func (rclone *rcloneMounter) Stage(stageTarget string) error {
-	return nil
-}
-
-func (rclone *rcloneMounter) Unstage(stageTarget string) error {
-	return nil
-}
-
-func (rclone *rcloneMounter) Mount(source string, target string) error {
+func (rclone *rcloneMounter) Mount(target, volumeID string) error {
 	args := []string{
 		"mount",
 		fmt.Sprintf(":s3:%s", path.Join(rclone.meta.BucketName, rclone.meta.Prefix)),
@@ -55,7 +46,9 @@ func (rclone *rcloneMounter) Mount(source string, target string) error {
 		args = append(args, fmt.Sprintf("--s3-region=%s", rclone.region))
 	}
 	args = append(args, rclone.meta.MountOptions...)
-	os.Setenv("AWS_ACCESS_KEY_ID", rclone.accessKeyID)
-	os.Setenv("AWS_SECRET_ACCESS_KEY", rclone.secretAccessKey)
-	return fuseMount(target, rcloneCmd, args)
+	envs := []string{
+		"AWS_ACCESS_KEY_ID=" + rclone.accessKeyID,
+		"AWS_SECRET_ACCESS_KEY=" + rclone.secretAccessKey,
+	}
+	return fuseMount(target, rcloneCmd, args, envs)
 }
@@ -28,15 +28,7 @@ func newS3fsMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
 	}, nil
 }
 
-func (s3fs *s3fsMounter) Stage(stageTarget string) error {
-	return nil
-}
-
-func (s3fs *s3fsMounter) Unstage(stageTarget string) error {
-	return nil
-}
-
-func (s3fs *s3fsMounter) Mount(source string, target string) error {
+func (s3fs *s3fsMounter) Mount(target, volumeID string) error {
 	if err := writes3fsPass(s3fs.pwFileContent); err != nil {
 		return err
 	}
@@ -52,7 +44,7 @@ func (s3fs *s3fsMounter) Mount(source string, target string) error {
 		args = append(args, "-o", fmt.Sprintf("endpoint=%s", s3fs.region))
 	}
 	args = append(args, s3fs.meta.MountOptions...)
-	return fuseMount(target, s3fsCmd, args)
+	return fuseMount(target, s3fsCmd, args, nil)
 }
 
 func writes3fsPass(pwFileContent string) error {
@@ -52,7 +52,7 @@ func NewClient(cfg *Config) (*s3Client, error) {
 		endpoint = u.Hostname() + ":" + u.Port()
 	}
 	minioClient, err := minio.New(endpoint, &minio.Options{
-		Creds:  credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, client.Config.Region),
+		Creds:  credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, ""),
 		Secure: ssl,
 	})
 	if err != nil {
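Context for this one-line fix (an annotation, not part of the commit): in minio-go v7, the third argument of credentials.NewStaticV4 is an STS session token, not a region, so passing the region there sends it as a bogus token and can break authentication; a region belongs in minio.Options instead. A minimal sketch with placeholder endpoint and keys:

```go
package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("s3.example.com:443", &minio.Options{
		// Third argument is the session token; empty for plain static keys.
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
		// If a specific region is required, it goes here, not in the token slot.
		Region: "us-east-1",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("endpoint:", client.EndpointURL())
}
```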
@@ -1,33 +1,24 @@
-FROM yandex-cloud/k8s-csi-s3:dev
+FROM golang:1.16-buster
 
 LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
 LABEL description="csi-s3 testing image"
 
-RUN apt-get update && \
-    apt-get install -y \
-    git wget make && \
-    rm -rf /var/lib/apt/lists/*
-# Minio download servers are TERRIBLY SLOW as of 2021-10-27
-#RUN wget https://dl.min.io/server/minio/release/linux-amd64/minio && \
-#    chmod +x minio && \
-#    mv minio /usr/local/bin
-
-ARG GOVERSION=1.16.3
-RUN wget -q https://golang.org/dl/go${GOVERSION}.linux-amd64.tar.gz && \
-    tar -xf go${GOVERSION}.linux-amd64.tar.gz && \
-    rm go${GOVERSION}.linux-amd64.tar.gz && \
-    mv go /usr/local
-RUN git clone --depth=1 https://github.com/minio/minio
-RUN cd minio && go build && mv minio /usr/local/bin
-
-ENV GOROOT /usr/local/go
-ENV GOPATH /go
-ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH
+RUN wget -q https://dl.min.io/server/minio/release/linux-amd64/minio && \
+    chmod +x minio &&\
+    mv minio /usr/local/bin
 
-WORKDIR /app
+WORKDIR /build
+
+# prewarm go mod cache
+COPY go.mod .
+COPY go.sum .
+RUN go mod download
 
-ADD test/test.sh /usr/local/bin
+RUN wget https://github.com/yandex-cloud/geesefs/releases/latest/download/geesefs-linux-amd64 \
+    -O /usr/bin/geesefs && chmod 755 /usr/bin/geesefs
 
-ENTRYPOINT ["/usr/local/bin/test.sh"]
+ENTRYPOINT ["/build/test/test.sh"]
@@ -1,8 +1,8 @@
-#!/usr/bin/env bash
+#!/bin/sh
 export MINIO_ACCESS_KEY=FJDSJ
 export MINIO_SECRET_KEY=DSG643HGDS
 
 mkdir -p /tmp/minio
 minio server /tmp/minio &>/dev/null &
 sleep 5
-go test ./... -cover -ginkgo.noisySkippings=false
+go test ./... -cover -ginkgo.noisySkippings=false -ginkgo.skip="should fail when requesting to create a volume with already existing name and different capacity"