Compare commits
No commits in common. "master" and "v0.27.0" have entirely different histories.
32 changed files with 262 additions and 939 deletions
25  .github/workflows/go.yml  vendored  Normal file

@@ -0,0 +1,25 @@
name: Go

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:

  build:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2

    - name: Set up Go
      uses: actions/setup-go@v2
      with:
        go-version: 1.16

    - name: Build
      run: go build -v ./...

    - name: Test
      run: make test

48  .github/workflows/pages.yml  vendored

@@ -1,48 +0,0 @@
name: Publish Helm chart

on:
  push:
    branches:
      - master

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
  contents: write
  pages: write
  id-token: write

# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
  group: "pages"
  cancel-in-progress: false

jobs:

  deploy:

    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3

      - name: Setup Pages
        uses: actions/configure-pages@v3

      - name: Publish Helm chart
        uses: stefanprodan/helm-gh-pages@0ad2bb377311d61ac04ad9eb6f252fb68e207260 # v1.7.0
        with:
          # GitHub automatically creates a GITHUB_TOKEN secret to use in your workflow. You can use the GITHUB_TOKEN to authenticate in a workflow run.
          # See https://docs.github.com/en/actions/reference/authentication-in-a-workflow#about-the-github_token-secret
          token: ${{ secrets.GITHUB_TOKEN }}
          branch: github-pages
          charts_dir: deploy/helm
          target_dir: charts
          linting: off

13  Makefile

@@ -13,11 +13,10 @@
# limitations under the License.
.PHONY: test build container push clean

PROJECT_DIR=/app
REGISTRY_NAME=cr.yandex/crp9ftr22d26age3hulg
REGISTRY_NAME2=cr.il.nebius.cloud/crll7us9n6i5j3v4n92m
IMAGE_NAME=csi-s3
IMAGE_NAME2=yandex-cloud/csi-s3/csi-s3-driver
VERSION ?= 0.38.3
VERSION ?= 0.27.0
IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(VERSION)
TEST_IMAGE_TAG=$(IMAGE_NAME):test

@@ -25,17 +24,13 @@ build:
	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/s3driver ./cmd/s3driver
test:
	docker build -t $(TEST_IMAGE_TAG) -f test/Dockerfile .
	docker run --rm --privileged -v $(PWD):/build --device /dev/fuse $(TEST_IMAGE_TAG)
	docker run --rm --privileged -v $(PWD):$(PROJECT_DIR) --device /dev/fuse $(TEST_IMAGE_TAG)
container:
	docker build -t $(IMAGE_TAG) .
	docker build -t $(IMAGE_TAG) -f cmd/s3driver/Dockerfile .
push: container
	docker tag $(IMAGE_TAG) $(REGISTRY_NAME)/$(IMAGE_NAME):latest
	docker tag $(IMAGE_TAG) $(REGISTRY_NAME)/$(IMAGE_NAME2):$(VERSION)
	docker tag $(IMAGE_TAG) $(REGISTRY_NAME)/$(IMAGE_NAME2):latest
	docker push $(IMAGE_TAG)
	docker push $(REGISTRY_NAME)/$(IMAGE_NAME)
	docker push $(REGISTRY_NAME)/$(IMAGE_NAME2)
	docker push $(REGISTRY_NAME)/$(IMAGE_NAME2):$(VERSION)
clean:
	go clean -r -x
	-rm -rf _output

69  README.md

@@ -6,23 +6,11 @@ This is a Container Storage Interface ([CSI](https://github.com/container-storag

### Requirements

* Kubernetes 1.17+
* Kubernetes 1.13+ (CSI v1.0.0 compatibility)
* Kubernetes has to allow privileged containers
* Docker daemon must allow shared mounts (systemd flag `MountFlags=shared`)

### Helm chart

Helm chart is published at `https://yandex-cloud.github.io/k8s-csi-s3`:

```
helm repo add yandex-s3 https://yandex-cloud.github.io/k8s-csi-s3/charts

helm install csi-s3 yandex-s3/csi-s3
```

### Manual installation

#### 1. Create a secret with your S3 credentials
### 1. Create a secret with your S3 credentials

```yaml
apiVersion: v1

@@ -42,30 +30,22 @@ stringData:

The region can be empty if you are using some other S3 compatible storage.

#### 2. Deploy the driver
### 2. Deploy the driver

```bash
cd deploy/kubernetes
kubectl create -f provisioner.yaml
kubectl create -f driver.yaml
kubectl create -f attacher.yaml
kubectl create -f csi-s3.yaml
```

If you're upgrading from a previous version which had `attacher.yaml` you
can safely delete all resources created from that file:

```
wget https://raw.githubusercontent.com/yandex-cloud/k8s-csi-s3/v0.35.5/deploy/kubernetes/attacher.yaml
kubectl delete -f attacher.yaml
```

#### 3. Create the storage class
### 3. Create the storage class

```bash
kubectl create -f examples/storageclass.yaml
```

#### 4. Test the S3 driver
### 4. Test the S3 driver

1. Create a pvc using the new storage class:

@@ -94,8 +74,8 @@ kubectl create -f examples/storageclass.yaml
```bash
$ kubectl exec -ti csi-s3-test-nginx bash
$ mount | grep fuse
pvc-035763df-0488-4941-9a34-f637292eb95c: on /usr/share/nginx/html/s3 type fuse.geesefs (rw,nosuid,nodev,relatime,user_id=65534,group_id=0,default_permissions,allow_other)
$ touch /usr/share/nginx/html/s3/hello_world
s3fs on /var/lib/www/html type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
$ touch /var/lib/www/html/hello_world
```

If something does not work as expected, check the troubleshooting section below.

@@ -114,54 +94,43 @@ metadata:
provisioner: ru.yandex.s3.csi
parameters:
  mounter: geesefs
  options: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
  bucket: some-existing-bucket-name
```

If the bucket is specified, it will still be created if it does not exist on the backend. Every volume will get its own prefix within the bucket which matches the volume ID. When deleting a volume, also just the prefix will be deleted.

### Static Provisioning

If you want to mount a pre-existing bucket or prefix within a pre-existing bucket and don't want csi-s3 to delete it when the PV is deleted, you can use static provisioning.

To do that you should omit `storageClassName` in the `PersistentVolumeClaim` and manually create a `PersistentVolume` with a matching `claimRef`, like in the following example: [deploy/kubernetes/examples/pvc-manual.yaml](deploy/kubernetes/examples/pvc-manual.yaml).

### Mounter

We **strongly recommend** using the default mounter, which is [GeeseFS](https://github.com/yandex-cloud/geesefs).
As S3 is not a real file system there are some limitations to consider here. Depending on what mounter you are using, you will have different levels of POSIX compatibility. Also depending on what S3 storage backend you are using there are not always [consistency guarantees](https://github.com/gaul/are-we-consistent-yet#observed-consistency).

However there is also support for two other backends: [s3fs](https://github.com/s3fs-fuse/s3fs-fuse) and [rclone](https://rclone.org/commands/rclone_mount).
The driver can be configured to use one of these mounters to mount buckets:

* [geesefs](https://github.com/yandex-cloud/geesefs) (recommended and default)
* [s3fs](https://github.com/s3fs-fuse/s3fs-fuse)
* [rclone](https://rclone.org/commands/rclone_mount)

The mounter can be set as a parameter in the storage class. You can also create multiple storage classes for each mounter if you like.

As S3 is not a real file system there are some limitations to consider here.
Depending on what mounter you are using, you will have different levels of POSIX compatibility.
Also depending on what S3 storage backend you are using there are not always [consistency guarantees](https://github.com/gaul/are-we-consistent-yet#observed-consistency).

You can check the POSIX compatibility matrix here: https://github.com/yandex-cloud/geesefs#posix-compatibility-matrix.
Characteristics of different mounters (for more detailed information consult their own documentation):

#### GeeseFS

* Almost full POSIX compatibility
* Good performance for both small and big files
* Does not store file permissions and custom modification times
* By default runs **outside** of the csi-s3 container using systemd, to not crash
  mountpoints with "Transport endpoint is not connected" when csi-s3 is upgraded
  or restarted. Add `--no-systemd` to `parameters.options` of the `StorageClass`
  to disable this behaviour.
* Files can be viewed normally with any S3 client

#### s3fs

* Almost full POSIX compatibility
* Good performance for big files, poor performance for small files
* Very slow for directories with a large number of files
* Files can be viewed normally with any S3 client

#### rclone

* Poor POSIX compatibility
* Less POSIX compatible than s3fs
* Bad performance for big files, okayish performance for small files
* Files can be viewed normally with any S3 client
* Doesn't create directory objects like s3fs or GeeseFS
* May hang :-)

## Troubleshooting

@@ -1,18 +1,18 @@
FROM golang:1.19-alpine as gobuild
FROM golang:1.16-alpine as gobuild

WORKDIR /build
ADD go.mod go.sum /build/
RUN go mod download -x
ADD cmd /build/cmd
ADD pkg /build/pkg
ADD . /build

RUN go get -d -v ./...
RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o ./s3driver ./cmd/s3driver

FROM alpine:3.17
FROM debian:buster-slim
LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
LABEL description="csi-s3 slim image"

RUN apk add --no-cache fuse mailcap rclone
RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community s3fs-fuse
RUN apt-get update && \
    apt-get install -y ca-certificates && \
    rm -rf /var/lib/apt/lists/*

ADD https://github.com/yandex-cloud/geesefs/releases/latest/download/geesefs-linux-amd64 /usr/bin/geesefs
RUN chmod 755 /usr/bin/geesefs

@@ -1,12 +0,0 @@
---
apiVersion: v1
appVersion: 0.38.3
description: "Container Storage Interface (CSI) driver for S3 volumes"
name: csi-s3
version: 0.38.3
keywords:
  - s3
home: https://github.com/yandex-cloud/k8s-csi-s3
sources:
  - https://github.com/yandex-cloud/k8s-csi-s3/deploy/helm
icon: https://raw.githubusercontent.com/yandex-cloud/geesefs/master/doc/geesefs.png

@@ -1,40 +0,0 @@
# Helm chart for csi-s3

This chart adds S3 volume support to your cluster.

## Install chart

- Helm 2.x: `helm install [--set secret.accessKey=... --set secret.secretKey=... ...] --namespace kube-system --name csi-s3 .`
- Helm 3.x: `helm install [--set secret.accessKey=... --set secret.secretKey=... ...] --namespace kube-system csi-s3 .`

After installation succeeds, you can get the status of the chart: `helm status csi-s3`.

## Delete Chart

- Helm 2.x: `helm delete --purge csi-s3`
- Helm 3.x: `helm uninstall csi-s3 --namespace kube-system`

## Configuration

By default, this chart creates a secret and a storage class. You should at least set `secret.accessKey` and `secret.secretKey`
to your [Yandex Object Storage](https://cloud.yandex.com/en-ru/services/storage) keys for it to work.

The following table lists all configuration parameters and their default values.

| Parameter                    | Description                                                             | Default                                                 |
| ---------------------------- | ----------------------------------------------------------------------- | ------------------------------------------------------ |
| `storageClass.create`        | Specifies whether the storage class should be created                  | true                                                    |
| `storageClass.name`          | Storage class name                                                      | csi-s3                                                  |
| `storageClass.singleBucket`  | Use a single bucket for all dynamically provisioned persistent volumes |                                                         |
| `storageClass.mounter`       | Mounter to use. Either geesefs, s3fs or rclone. geesefs recommended     | geesefs                                                 |
| `storageClass.mountOptions`  | GeeseFS mount options                                                   | `--memory-limit 1000 --dir-mode 0777 --file-mode 0666`  |
| `storageClass.reclaimPolicy` | Volume reclaim policy                                                   | Delete                                                  |
| `storageClass.annotations`   | Annotations for the storage class                                       |                                                         |
| `secret.create`              | Specifies whether the secret should be created                          | true                                                    |
| `secret.name`                | Name of the secret                                                      | csi-s3-secret                                           |
| `secret.accessKey`           | S3 Access Key                                                           |                                                         |
| `secret.secretKey`           | S3 Secret Key                                                           |                                                         |
| `secret.endpoint`            | Endpoint                                                                | https://storage.yandexcloud.net                         |
| `tolerations.all`            | Tolerate all taints by the CSI-S3 node driver (mounter)                 | false                                                   |
| `tolerations.node`           | Custom tolerations for the CSI-S3 node driver (mounter)                 | []                                                      |
| `tolerations.controller`     | Custom tolerations for the CSI-S3 controller (provisioner)              | []                                                      |

@@ -1,112 +0,0 @@
helm_chart:
  name: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3
  tag: 0.38.3
requirements:
  k8s_version: ">=1.13"
images:
  - full: images.registrar
  - full: images.provisioner
  - full: images.csi
user_values:
  - name: storageClass.create
    title:
      en: Create storage class
      ru: Создать класс хранения
    description:
      en: Specifies whether the storage class should be created
      ru: Выберите, чтобы создать новый S3-класс хранения при развёртывании приложения.
    boolean_value:
      default_value: true
  - name: secret.create
    title:
      en: Create secret
      ru: Создать секрет
    description:
      en: Specifies whether the secret should be created
      ru: Выберите, чтобы создать новый секрет для класса хранения при установке приложения, а не использовать существующий.
    boolean_value:
      default_value: true
  - name: secret.accessKey
    title:
      en: S3 Access Key ID
      ru: Идентификатор ключа S3
    description:
      en: S3 Access Key ID
      ru: Идентификатор ключа S3.
    string_value:
      default_value: ""
  - name: secret.secretKey
    title:
      en: S3 Secret Key
      ru: Секретный ключ S3
    description:
      en: S3 Secret Key
      ru: Секретный ключ S3.
    string_value:
      default_value: ""
  - name: storageClass.singleBucket
    title:
      en: Single S3 bucket for volumes
      ru: Общий бакет S3 для томов
    description:
      en: Single S3 bucket to use for all dynamically provisioned persistent volumes
      ru: Общий бакет S3, в котором будут создаваться все динамически распределяемые тома. Если пусто, под каждый том будет создаваться новый бакет.
    string_value:
      default_value: ""
  - name: secret.endpoint
    title:
      en: S3 endpoint
      ru: Адрес S3-сервиса
    description:
      en: S3 service endpoint to use
      ru: Адрес S3-сервиса, который будет использоваться.
    string_value:
      default_value: "https://storage.yandexcloud.net"
  - name: storageClass.mountOptions
    title:
      en: GeeseFS mount options
      ru: Опции монтирования GeeseFS
    description:
      en: GeeseFS mount options to use. Refer to `geesefs --help` command output for the whole list of options (https://github.com/yandex-cloud/geesefs).
      ru: Опции монтирования GeeseFS. Полный перечень и описание опций смотрите в выводе команды `geesefs --help` (https://github.com/yandex-cloud/geesefs).
    string_value:
      default_value: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
  - name: storageClass.reclaimPolicy
    title:
      en: Volume reclaim policy
      ru: Политика очистки томов
    description:
      en: Volume reclaim policy for the storage class (Retain or Delete)
      ru: Выберите политику очистки томов PersistentVolume при удалении PersistentVolumeClaim. Retain — сохранять том, Delete — удалять том.
    string_selector_value:
      default_value: Delete
      values:
        - Delete
        - Retain
  - name: storageClass.name
    title:
      en: Storage class name
      ru: Название класса хранения
    description:
      en: Name of the storage class that will be created
      ru: Название класса хранения, который будет создан при установке.
    string_value:
      default_value: csi-s3
  - name: secret.name
    title:
      en: Name of the secret
      ru: Название секрета
    description:
      en: Name of the secret to create or use for the storage class
      ru: Название секрета, который будет создан или использован для класса хранения.
    string_value:
      default_value: csi-s3-secret
  - name: tolerations.all
    title:
      en: Tolerate all taints
      ru: Игнорировать все политики taint
    description:
      en: Tolerate all taints by the CSI-S3 node driver (mounter)
      ru: Выберите, чтобы драйвер CSI, который монтирует файловую систему на узлах, игнорировал все политики taint для узлов кластера.
    boolean_value:
      default_value: false

@@ -1,129 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-s3
  namespace: {{ .Release.Namespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-s3
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-s3
subjects:
  - kind: ServiceAccount
    name: csi-s3
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: csi-s3
  apiGroup: rbac.authorization.k8s.io
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-s3
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    matchLabels:
      app: csi-s3
  template:
    metadata:
      labels:
        app: csi-s3
    spec:
      tolerations:
        {{- if .Values.tolerations.all }}
        - operator: Exists
        {{- else }}
        - key: CriticalAddonsOnly
          operator: Exists
        - operator: Exists
          effect: NoExecute
          tolerationSeconds: 300
        {{- end }}
        {{- with .Values.tolerations.node }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      serviceAccount: csi-s3
      containers:
        - name: driver-registrar
          image: {{ .Values.images.registrar }}
          args:
            - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
            - "--v=4"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
            - name: DRIVER_REG_SOCK_PATH
              value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration/
        - name: csi-s3
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: {{ .Values.images.csi }}
          imagePullPolicy: IfNotPresent
          args:
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--nodeid=$(NODE_ID)"
            - "--v=4"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: stage-dir
              mountPath: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
              mountPropagation: "Bidirectional"
            - name: pods-mount-dir
              mountPath: {{ .Values.kubeletPath }}/pods
              mountPropagation: "Bidirectional"
            - name: fuse-device
              mountPath: /dev/fuse
            - name: systemd-control
              mountPath: /run/systemd
      volumes:
        - name: registration-dir
          hostPath:
            path: {{ .Values.kubeletPath }}/plugins_registry/
            type: DirectoryOrCreate
        - name: plugin-dir
          hostPath:
            path: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
            type: DirectoryOrCreate
        - name: stage-dir
          hostPath:
            path: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
            type: DirectoryOrCreate
        - name: pods-mount-dir
          hostPath:
            path: {{ .Values.kubeletPath }}/pods
            type: Directory
        - name: fuse-device
          hostPath:
            path: /dev/fuse
        - name: systemd-control
          hostPath:
            path: /run/systemd
            type: DirectoryOrCreate

@@ -1,10 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: ru.yandex.s3.csi
spec:
  attachRequired: false
  podInfoOnMount: true
  fsGroupPolicy: File # added in Kubernetes 1.19, this field is GA as of Kubernetes 1.23
  volumeLifecycleModes: # added in Kubernetes 1.16, this field is beta
    - Persistent

@@ -1,116 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-provisioner-sa
  namespace: {{ .Release.Namespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: csi-provisioner-sa
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
  name: csi-provisioner-s3
  namespace: {{ .Release.Namespace }}
  labels:
    app: csi-provisioner-s3
spec:
  selector:
    app: csi-provisioner-s3
  ports:
    - name: csi-s3-dummy
      port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-provisioner-s3
  namespace: {{ .Release.Namespace }}
spec:
  serviceName: "csi-provisioner-s3"
  replicas: 1
  selector:
    matchLabels:
      app: csi-provisioner-s3
  template:
    metadata:
      labels:
        app: csi-provisioner-s3
    spec:
      serviceAccount: csi-provisioner-sa
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
        - key: CriticalAddonsOnly
          operator: Exists
        {{- with .Values.tolerations.controller }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      containers:
        - name: csi-provisioner
          image: {{ .Values.images.provisioner }}
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=4"
          env:
            - name: ADDRESS
              value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
        - name: csi-s3
          image: {{ .Values.images.csi }}
          imagePullPolicy: IfNotPresent
          args:
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--nodeid=$(NODE_ID)"
            - "--v=4"
          env:
            - name: CSI_ENDPOINT
              value: unix://{{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
      volumes:
        - name: socket-dir
          emptyDir: {}

@@ -1,15 +0,0 @@
{{- if .Values.secret.create -}}
apiVersion: v1
kind: Secret
metadata:
  namespace: {{ .Release.Namespace }}
  name: {{ .Values.secret.name }}
stringData:
  {{- if .Values.secret.accessKey }}
  accessKeyID: {{ .Values.secret.accessKey }}
  {{- end }}
  {{- if .Values.secret.secretKey }}
  secretAccessKey: {{ .Values.secret.secretKey }}
  {{- end }}
  endpoint: {{ .Values.secret.endpoint }}
{{- end -}}

@@ -1,26 +0,0 @@
{{- if .Values.storageClass.create -}}
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: {{ .Values.storageClass.name }}
  {{- if .Values.storageClass.annotations }}
  annotations:
    {{ toYaml .Values.storageClass.annotations | indent 4 }}
  {{- end }}
provisioner: ru.yandex.s3.csi
parameters:
  mounter: "{{ .Values.storageClass.mounter }}"
  options: "{{ .Values.storageClass.mountOptions }}"
  {{- if .Values.storageClass.singleBucket }}
  bucket: "{{ .Values.storageClass.singleBucket }}"
  {{- end }}
  csi.storage.k8s.io/provisioner-secret-name: {{ .Values.secret.name }}
  csi.storage.k8s.io/provisioner-secret-namespace: {{ .Release.Namespace }}
  csi.storage.k8s.io/controller-publish-secret-name: {{ .Values.secret.name }}
  csi.storage.k8s.io/controller-publish-secret-namespace: {{ .Release.Namespace }}
  csi.storage.k8s.io/node-stage-secret-name: {{ .Values.secret.name }}
  csi.storage.k8s.io/node-stage-secret-namespace: {{ .Release.Namespace }}
  csi.storage.k8s.io/node-publish-secret-name: {{ .Values.secret.name }}
  csi.storage.k8s.io/node-publish-secret-namespace: {{ .Release.Namespace }}
reclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
{{- end -}}

@@ -1,48 +0,0 @@
---
images:
  # Source: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
  registrar: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-node-driver-registrar:v1.2.0
  # Source: quay.io/k8scsi/csi-provisioner:v2.1.0
  provisioner: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-provisioner:v2.1.0
  # Main image
  csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3-driver:0.38.3

storageClass:
  # Specifies whether the storage class should be created
  create: true
  # Name
  name: csi-s3
  # Use a single bucket for all dynamically provisioned persistent volumes
  singleBucket: ""
  # mounter to use - either geesefs, s3fs or rclone (default geesefs)
  mounter: geesefs
  # GeeseFS mount options
  mountOptions: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
  # Volume reclaim policy
  reclaimPolicy: Delete
  # Annotations for the storage class
  # Example:
  # annotations:
  #   storageclass.kubernetes.io/is-default-class: "true"
  annotations: {}

secret:
  # Specifies whether the secret should be created
  create: true
  # Name of the secret
  name: csi-s3-secret
  # S3 Access Key
  accessKey: ""
  # S3 Secret Key
  secretKey: ""
  # Endpoint
  endpoint: https://storage.yandexcloud.net

tolerations:
  all: false
  node: []
  controller: []

nodeSelector: {}

kubeletPath: /var/lib/kubelet

93  deploy/kubernetes/attacher.yaml  Normal file

@@ -0,0 +1,93 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-attacher-sa
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-attacher-runner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role
subjects:
  - kind: ServiceAccount
    name: csi-attacher-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: external-attacher-runner
  apiGroup: rbac.authorization.k8s.io
---
# needed for StatefulSet
kind: Service
apiVersion: v1
metadata:
  name: csi-attacher-s3
  namespace: kube-system
  labels:
    app: csi-attacher-s3
spec:
  selector:
    app: csi-attacher-s3
  ports:
    - name: csi-s3-dummy
      port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-attacher-s3
  namespace: kube-system
spec:
  serviceName: "csi-attacher-s3"
  replicas: 1
  selector:
    matchLabels:
      app: csi-attacher-s3
  template:
    metadata:
      labels:
        app: csi-attacher-s3
    spec:
      serviceAccount: csi-attacher-sa
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: "Exists"
      containers:
        - name: csi-attacher
          image: quay.io/k8scsi/csi-attacher:v2.2.0
          args:
            - "--v=4"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
            type: DirectoryOrCreate

@@ -8,6 +8,22 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-s3
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "update"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1

@@ -36,13 +52,8 @@ spec:
      labels:
        app: csi-s3
    spec:
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - operator: Exists
          effect: NoExecute
          tolerationSeconds: 300
      serviceAccount: csi-s3
      hostNetwork: true
      containers:
        - name: driver-registrar
          image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0

@@ -70,7 +81,7 @@ spec:
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.27.0
          imagePullPolicy: IfNotPresent
          args:
            - "--endpoint=$(CSI_ENDPOINT)"

@@ -86,16 +97,11 @@ spec:
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: stage-dir
              mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
              mountPropagation: "Bidirectional"
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
            - name: fuse-device
              mountPath: /dev/fuse
            - name: systemd-control
              mountPath: /run/systemd
      volumes:
        - name: registration-dir
          hostPath:

@@ -105,10 +111,6 @@ spec:
          hostPath:
            path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
            type: DirectoryOrCreate
        - name: stage-dir
          hostPath:
            path: /var/lib/kubelet/plugins/kubernetes.io/csi
            type: DirectoryOrCreate
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods

@@ -116,7 +118,3 @@ spec:
        - name: fuse-device
          hostPath:
            path: /dev/fuse
        - name: systemd-control
          hostPath:
            path: /run/systemd
            type: DirectoryOrCreate

@@ -1,7 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: ru.yandex.s3.csi
spec:
  attachRequired: false
  podInfoOnMount: true

@@ -5,7 +5,7 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: manualbucket-with-path
  name: manualbucket/path
spec:
  storageClassName: csi-s3
  capacity:

@@ -14,7 +14,7 @@ spec:
    - ReadWriteMany
  claimRef:
    namespace: default
    name: csi-s3-manual-pvc
    name: manualclaim
  csi:
    driver: ru.yandex.s3.csi
    controllerPublishSecretRef:

@@ -29,13 +29,12 @@ spec:
    volumeAttributes:
      capacity: 10Gi
      mounter: geesefs
      options: --memory-limit 1000 --dir-mode 0777 --file-mode 0666
    volumeHandle: manualbucket/path
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-s3-manual-pvc
  name: csi-s3-pvc
spec:
  # Empty storage class disables dynamic provisioning
  storageClassName: ""

@@ -70,10 +70,8 @@ spec:
    spec:
      serviceAccount: csi-provisioner-sa
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          operator: "Exists"
      containers:
        - name: csi-provisioner
          image: quay.io/k8scsi/csi-provisioner:v2.1.0

@@ -88,7 +86,7 @@ spec:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
        - name: csi-s3
          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.27.0
          imagePullPolicy: IfNotPresent
          args:
            - "--endpoint=$(CSI_ENDPOINT)"

2  go.mod

@@ -4,8 +4,6 @@ go 1.15

require (
	github.com/container-storage-interface/spec v1.1.0
	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
	github.com/godbus/dbus/v5 v5.0.4 // indirect
	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
	github.com/golang/protobuf v1.1.0 // indirect
	github.com/kubernetes-csi/csi-lib-utils v0.6.1 // indirect

4  go.sum

@@ -1,13 +1,9 @@
github.com/container-storage-interface/spec v1.1.0 h1:qPsTqtR1VUPvMPeK0UnCZMtXaKGyyLPG8gj/wG6VqMs=
github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=

@@ -47,8 +47,8 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
	prefix := ""

	// check if bucket name is overridden
	if params[mounter.BucketKey] != "" {
		bucketName = params[mounter.BucketKey]
	if nameOverride, ok := params[mounter.BucketKey]; ok {
		bucketName = nameOverride
		prefix = volumeID
		volumeID = path.Join(bucketName, prefix)
	}

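For context, the two CreateVolume variants above differ subtly: the value check (`params[mounter.BucketKey] != ""`) skips the override when the parameter is present but set to an empty string, while the comma-ok form takes the branch whenever the key exists at all. A standalone sketch of the difference (the map literal is illustrative, not from the driver):

```go
package main

import "fmt"

func main() {
	params := map[string]string{"bucket": ""} // key present, value empty

	// Zero-value check: not taken, because the value is "".
	if params["bucket"] != "" {
		fmt.Println("override via value check")
	}

	// Comma-ok check: taken, because the key exists at all.
	if v, ok := params["bucket"]; ok {
		fmt.Printf("override via comma-ok: %q\n", v)
	}
}
```
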
@@ -128,7 +128,7 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
	var deleteErr error
	if prefix == "" {
		// prefix is empty, we delete the whole bucket
		if err := client.RemoveBucket(bucketName); err != nil && err.Error() != "The specified bucket does not exist" {
		if err := client.RemoveBucket(bucketName); err != nil {
			deleteErr = err
		}
		glog.V(4).Infof("Bucket %s removed", bucketName)

@@ -210,7 +210,7 @@ func sanitizeVolumeID(volumeID string) string {
func volumeIDToBucketPrefix(volumeID string) (string, string) {
	// if the volumeID has a slash in it, this volume is
	// stored under a certain prefix within the bucket.
	splitVolumeID := strings.SplitN(volumeID, "/", 2)
	splitVolumeID := strings.Split(volumeID, "/")
	if len(splitVolumeID) > 1 {
		return splitVolumeID[0], splitVolumeID[1]
	}

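The `SplitN(volumeID, "/", 2)` form matters when the prefix itself contains slashes: `strings.Split` cuts at every slash, so element 1 would hold only the first path segment. A quick illustration (the sample volume ID is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	volumeID := "bucket/nested/prefix"

	// SplitN with n=2 keeps everything after the first "/" as the prefix.
	parts := strings.SplitN(volumeID, "/", 2)
	fmt.Println(parts[0], parts[1]) // bucket nested/prefix

	// Plain Split breaks at every "/"; parts[1] is only "nested".
	parts = strings.Split(volumeID, "/")
	fmt.Println(parts[0], parts[1]) // bucket nested
}
```
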
@@ -33,7 +33,7 @@ type driver struct {
}

var (
	vendorVersion = "v1.34.7"
	vendorVersion = "v1.2.0"
	driverName    = "ru.yandex.s3.csi"
)

@@ -5,6 +5,7 @@ import (
	"os"

	"github.com/yandex-cloud/k8s-csi-s3/pkg/driver"
	"github.com/yandex-cloud/k8s-csi-s3/pkg/mounter"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

@@ -66,7 +67,7 @@ var _ = Describe("S3Driver", func() {
		})
	})

	/* Context("s3fs", func() {
	Context("s3fs", func() {
		socket := "/tmp/csi-s3fs.sock"
		csiEndpoint := "unix://" + socket
		if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {

@@ -119,5 +120,5 @@ var _ = Describe("S3Driver", func() {
		}
		sanity.GinkgoTest(sanityCfg)
	})
	})*/
	})
})

@@ -19,7 +19,6 @@ package driver
import (
	"fmt"
	"os"
	"os/exec"
	"regexp"
	"strconv"

@@ -69,6 +68,7 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
	volumeID := req.GetVolumeId()
	targetPath := req.GetTargetPath()
	stagingTargetPath := req.GetStagingTargetPath()
	bucketName, prefix := volumeIDToBucketPrefix(volumeID)

	// Check arguments
	if req.GetVolumeCapability() == nil {

@@ -84,28 +84,7 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
	}

	notMnt, err := checkMount(stagingTargetPath)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	if notMnt {
		// Staged mount is dead by some reason. Revive it
		bucketName, prefix := volumeIDToBucketPrefix(volumeID)
		s3, err := s3.NewClientFromSecret(req.GetSecrets())
		if err != nil {
			return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
		}
		meta := getMeta(bucketName, prefix, req.VolumeContext)
		mounter, err := mounter.New(meta, s3.Config)
		if err != nil {
			return nil, err
		}
		if err := mounter.Mount(stagingTargetPath, volumeID); err != nil {
			return nil, err
		}
	}

	notMnt, err = checkMount(targetPath)
	notMnt, err := checkMount(targetPath)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

@@ -121,12 +100,18 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
	glog.V(4).Infof("target %v\nreadonly %v\nvolumeId %v\nattributes %v\nmountflags %v\n",
		targetPath, readOnly, volumeID, attrib, mountFlags)

	cmd := exec.Command("mount", "--bind", stagingTargetPath, targetPath)
	cmd.Stderr = os.Stderr
	glog.V(3).Infof("Binding volume %v from %v to %v", volumeID, stagingTargetPath, targetPath)
	out, err := cmd.Output()
	s3, err := s3.NewClientFromSecret(req.GetSecrets())
	if err != nil {
		return nil, fmt.Errorf("Error running mount --bind %v %v: %s", stagingTargetPath, targetPath, out)
		return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
	}

	meta := getMeta(bucketName, prefix, req.VolumeContext)
	mounter, err := mounter.New(meta, s3.Config)
	if err != nil {
		return nil, err
	}
	if err := mounter.Mount(stagingTargetPath, targetPath); err != nil {
		return nil, err
	}

	glog.V(4).Infof("s3: volume %s successfully mounted to %s", volumeID, targetPath)

@@ -146,7 +131,7 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
	}

	if err := mounter.Unmount(targetPath); err != nil {
	if err := mounter.FuseUnmount(targetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	glog.V(4).Infof("s3: volume %s has been unmounted.", volumeID)

@@ -189,7 +174,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
	if err != nil {
		return nil, err
	}
	if err := mounter.Mount(stagingTargetPath, volumeID); err != nil {
	if err := mounter.Stage(stagingTargetPath); err != nil {
		return nil, err
	}

@@ -208,22 +193,6 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
	}

	proc, err := mounter.FindFuseMountProcess(stagingTargetPath)
	if err != nil {
		return nil, err
	}
	exists := false
	if proc == nil {
		exists, err = mounter.SystemdUnmount(volumeID)
		if exists && err != nil {
			return nil, err
		}
	}
	if !exists {
		err = mounter.FuseUnmount(stagingTargetPath)
	}
	glog.V(4).Infof("s3: volume %s has been unmounted from stage path %v.", volumeID, stagingTargetPath)

	return &csi.NodeUnstageVolumeResponse{}, nil
}

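One NodePublishVolume variant above runs `mount --bind` instead of spawning a second FUSE process: the volume is mounted once at the staging path and then bind-mounted into each pod's target path. A minimal sketch of that step, assuming both directories already exist and the process has mount privileges (the paths in `main` are hypothetical):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// bindMount republishes an existing mount at a second location, the way
// the staged FUSE mount is reused for each pod's target path.
func bindMount(stagingPath, targetPath string) error {
	cmd := exec.Command("mount", "--bind", stagingPath, targetPath)
	cmd.Stderr = os.Stderr // surface mount errors in the driver log
	if out, err := cmd.Output(); err != nil {
		return fmt.Errorf("mount --bind %s %s failed: %s", stagingPath, targetPath, out)
	}
	return nil
}

func main() {
	if err := bindMount("/mnt/stage", "/mnt/target"); err != nil {
		fmt.Println(err)
	}
}
```
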
@@ -3,12 +3,6 @@ package mounter
import (
	"fmt"
	"os"
	"strings"
	"time"

	systemd "github.com/coreos/go-systemd/v22/dbus"
	dbus "github.com/godbus/dbus/v5"
	"github.com/golang/glog"

	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
)

@@ -36,170 +30,27 @@ func newGeeseFSMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
	}, nil
}

func (geesefs *geesefsMounter) CopyBinary(from, to string) error {
	st, err := os.Stat(from)
	if err != nil {
		return fmt.Errorf("Failed to stat %s: %v", from, err)
	}
	st2, err := os.Stat(to)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("Failed to stat %s: %v", to, err)
	}
	if err != nil || st2.Size() != st.Size() || st2.ModTime() != st.ModTime() {
		if err == nil {
			// remove the file first to not hit "text file busy" errors
			err = os.Remove(to)
			if err != nil {
				return fmt.Errorf("Error removing %s to update it: %v", to, err)
			}
		}
		bin, err := os.ReadFile(from)
		if err != nil {
			return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
		}
		err = os.WriteFile(to, bin, 0755)
		if err != nil {
			return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
		}
		err = os.Chtimes(to, st.ModTime(), st.ModTime())
		if err != nil {
			return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
		}
	}
func (geesefs *geesefsMounter) Stage(stageTarget string) error {
	return nil
}

func (geesefs *geesefsMounter) MountDirect(target string, args []string) error {
	args = append([]string{
func (geesefs *geesefsMounter) Unstage(stageTarget string) error {
	return nil
}

func (geesefs *geesefsMounter) Mount(source string, target string) error {
	fullPath := fmt.Sprintf("%s:%s", geesefs.meta.BucketName, geesefs.meta.Prefix)
	args := []string{
		"--endpoint", geesefs.endpoint,
		"-o", "allow_other",
		"--log-file", "/dev/stderr",
	}, args...)
	envs := []string{
		"AWS_ACCESS_KEY_ID=" + geesefs.accessKeyID,
		"AWS_SECRET_ACCESS_KEY=" + geesefs.secretAccessKey,
	}
	return fuseMount(target, geesefsCmd, args, envs)
}

type execCmd struct {
	Path             string
	Args             []string
	UncleanIsFailure bool
}

func (geesefs *geesefsMounter) Mount(target, volumeID string) error {
	fullPath := fmt.Sprintf("%s:%s", geesefs.meta.BucketName, geesefs.meta.Prefix)
	var args []string
	if geesefs.region != "" {
		args = append(args, "--region", geesefs.region)
	}
	args = append(
		args,
		"--setuid", "65534", // nobody. drop root privileges
		"--setgid", "65534", // nogroup
	)
	useSystemd := true
	for i := 0; i < len(geesefs.meta.MountOptions); i++ {
		opt := geesefs.meta.MountOptions[i]
		if opt == "--no-systemd" {
			useSystemd = false
		} else if len(opt) > 0 && opt[0] == '-' {
			// Remove unsafe options
			s := 1
			if len(opt) > 1 && opt[1] == '-' {
				s++
			}
			key := opt[s:]
			e := strings.Index(opt, "=")
			if e >= 0 {
				key = opt[s:e]
			}
			if key == "log-file" || key == "shared-config" || key == "cache" {
				// Skip options accessing local FS
				if e < 0 {
					i++
				}
			} else if key != "" {
				args = append(args, opt)
			}
		} else if len(opt) > 0 {
			args = append(args, opt)
		}
	}
	args = append(args, geesefs.meta.MountOptions...)
	args = append(args, fullPath, target)
	// Try to start geesefs using systemd so it doesn't get killed when the container exits
	if !useSystemd {
		return geesefs.MountDirect(target, args)
	}
	conn, err := systemd.New()
	if err != nil {
		glog.Errorf("Failed to connect to systemd dbus service: %v, starting geesefs directly", err)
		return geesefs.MountDirect(target, args)
	}
	defer conn.Close()
	// systemd is present
	if err = geesefs.CopyBinary("/usr/bin/geesefs", "/csi/geesefs"); err != nil {
		return err
	}
	pluginDir := os.Getenv("PLUGIN_DIR")
	if pluginDir == "" {
		pluginDir = "/var/lib/kubelet/plugins/ru.yandex.s3.csi"
	}
	args = append([]string{pluginDir + "/geesefs", "-f", "-o", "allow_other", "--endpoint", geesefs.endpoint}, args...)
	glog.Info("Starting geesefs using systemd: " + strings.Join(args, " "))
	unitName := "geesefs-" + systemd.PathBusEscape(volumeID) + ".service"
	newProps := []systemd.Property{
		systemd.Property{
			Name:  "Description",
			Value: dbus.MakeVariant("GeeseFS mount for Kubernetes volume " + volumeID),
		},
		systemd.PropExecStart(args, false),
		systemd.Property{
			Name: "ExecStopPost",
			// force & lazy unmount to cleanup possibly dead mountpoints
			Value: dbus.MakeVariant([]execCmd{execCmd{"/bin/umount", []string{"/bin/umount", "-f", "-l", target}, false}}),
		},
		systemd.Property{
			Name:  "Environment",
			Value: dbus.MakeVariant([]string{"AWS_ACCESS_KEY_ID=" + geesefs.accessKeyID, "AWS_SECRET_ACCESS_KEY=" + geesefs.secretAccessKey}),
		},
		systemd.Property{
			Name:  "CollectMode",
			Value: dbus.MakeVariant("inactive-or-failed"),
		},
	}
	unitProps, err := conn.GetAllProperties(unitName)
	if err == nil {
		// Unit already exists
		if s, ok := unitProps["ActiveState"].(string); ok && (s == "active" || s == "activating" || s == "reloading") {
			// Unit is already active
			curPath := ""
			prevExec, ok := unitProps["ExecStart"].([][]interface{})
			if ok && len(prevExec) > 0 && len(prevExec[0]) >= 2 {
				execArgs, ok := prevExec[0][1].([]string)
				if ok && len(execArgs) >= 2 {
					curPath = execArgs[len(execArgs)-1]
				}
			}
			if curPath != target {
				return fmt.Errorf(
					"GeeseFS for volume %v is already mounted on host, but"+
						" in a different directory. We want %v, but it's in %v",
					volumeID, target, curPath,
				)
			}
			// Already mounted at right location
			return nil
		} else {
			// Stop and garbage collect the unit if automatic collection didn't work for some reason
			conn.StopUnit(unitName, "replace", nil)
			conn.ResetFailedUnit(unitName)
		}
	}
	_, err = conn.StartTransientUnit(unitName, "replace", newProps, nil)
	if err != nil {
		return fmt.Errorf("Error starting systemd unit %s on host: %v", unitName, err)
	}
	return waitForMount(target, 10*time.Second)
	os.Setenv("AWS_ACCESS_KEY_ID", geesefs.accessKeyID)
	os.Setenv("AWS_SECRET_ACCESS_KEY", geesefs.secretAccessKey)
	return fuseMount(target, geesefsCmd, args)
}

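The longer Mount variant above hands the geesefs process to the host's systemd as a transient unit, so the mount survives restarts of the csi-s3 container. A stripped-down sketch of that pattern using the same go-systemd calls (the unit name scheme matches the diff; the arguments in `main` are illustrative):

```go
package main

import (
	"fmt"

	systemd "github.com/coreos/go-systemd/v22/dbus"
)

// startMountUnit runs execArgs as a transient systemd service on the host,
// keeping the FUSE process alive outside the CSI container.
func startMountUnit(volumeID string, execArgs []string) error {
	conn, err := systemd.New() // connect to systemd over D-Bus
	if err != nil {
		return err
	}
	defer conn.Close()

	// PathBusEscape turns the volume ID into a valid unit-name component.
	unit := "geesefs-" + systemd.PathBusEscape(volumeID) + ".service"
	props := []systemd.Property{
		systemd.PropExecStart(execArgs, false),
	}
	// "replace" replaces any queued conflicting job for this unit.
	if _, err := conn.StartTransientUnit(unit, "replace", props, nil); err != nil {
		return fmt.Errorf("starting %s: %v", unit, err)
	}
	return nil
}

func main() {
	// Hypothetical invocation; requires a reachable systemd D-Bus socket.
	err := startMountUnit("bucket/vol-1", []string{"/usr/bin/geesefs", "-f", "bucket:vol-1", "/mnt/target"})
	fmt.Println(err)
}
```
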
@@ -11,18 +11,18 @@ import (
	"syscall"
	"time"

	systemd "github.com/coreos/go-systemd/v22/dbus"
	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
	"github.com/golang/glog"
	"github.com/mitchellh/go-ps"
	"k8s.io/kubernetes/pkg/util/mount"

	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
)

// Mounter interface which can be implemented
// by the different mounter types
type Mounter interface {
	Mount(target, volumeID string) error
	Stage(stagePath string) error
	Unstage(stagePath string) error
	Mount(source string, target string) error
}

const (

@@ -57,11 +57,9 @@ func New(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
	}
}

func fuseMount(path string, command string, args []string, envs []string) error {
func fuseMount(path string, command string, args []string) error {
	cmd := exec.Command(command, args...)
	cmd.Stderr = os.Stderr
	// cmd.Environ() returns envs inherited from the current process
	cmd.Env = append(cmd.Environ(), envs...)
	glog.V(3).Infof("Mounting fuse with command: %s and args: %s", command, args)

	out, err := cmd.Output()

@@ -72,40 +70,12 @@ func fuseMount(path string, command string, args []string, envs []string) error
	return waitForMount(path, 10*time.Second)
}

func Unmount(path string) error {
	if err := mount.New("").Unmount(path); err != nil {
		return err
	}
	return nil
}

func SystemdUnmount(volumeID string) (bool, error) {
	conn, err := systemd.New()
	if err != nil {
		glog.Errorf("Failed to connect to systemd dbus service: %v", err)
		return false, err
	}
	defer conn.Close()
	unitName := "geesefs-" + systemd.PathBusEscape(volumeID) + ".service"
	units, err := conn.ListUnitsByNames([]string{unitName})
	glog.Errorf("Got %v", units)
	if err != nil {
		glog.Errorf("Failed to list systemd unit by name %v: %v", unitName, err)
		return false, err
	}
	if len(units) == 0 || units[0].ActiveState == "inactive" || units[0].ActiveState == "failed" {
		return true, nil
	}
	_, err = conn.StopUnit(unitName, "replace", nil)
	return true, err
}

func FuseUnmount(path string) error {
	if err := mount.New("").Unmount(path); err != nil {
		return err
	}
	// as fuse quits immediately, we will try to wait until the process is done
	process, err := FindFuseMountProcess(path)
	process, err := findFuseMountProcess(path)
	if err != nil {
		glog.Errorf("Error getting PID of fuse mount: %s", err)
		return nil

@@ -137,7 +107,7 @@ func waitForMount(path string, timeout time.Duration) error {
	}
}

func FindFuseMountProcess(path string) (*os.Process, error) {
func findFuseMountProcess(path string) (*os.Process, error) {
	processes, err := ps.Processes()
	if err != nil {
		return nil, err

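The fuseMount change above is the crux of the credentials handling: the four-argument variant threads the AWS keys through `cmd.Env`, so they apply only to the spawned FUSE process, while the other variant sets them process-wide with `os.Setenv`, where concurrent mounts using different secrets could interfere. A small sketch of the per-command approach (`printenv` is assumed to exist on the host; note `cmd.Environ()` requires Go 1.19+):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("printenv", "AWS_ACCESS_KEY_ID")
	// Per-command environment: everything inherited from this process,
	// plus the extra credential. Nothing leaks into our own environment.
	cmd.Env = append(cmd.Environ(), "AWS_ACCESS_KEY_ID=EXAMPLEKEY")
	out, err := cmd.Output()
	fmt.Printf("%s%v\n", out, err) // EXAMPLEKEY <nil>
}
```
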
@@ -2,6 +2,7 @@ package mounter

import (
	"fmt"
	"os"
	"path"

	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"

@@ -30,7 +31,15 @@ func newRcloneMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
	}, nil
}

func (rclone *rcloneMounter) Mount(target, volumeID string) error {
func (rclone *rcloneMounter) Stage(stageTarget string) error {
	return nil
}

func (rclone *rcloneMounter) Unstage(stageTarget string) error {
	return nil
}

func (rclone *rcloneMounter) Mount(source string, target string) error {
	args := []string{
		"mount",
		fmt.Sprintf(":s3:%s", path.Join(rclone.meta.BucketName, rclone.meta.Prefix)),

@@ -46,9 +55,7 @@ func (rclone *rcloneMounter) Mount(target, volumeID string) error {
		args = append(args, fmt.Sprintf("--s3-region=%s", rclone.region))
	}
	args = append(args, rclone.meta.MountOptions...)
	envs := []string{
		"AWS_ACCESS_KEY_ID=" + rclone.accessKeyID,
		"AWS_SECRET_ACCESS_KEY=" + rclone.secretAccessKey,
	}
	return fuseMount(target, rcloneCmd, args, envs)
	os.Setenv("AWS_ACCESS_KEY_ID", rclone.accessKeyID)
	os.Setenv("AWS_SECRET_ACCESS_KEY", rclone.secretAccessKey)
	return fuseMount(target, rcloneCmd, args)
}

@@ -28,7 +28,15 @@ func newS3fsMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
	}, nil
}

func (s3fs *s3fsMounter) Mount(target, volumeID string) error {
func (s3fs *s3fsMounter) Stage(stageTarget string) error {
	return nil
}

func (s3fs *s3fsMounter) Unstage(stageTarget string) error {
	return nil
}

func (s3fs *s3fsMounter) Mount(source string, target string) error {
	if err := writes3fsPass(s3fs.pwFileContent); err != nil {
		return err
	}

@@ -44,7 +52,7 @@ func (s3fs *s3fsMounter) Mount(target, volumeID string) error {
		args = append(args, "-o", fmt.Sprintf("endpoint=%s", s3fs.region))
	}
	args = append(args, s3fs.meta.MountOptions...)
	return fuseMount(target, s3fsCmd, args, nil)
	return fuseMount(target, s3fsCmd, args)
}

func writes3fsPass(pwFileContent string) error {

@@ -52,7 +52,7 @@ func NewClient(cfg *Config) (*s3Client, error) {
		endpoint = u.Hostname() + ":" + u.Port()
	}
	minioClient, err := minio.New(endpoint, &minio.Options{
		Creds:  credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, ""),
		Creds:  credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, client.Config.Region),
		Secure: ssl,
	})
	if err != nil {

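For context on the NewClient hunk: in minio-go v7 the third argument of `credentials.NewStaticV4` is the session token, not the region, which is presumably why one side passes `""` there. A minimal client sketch under that assumption (endpoint, keys and region are placeholders):

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("storage.yandexcloud.net", &minio.Options{
		// NewStaticV4(accessKey, secretKey, sessionToken): the last field
		// stays empty for plain static keys; the region goes in Options.
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""),
		Secure: true,
		Region: "ru-central1",
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(client.EndpointURL())
}
```
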
@@ -1,24 +1,33 @@
FROM golang:1.16-buster

FROM yandex-cloud/k8s-csi-s3:dev
LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
LABEL description="csi-s3 testing image"

# Minio download servers are TERRIBLY SLOW as of 2021-10-27
#RUN wget https://dl.min.io/server/minio/release/linux-amd64/minio && \
#    chmod +x minio && \
#    mv minio /usr/local/bin
RUN apt-get update && \
    apt-get install -y \
        git wget make && \
    rm -rf /var/lib/apt/lists/*

RUN git clone --depth=1 https://github.com/minio/minio
RUN cd minio && go build && mv minio /usr/local/bin
ARG GOVERSION=1.16.3
RUN wget -q https://golang.org/dl/go${GOVERSION}.linux-amd64.tar.gz && \
    tar -xf go${GOVERSION}.linux-amd64.tar.gz && \
    rm go${GOVERSION}.linux-amd64.tar.gz && \
    mv go /usr/local

WORKDIR /build
ENV GOROOT /usr/local/go
ENV GOPATH /go
ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH

RUN wget -q https://dl.min.io/server/minio/release/linux-amd64/minio && \
    chmod +x minio && \
    mv minio /usr/local/bin

WORKDIR /app

# prewarm go mod cache
COPY go.mod .
COPY go.sum .
RUN go mod download

RUN wget https://github.com/yandex-cloud/geesefs/releases/latest/download/geesefs-linux-amd64 \
    -O /usr/bin/geesefs && chmod 755 /usr/bin/geesefs
ADD test/test.sh /usr/local/bin

ENTRYPOINT ["/build/test/test.sh"]
ENTRYPOINT ["/usr/local/bin/test.sh"]

@@ -1,8 +1,8 @@
#!/bin/sh
#!/usr/bin/env bash
export MINIO_ACCESS_KEY=FJDSJ
export MINIO_SECRET_KEY=DSG643HGDS

mkdir -p /tmp/minio
minio server /tmp/minio &>/dev/null &
sleep 5
go test ./... -cover -ginkgo.noisySkippings=false -ginkgo.skip="should fail when requesting to create a volume with already existing name and different capacity"
go test ./... -cover -ginkgo.noisySkippings=false