Compare commits

...

No commits in common. "master" and "github-pages" have entirely different histories.

48 changed files with 34 additions and 3194 deletions

@@ -1,48 +0,0 @@
name: Publish Helm chart

on:
  push:
    branches:
      - master
  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
  contents: write
  pages: write
  id-token: write

# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
  group: "pages"
  cancel-in-progress: false

jobs:
  deploy:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Setup Pages
        uses: actions/configure-pages@v3
      - name: Publish Helm chart
        uses: stefanprodan/helm-gh-pages@0ad2bb377311d61ac04ad9eb6f252fb68e207260 # v1.7.0
        with:
          # GitHub automatically creates a GITHUB_TOKEN secret to use in your workflow. You can use the GITHUB_TOKEN to authenticate in a workflow run.
          # See https://docs.github.com/en/actions/reference/authentication-in-a-workflow#about-the-github_token-secret
          token: ${{ secrets.GITHUB_TOKEN }}
          branch: github-pages
          charts_dir: deploy/helm
          target_dir: charts
          linting: off

@@ -1,21 +0,0 @@
name: Release

on:
  push:
    tags:
      - "v*"

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Push Docker
        run: (echo '${{ secrets.REGISTRY_KEY }}' | docker login --username json_key --password-stdin cr.yandex) && make push
      - uses: "marvinpinto/action-automatic-releases@latest"
        with:
          repo_token: "${{ secrets.GITHUB_TOKEN }}"
          prerelease: false

.gitignore

@@ -1,12 +0,0 @@
# This is where the result of the go build goes
/output*/
/_output*/
/_output
# Go test binaries
*.test
# Godeps or dep workspace
/Godeps/_workspace
vendor
vendor.*

.gitkeep (new empty file)

@@ -1,24 +0,0 @@
image:
  name: yandex-cloud/k8s-csi-s3:test
  entrypoint: [""]

variables:
  DOCKER_HOST: tcp://docker:2375
  DOCKER_DRIVER: overlay2

stages:
  - build
  - test

build:
  stage: build
  script:
    - make build

test:
  stage: test
  image: docker:stable
  services:
    - docker:dind
  script:
    - docker run --rm --privileged -v $(pwd):/app --device /dev/fuse yandex-cloud/k8s-csi-s3:test

@@ -1,4 +0,0 @@
The following authors have created the source code of "k8s-csi-s3" published and distributed by YANDEX LLC as the owner:
Vitaliy Filippov vitaliff@yandex-team.ru
Cyrill Troxler cyrilltroxler@gmail.com

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,21 +0,0 @@
FROM golang:1.19-alpine as gobuild
WORKDIR /build
ADD go.mod go.sum /build/
RUN go mod download -x
ADD cmd /build/cmd
ADD pkg /build/pkg
RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o ./s3driver ./cmd/s3driver
FROM alpine:3.17
LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
LABEL description="csi-s3 slim image"
RUN apk add --no-cache fuse mailcap rclone
RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community s3fs-fuse
ADD https://github.com/yandex-cloud/geesefs/releases/latest/download/geesefs-linux-amd64 /usr/bin/geesefs
RUN chmod 755 /usr/bin/geesefs
COPY --from=gobuild /build/s3driver /s3driver
ENTRYPOINT ["/s3driver"]

LICENSE

@@ -1,13 +0,0 @@
Copyright 2021 YANDEX LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,41 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: test build container push clean

REGISTRY_NAME=cr.yandex/crp9ftr22d26age3hulg
REGISTRY_NAME2=cr.il.nebius.cloud/crll7us9n6i5j3v4n92m
IMAGE_NAME=csi-s3
IMAGE_NAME2=yandex-cloud/csi-s3/csi-s3-driver
VERSION ?= 0.38.3
IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(VERSION)
TEST_IMAGE_TAG=$(IMAGE_NAME):test

build:
	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/s3driver ./cmd/s3driver

test:
	docker build -t $(TEST_IMAGE_TAG) -f test/Dockerfile .
	docker run --rm --privileged -v $(PWD):/build --device /dev/fuse $(TEST_IMAGE_TAG)

container:
	docker build -t $(IMAGE_TAG) .

push: container
	docker tag $(IMAGE_TAG) $(REGISTRY_NAME)/$(IMAGE_NAME):latest
	docker tag $(IMAGE_TAG) $(REGISTRY_NAME)/$(IMAGE_NAME2):$(VERSION)
	docker tag $(IMAGE_TAG) $(REGISTRY_NAME)/$(IMAGE_NAME2):latest
	docker push $(IMAGE_TAG)
	docker push $(REGISTRY_NAME)/$(IMAGE_NAME)
	docker push $(REGISTRY_NAME)/$(IMAGE_NAME2)
	docker push $(REGISTRY_NAME)/$(IMAGE_NAME2):$(VERSION)

clean:
	go clean -r -x
	-rm -rf _output

README.md

@@ -1,205 +0,0 @@
# CSI for S3
This is a Container Storage Interface ([CSI](https://github.com/container-storage-interface/spec/blob/master/spec.md)) driver for S3 (or S3-compatible) storage. It can dynamically allocate buckets and mount them via a FUSE mount into any container.
## Kubernetes installation
### Requirements
* Kubernetes 1.17+
* Kubernetes has to allow privileged containers
* Docker daemon must allow shared mounts (systemd flag `MountFlags=shared`)
### Helm chart
The Helm chart is published at `https://yandex-cloud.github.io/k8s-csi-s3`:
```
helm repo add yandex-s3 https://yandex-cloud.github.io/k8s-csi-s3/charts
helm install csi-s3 yandex-s3/csi-s3
```
### Manual installation
#### 1. Create a secret with your S3 credentials
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: csi-s3-secret
  # Namespace depends on the configuration in the storageclass.yaml
  namespace: kube-system
stringData:
  accessKeyID: <YOUR_ACCESS_KEY_ID>
  secretAccessKey: <YOUR_SECRET_ACCESS_KEY>
  # For AWS set it to "https://s3.<region>.amazonaws.com", for example https://s3.eu-central-1.amazonaws.com
  endpoint: https://storage.yandexcloud.net
  # For AWS set it to AWS region
  #region: ""
```
The region can be empty if you are using some other S3-compatible storage.
#### 2. Deploy the driver
```bash
cd deploy/kubernetes
kubectl create -f provisioner.yaml
kubectl create -f driver.yaml
kubectl create -f csi-s3.yaml
```
If you're upgrading from a previous version which had `attacher.yaml`, you
can safely delete all resources created from that file:
```
wget https://raw.githubusercontent.com/yandex-cloud/k8s-csi-s3/v0.35.5/deploy/kubernetes/attacher.yaml
kubectl delete -f attacher.yaml
```
#### 3. Create the storage class
```bash
kubectl create -f examples/storageclass.yaml
```
#### 4. Test the S3 driver
1. Create a PVC using the new storage class:
```bash
kubectl create -f examples/pvc.yaml
```
1. Check if the PVC has been bound:
```bash
$ kubectl get pvc csi-s3-pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
csi-s3-pvc Bound pvc-c5d4634f-8507-11e8-9f33-0e243832354b 5Gi RWO csi-s3 9s
```
1. Create a test pod which mounts your volume:
```bash
kubectl create -f examples/pod.yaml
```
If the pod can start, everything should be working.
1. Test the mount:
```bash
$ kubectl exec -ti csi-s3-test-nginx bash
$ mount | grep fuse
pvc-035763df-0488-4941-9a34-f637292eb95c: on /usr/share/nginx/html/s3 type fuse.geesefs (rw,nosuid,nodev,relatime,user_id=65534,group_id=0,default_permissions,allow_other)
$ touch /usr/share/nginx/html/s3/hello_world
```
If something does not work as expected, check the troubleshooting section below.
## Additional configuration
### Bucket
By default, csi-s3 will create a new bucket per volume. The bucket name will match the volume ID. If you want your volumes to live in a precreated bucket, you can simply specify the bucket in the storage class parameters:
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: csi-s3-existing-bucket
provisioner: ru.yandex.s3.csi
parameters:
  mounter: geesefs
  options: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
  bucket: some-existing-bucket-name
```
If a bucket is specified, it will still be created if it does not exist on the backend. Every volume gets its own prefix within the bucket, matching the volume ID. When a volume is deleted, only its prefix is removed.
### Static Provisioning
If you want to mount a pre-existing bucket, or a prefix within a pre-existing bucket, and don't want csi-s3 to delete it when the PV is deleted, you can use static provisioning.
To do that, omit `storageClassName` in the `PersistentVolumeClaim` and manually create a `PersistentVolume` with a matching `claimRef`, as in the following example: [deploy/kubernetes/examples/pvc-manual.yaml](deploy/kubernetes/examples/pvc-manual.yaml).
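A minimal sketch of the idea (resource names are illustrative; the secret references and mount options a real PV needs are omitted here, see the linked file for a complete manifest):
```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: manual-pv              # illustrative name
spec:
  storageClassName: csi-s3
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  claimRef:                    # binds this PV to the PVC below
    namespace: default
    name: manual-pvc
  csi:
    driver: ru.yandex.s3.csi
    volumeHandle: some-existing-bucket/some/prefix  # existing bucket (and optional prefix)
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: manual-pvc             # illustrative name
spec:
  storageClassName: ""         # empty string disables dynamic provisioning
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
```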
### Mounter
We **strongly recommend** using the default mounter, [GeeseFS](https://github.com/yandex-cloud/geesefs).
However, two other backends are also supported: [s3fs](https://github.com/s3fs-fuse/s3fs-fuse) and [rclone](https://rclone.org/commands/rclone_mount).
The mounter is set as a parameter in the storage class; you can also create a separate storage class for each mounter if you like.
As S3 is not a real file system, there are some limitations to consider here.
Depending on which mounter you are using, you will get different levels of POSIX compatibility.
Consistency guarantees also depend on the S3 storage backend and are [not always given](https://github.com/gaul/are-we-consistent-yet#observed-consistency).
You can check the POSIX compatibility matrix here: https://github.com/yandex-cloud/geesefs#posix-compatibility-matrix.
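For example, a storage class that selects s3fs instead of the default mounter could look like this (a sketch; the class name is illustrative):
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: csi-s3-s3fs            # illustrative name
provisioner: ru.yandex.s3.csi
parameters:
  mounter: s3fs                # instead of the default geesefs; rclone is also supported
```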
#### GeeseFS
* Almost full POSIX compatibility
* Good performance for both small and big files
* Does not store file permissions and custom modification times
* By default runs **outside** of the csi-s3 container using systemd, so that
  mountpoints don't crash with "Transport endpoint is not connected" when csi-s3
  is upgraded or restarted. Add `--no-systemd` to `parameters.options` of the
  `StorageClass` to disable this behaviour (see the sketch after this list).
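A sketch of that change, based on the options used in `examples/storageclass.yaml`:
```yaml
parameters:
  mounter: geesefs
  # --no-systemd makes GeeseFS run inside the csi-s3 container instead of via systemd
  options: "--no-systemd --memory-limit 1000 --dir-mode 0777 --file-mode 0666"
```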
#### s3fs
* Almost full POSIX compatibility
* Good performance for big files, poor performance for small files
* Very slow for directories with a large number of files
#### rclone
* Poor POSIX compatibility
* Bad performance for big files, okayish performance for small files
* Doesn't create directory objects like s3fs or GeeseFS
* May hang :-)
## Troubleshooting
### Issues while creating PVC
Check the logs of the provisioner:
```bash
kubectl logs -l app=csi-provisioner-s3 -c csi-s3
```
### Issues creating containers
1. Ensure feature gate `MountPropagation` is not set to `false`
2. Check the logs of the s3-driver:
```bash
kubectl logs -l app=csi-s3 -c csi-s3
```
## Development
This project can be built like any other Go application.
```bash
go get -u github.com/yandex-cloud/k8s-csi-s3
```
### Build executable
```bash
make build
```
### Tests
Currently, the driver is tested by the [CSI Sanity Tester](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity). As end-to-end tests require S3 storage and a mounter like s3fs, this is best done in a Docker container. A Dockerfile and the test script are in the `test` directory. The easiest way to run the tests is to just use the make command:
```bash
make test
```

charts/csi-s3-0.37.4.tgz (new binary file, not shown)

charts/csi-s3-0.38.3.tgz (new binary file, not shown)

charts/index.yaml

@@ -0,0 +1,34 @@
apiVersion: v1
entries:
  csi-s3:
  - apiVersion: v1
    appVersion: 0.38.3
    created: "2023-09-28T08:49:10.782710489Z"
    description: Container Storage Interface (CSI) driver for S3 volumes
    digest: 255ca61b90c3f0b11347fbb44614e08a156cf93b85cd213166a74e84a5937d42
    home: https://github.com/yandex-cloud/k8s-csi-s3
    icon: https://raw.githubusercontent.com/yandex-cloud/geesefs/master/doc/geesefs.png
    keywords:
    - s3
    name: csi-s3
    sources:
    - https://github.com/yandex-cloud/k8s-csi-s3/deploy/helm
    urls:
    - https://yandex-cloud.github.io/k8s-csi-s3/charts/csi-s3-0.38.3.tgz
    version: 0.38.3
  - apiVersion: v1
    appVersion: 0.37.4
    created: "2023-09-13T13:53:38.432546182Z"
    description: Container Storage Interface (CSI) driver for S3 volumes
    digest: 265c47ea37854d1e5f355975fede1420f8a3393953a9e58a6e8680060d180e3d
    home: https://github.com/yandex-cloud/k8s-csi-s3
    icon: https://raw.githubusercontent.com/yandex-cloud/geesefs/master/doc/geesefs.png
    keywords:
    - s3
    name: csi-s3
    sources:
    - https://github.com/yandex-cloud/k8s-csi-s3/deploy/helm
    urls:
    - https://yandex-cloud.github.io/k8s-csi-s3/charts/csi-s3-0.37.4.tgz
    version: 0.37.4
generated: "2023-09-28T08:49:10.781658883Z"

@@ -1,45 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"flag"
	"log"
	"os"

	"github.com/yandex-cloud/k8s-csi-s3/pkg/driver"
)

func init() {
	flag.Set("logtostderr", "true")
}

var (
	endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
	nodeID   = flag.String("nodeid", "", "node id")
)

func main() {
	flag.Parse()

	driver, err := driver.New(*nodeID, *endpoint)
	if err != nil {
		log.Fatal(err)
	}
	driver.Run()
	os.Exit(0)
}

@@ -1,12 +0,0 @@
---
apiVersion: v1
appVersion: 0.38.3
description: "Container Storage Interface (CSI) driver for S3 volumes"
name: csi-s3
version: 0.38.3
keywords:
  - s3
home: https://github.com/yandex-cloud/k8s-csi-s3
sources:
  - https://github.com/yandex-cloud/k8s-csi-s3/deploy/helm
icon: https://raw.githubusercontent.com/yandex-cloud/geesefs/master/doc/geesefs.png

@@ -1,40 +0,0 @@
# Helm chart for csi-s3
This chart adds S3 volume support to your cluster.
## Install chart
- Helm 2.x: `helm install [--set secret.accessKey=... --set secret.secretKey=... ...] --namespace kube-system --name csi-s3 .`
- Helm 3.x: `helm install [--set secret.accessKey=... --set secret.secretKey=... ...] --namespace kube-system csi-s3 .`
After installation succeeds, you can get the status of the chart: `helm status csi-s3`.
## Delete Chart
- Helm 2.x: `helm delete --purge csi-s3`
- Helm 3.x: `helm uninstall csi-s3 --namespace kube-system`
## Configuration
By default, this chart creates a secret and a storage class. You should at least set `secret.accessKey` and `secret.secretKey`
to your [Yandex Object Storage](https://cloud.yandex.com/en-ru/services/storage) keys for it to work.
The following table lists all configuration parameters and their default values; a sample values override follows the table.
| Parameter | Description | Default |
| ---------------------------- | ---------------------------------------------------------------------- | ------------------------------------------------------ |
| `storageClass.create` | Specifies whether the storage class should be created | true |
| `storageClass.name` | Storage class name | csi-s3 |
| `storageClass.singleBucket` | Use a single bucket for all dynamically provisioned persistent volumes | |
| `storageClass.mounter` | Mounter to use. Either geesefs, s3fs or rclone. geesefs recommended | geesefs |
| `storageClass.mountOptions` | GeeseFS mount options | `--memory-limit 1000 --dir-mode 0777 --file-mode 0666` |
| `storageClass.reclaimPolicy` | Volume reclaim policy | Delete |
| `storageClass.annotations` | Annotations for the storage class | |
| `secret.create` | Specifies whether the secret should be created | true |
| `secret.name` | Name of the secret | csi-s3-secret |
| `secret.accessKey` | S3 Access Key | |
| `secret.secretKey` | S3 Secret Key | |
| `secret.endpoint` | Endpoint | https://storage.yandexcloud.net |
| `tolerations.all` | Tolerate all taints by the CSI-S3 node driver (mounter) | false |
| `tolerations.node` | Custom tolerations for the CSI-S3 node driver (mounter) | [] |
| `tolerations.controller` | Custom tolerations for the CSI-S3 controller (provisioner) | [] |
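For example, a minimal override file passed via `-f` to the install commands above could look like this (a sketch; the file name and values are illustrative, keys are taken from the table):
```yaml
# my-values.yaml (illustrative)
secret:
  accessKey: "<YOUR_ACCESS_KEY_ID>"
  secretKey: "<YOUR_SECRET_ACCESS_KEY>"
storageClass:
  # keep provisioned buckets when the PVC is deleted
  reclaimPolicy: Retain
```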

@@ -1,112 +0,0 @@
helm_chart:
  name: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3
  tag: 0.38.3
requirements:
  k8s_version: ">=1.13"
images:
  - full: images.registrar
  - full: images.provisioner
  - full: images.csi
user_values:
  - name: storageClass.create
    title:
      en: Create storage class
      ru: Создать класс хранения
    description:
      en: Specifies whether the storage class should be created
      ru: Выберите, чтобы создать новый S3-класс хранения при развёртывании приложения.
    boolean_value:
      default_value: true
  - name: secret.create
    title:
      en: Create secret
      ru: Создать секрет
    description:
      en: Specifies whether the secret should be created
      ru: Выберите, чтобы создать новый секрет для класса хранения при установке приложения, а не использовать существующий.
    boolean_value:
      default_value: true
  - name: secret.accessKey
    title:
      en: S3 Access Key ID
      ru: Идентификатор ключа S3
    description:
      en: S3 Access Key ID
      ru: Идентификатор ключа S3.
    string_value:
      default_value: ""
  - name: secret.secretKey
    title:
      en: S3 Secret Key
      ru: Секретный ключ S3
    description:
      en: S3 Secret Key
      ru: Секретный ключ S3.
    string_value:
      default_value: ""
  - name: storageClass.singleBucket
    title:
      en: Single S3 bucket for volumes
      ru: Общий бакет S3 для томов
    description:
      en: Single S3 bucket to use for all dynamically provisioned persistent volumes
      ru: Общий бакет S3, в котором будут создаваться все динамически распределяемые тома. Если пусто, под каждый том будет создаваться новый бакет.
    string_value:
      default_value: ""
  - name: secret.endpoint
    title:
      en: S3 endpoint
      ru: Адрес S3-сервиса
    description:
      en: S3 service endpoint to use
      ru: Адрес S3-сервиса, который будет использоваться.
    string_value:
      default_value: "https://storage.yandexcloud.net"
  - name: storageClass.mountOptions
    title:
      en: GeeseFS mount options
      ru: Опции монтирования GeeseFS
    description:
      en: GeeseFS mount options to use. Refer to `geesefs --help` command output for the whole list of options (https://github.com/yandex-cloud/geesefs).
      ru: Опции монтирования GeeseFS. Полный перечень и описание опций смотрите в выводе команды `geesefs --help` (https://github.com/yandex-cloud/geesefs).
    string_value:
      default_value: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
  - name: storageClass.reclaimPolicy
    title:
      en: Volume reclaim policy
      ru: Политика очистки томов
    description:
      en: Volume reclaim policy for the storage class (Retain or Delete)
      ru: Выберите политику очистки томов PersistentVolume при удалении PersistentVolumeClaim. Retain — сохранять том, Delete — удалять том.
    string_selector_value:
      default_value: Delete
      values:
        - Delete
        - Retain
  - name: storageClass.name
    title:
      en: Storage class name
      ru: Название класса хранения
    description:
      en: Name of the storage class that will be created
      ru: Название класса хранения, который будет создан при установке.
    string_value:
      default_value: csi-s3
  - name: secret.name
    title:
      en: Name of the secret
      ru: Название секрета
    description:
      en: Name of the secret to create or use for the storage class
      ru: Название секрета, который будет создан или использован для класса хранения.
    string_value:
      default_value: csi-s3-secret
  - name: tolerations.all
    title:
      en: Tolerate all taints
      ru: Игнорировать все политики taint
    description:
      en: Tolerate all taints by the CSI-S3 node driver (mounter)
      ru: Выберите, чтобы драйвер CSI, который монтирует файловую систему на узлах, игнорировал все политики taint для узлов кластера.
    boolean_value:
      default_value: false

@@ -1,129 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-s3
  namespace: {{ .Release.Namespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-s3
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-s3
subjects:
  - kind: ServiceAccount
    name: csi-s3
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: csi-s3
  apiGroup: rbac.authorization.k8s.io
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-s3
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    matchLabels:
      app: csi-s3
  template:
    metadata:
      labels:
        app: csi-s3
    spec:
      tolerations:
        {{- if .Values.tolerations.all }}
        - operator: Exists
        {{- else }}
        - key: CriticalAddonsOnly
          operator: Exists
        - operator: Exists
          effect: NoExecute
          tolerationSeconds: 300
        {{- end }}
        {{- with .Values.tolerations.node }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      serviceAccount: csi-s3
      containers:
        - name: driver-registrar
          image: {{ .Values.images.registrar }}
          args:
            - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
            - "--v=4"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
            - name: DRIVER_REG_SOCK_PATH
              value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration/
        - name: csi-s3
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: {{ .Values.images.csi }}
          imagePullPolicy: IfNotPresent
          args:
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--nodeid=$(NODE_ID)"
            - "--v=4"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: stage-dir
              mountPath: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
              mountPropagation: "Bidirectional"
            - name: pods-mount-dir
              mountPath: {{ .Values.kubeletPath }}/pods
              mountPropagation: "Bidirectional"
            - name: fuse-device
              mountPath: /dev/fuse
            - name: systemd-control
              mountPath: /run/systemd
      volumes:
        - name: registration-dir
          hostPath:
            path: {{ .Values.kubeletPath }}/plugins_registry/
            type: DirectoryOrCreate
        - name: plugin-dir
          hostPath:
            path: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
            type: DirectoryOrCreate
        - name: stage-dir
          hostPath:
            path: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
            type: DirectoryOrCreate
        - name: pods-mount-dir
          hostPath:
            path: {{ .Values.kubeletPath }}/pods
            type: Directory
        - name: fuse-device
          hostPath:
            path: /dev/fuse
        - name: systemd-control
          hostPath:
            path: /run/systemd
            type: DirectoryOrCreate

@@ -1,10 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: ru.yandex.s3.csi
spec:
  attachRequired: false
  podInfoOnMount: true
  fsGroupPolicy: File # added in Kubernetes 1.19, this field is GA as of Kubernetes 1.23
  volumeLifecycleModes: # added in Kubernetes 1.16, this field is beta
    - Persistent

@@ -1,116 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-provisioner-sa
  namespace: {{ .Release.Namespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: csi-provisioner-sa
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
  name: csi-provisioner-s3
  namespace: {{ .Release.Namespace }}
  labels:
    app: csi-provisioner-s3
spec:
  selector:
    app: csi-provisioner-s3
  ports:
    - name: csi-s3-dummy
      port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-provisioner-s3
  namespace: {{ .Release.Namespace }}
spec:
  serviceName: "csi-provisioner-s3"
  replicas: 1
  selector:
    matchLabels:
      app: csi-provisioner-s3
  template:
    metadata:
      labels:
        app: csi-provisioner-s3
    spec:
      serviceAccount: csi-provisioner-sa
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
        - key: CriticalAddonsOnly
          operator: Exists
        {{- with .Values.tolerations.controller }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      containers:
        - name: csi-provisioner
          image: {{ .Values.images.provisioner }}
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=4"
          env:
            - name: ADDRESS
              value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
        - name: csi-s3
          image: {{ .Values.images.csi }}
          imagePullPolicy: IfNotPresent
          args:
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--nodeid=$(NODE_ID)"
            - "--v=4"
          env:
            - name: CSI_ENDPOINT
              value: unix://{{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
      volumes:
        - name: socket-dir
          emptyDir: {}

@@ -1,15 +0,0 @@
{{- if .Values.secret.create -}}
apiVersion: v1
kind: Secret
metadata:
  namespace: {{ .Release.Namespace }}
  name: {{ .Values.secret.name }}
stringData:
  {{- if .Values.secret.accessKey }}
  accessKeyID: {{ .Values.secret.accessKey }}
  {{- end }}
  {{- if .Values.secret.secretKey }}
  secretAccessKey: {{ .Values.secret.secretKey }}
  {{- end }}
  endpoint: {{ .Values.secret.endpoint }}
{{- end -}}

@@ -1,26 +0,0 @@
{{- if .Values.storageClass.create -}}
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: {{ .Values.storageClass.name }}
  {{- if .Values.storageClass.annotations }}
  annotations:
{{ toYaml .Values.storageClass.annotations | indent 4 }}
  {{- end }}
provisioner: ru.yandex.s3.csi
parameters:
  mounter: "{{ .Values.storageClass.mounter }}"
  options: "{{ .Values.storageClass.mountOptions }}"
  {{- if .Values.storageClass.singleBucket }}
  bucket: "{{ .Values.storageClass.singleBucket }}"
  {{- end }}
  csi.storage.k8s.io/provisioner-secret-name: {{ .Values.secret.name }}
  csi.storage.k8s.io/provisioner-secret-namespace: {{ .Release.Namespace }}
  csi.storage.k8s.io/controller-publish-secret-name: {{ .Values.secret.name }}
  csi.storage.k8s.io/controller-publish-secret-namespace: {{ .Release.Namespace }}
  csi.storage.k8s.io/node-stage-secret-name: {{ .Values.secret.name }}
  csi.storage.k8s.io/node-stage-secret-namespace: {{ .Release.Namespace }}
  csi.storage.k8s.io/node-publish-secret-name: {{ .Values.secret.name }}
  csi.storage.k8s.io/node-publish-secret-namespace: {{ .Release.Namespace }}
reclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
{{- end -}}

@@ -1,48 +0,0 @@
---
images:
  # Source: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
  registrar: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-node-driver-registrar:v1.2.0
  # Source: quay.io/k8scsi/csi-provisioner:v2.1.0
  provisioner: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-provisioner:v2.1.0
  # Main image
  csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3-driver:0.38.3

storageClass:
  # Specifies whether the storage class should be created
  create: true
  # Name
  name: csi-s3
  # Use a single bucket for all dynamically provisioned persistent volumes
  singleBucket: ""
  # mounter to use - either geesefs, s3fs or rclone (default geesefs)
  mounter: geesefs
  # GeeseFS mount options
  mountOptions: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
  # Volume reclaim policy
  reclaimPolicy: Delete
  # Annotations for the storage class
  # Example:
  # annotations:
  #   storageclass.kubernetes.io/is-default-class: "true"
  annotations: {}

secret:
  # Specifies whether the secret should be created
  create: true
  # Name of the secret
  name: csi-s3-secret
  # S3 Access Key
  accessKey: ""
  # S3 Secret Key
  secretKey: ""
  # Endpoint
  endpoint: https://storage.yandexcloud.net

tolerations:
  all: false
  node: []
  controller: []

nodeSelector: {}

kubeletPath: /var/lib/kubelet

@@ -1,122 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-s3
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-s3
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-s3
subjects:
  - kind: ServiceAccount
    name: csi-s3
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-s3
  apiGroup: rbac.authorization.k8s.io
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-s3
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: csi-s3
  template:
    metadata:
      labels:
        app: csi-s3
    spec:
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - operator: Exists
          effect: NoExecute
          tolerationSeconds: 300
      serviceAccount: csi-s3
      containers:
        - name: driver-registrar
          image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
          args:
            - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
            - "--v=4"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
            - name: DRIVER_REG_SOCK_PATH
              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration/
        - name: csi-s3
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
          imagePullPolicy: IfNotPresent
          args:
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--nodeid=$(NODE_ID)"
            - "--v=4"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: stage-dir
              mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
              mountPropagation: "Bidirectional"
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
            - name: fuse-device
              mountPath: /dev/fuse
            - name: systemd-control
              mountPath: /run/systemd
      volumes:
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: DirectoryOrCreate
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
            type: DirectoryOrCreate
        - name: stage-dir
          hostPath:
            path: /var/lib/kubelet/plugins/kubernetes.io/csi
            type: DirectoryOrCreate
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: Directory
        - name: fuse-device
          hostPath:
            path: /dev/fuse
        - name: systemd-control
          hostPath:
            path: /run/systemd
            type: DirectoryOrCreate

@@ -1,7 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: ru.yandex.s3.csi
spec:
  attachRequired: false
  podInfoOnMount: true

@@ -1,17 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: csi-s3-test-nginx
  namespace: default
spec:
  containers:
    - name: csi-s3-test-nginx
      image: nginx
      volumeMounts:
        - mountPath: /usr/share/nginx/html/s3
          name: webroot
  volumes:
    - name: webroot
      persistentVolumeClaim:
        claimName: csi-s3-pvc
        readOnly: false

@@ -1,46 +0,0 @@
# Statically provisioned PVC:
# An existing bucket or path inside bucket manually created
# by the administrator beforehand will be bound to the PVC,
# and it won't be removed when you remove the PV
apiVersion: v1
kind: PersistentVolume
metadata:
  name: manualbucket-with-path
spec:
  storageClassName: csi-s3
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  claimRef:
    namespace: default
    name: csi-s3-manual-pvc
  csi:
    driver: ru.yandex.s3.csi
    controllerPublishSecretRef:
      name: csi-s3-secret
      namespace: kube-system
    nodePublishSecretRef:
      name: csi-s3-secret
      namespace: kube-system
    nodeStageSecretRef:
      name: csi-s3-secret
      namespace: kube-system
    volumeAttributes:
      capacity: 10Gi
      mounter: geesefs
      options: --memory-limit 1000 --dir-mode 0777 --file-mode 0666
    volumeHandle: manualbucket/path
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-s3-manual-pvc
spec:
  # Empty storage class disables dynamic provisioning
  storageClassName: ""
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi

@@ -1,15 +0,0 @@
# Dynamically provisioned PVC:
# A bucket or path inside bucket will be created automatically
# for the PV and removed when the PV is removed
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-s3-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
  storageClassName: csi-s3

@@ -1,12 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  namespace: kube-system
  name: csi-s3-secret
stringData:
  accessKeyID: YOUR_ACCESS_KEY_ID
  secretAccessKey: YOUR_SECRET_ACCESS_KEY
  # For AWS set it to "https://s3.<region>.amazonaws.com", for example https://s3.eu-central-1.amazonaws.com
  endpoint: https://storage.yandexcloud.net
  # For AWS set it to AWS region
  #region: ""

@@ -1,20 +0,0 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: csi-s3
provisioner: ru.yandex.s3.csi
parameters:
  mounter: geesefs
  # you can set mount options here, for example limit memory cache size (recommended)
  options: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
  # to use an existing bucket, specify it here:
  #bucket: some-existing-bucket
  csi.storage.k8s.io/provisioner-secret-name: csi-s3-secret
  csi.storage.k8s.io/provisioner-secret-namespace: kube-system
  csi.storage.k8s.io/controller-publish-secret-name: csi-s3-secret
  csi.storage.k8s.io/controller-publish-secret-namespace: kube-system
  csi.storage.k8s.io/node-stage-secret-name: csi-s3-secret
  csi.storage.k8s.io/node-stage-secret-namespace: kube-system
  csi.storage.k8s.io/node-publish-secret-name: csi-s3-secret
  csi.storage.k8s.io/node-publish-secret-namespace: kube-system

@@ -1,109 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-provisioner-sa
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: csi-provisioner-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
  name: csi-provisioner-s3
  namespace: kube-system
  labels:
    app: csi-provisioner-s3
spec:
  selector:
    app: csi-provisioner-s3
  ports:
    - name: csi-s3-dummy
      port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-provisioner-s3
  namespace: kube-system
spec:
  serviceName: "csi-provisioner-s3"
  replicas: 1
  selector:
    matchLabels:
      app: csi-provisioner-s3
  template:
    metadata:
      labels:
        app: csi-provisioner-s3
    spec:
      serviceAccount: csi-provisioner-sa
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        - name: csi-provisioner
          image: quay.io/k8scsi/csi-provisioner:v2.1.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=4"
          env:
            - name: ADDRESS
              value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
        - name: csi-s3
          image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
          imagePullPolicy: IfNotPresent
          args:
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--nodeid=$(NODE_ID)"
            - "--v=4"
          env:
            - name: CSI_ENDPOINT
              value: unix:///var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
      volumes:
        - name: socket-dir
          emptyDir: {}

go.mod

@@ -1,29 +0,0 @@
module github.com/yandex-cloud/k8s-csi-s3

go 1.15

require (
	github.com/container-storage-interface/spec v1.1.0
	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
	github.com/godbus/dbus/v5 v5.0.4 // indirect
	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
	github.com/golang/protobuf v1.1.0 // indirect
	github.com/kubernetes-csi/csi-lib-utils v0.6.1 // indirect
	github.com/kubernetes-csi/csi-test v2.0.0+incompatible
	github.com/kubernetes-csi/drivers v1.0.2
	github.com/minio/minio-go/v7 v7.0.5
	github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936
	github.com/onsi/ginkgo v1.5.0
	github.com/onsi/gomega v1.4.0
	github.com/spf13/afero v1.2.1 // indirect
	golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a // indirect
	golang.org/x/net v0.0.0-20200707034311-ab3426394381
	golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 // indirect
	golang.org/x/sys v0.0.0-20200922070232-aee5d888a860 // indirect
	google.golang.org/genproto v0.0.0-20180716172848-2731d4fa720b // indirect
	google.golang.org/grpc v1.13.0
	k8s.io/apimachinery v0.0.0-20180714051327-705cfa51a97f // indirect
	k8s.io/klog v0.2.0 // indirect
	k8s.io/kubernetes v1.13.4
	k8s.io/utils v0.0.0-20180703210027-ab9069044f32 // indirect
)

go.sum

@@ -1,116 +0,0 @@
github.com/container-storage-interface/spec v1.1.0 h1:qPsTqtR1VUPvMPeK0UnCZMtXaKGyyLPG8gj/wG6VqMs=
github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kubernetes-csi/csi-lib-utils v0.6.1 h1:+AZ58SRSRWh2vmMoWAAGcv7x6fIyBMpyCXAgIc9kT28=
github.com/kubernetes-csi/csi-lib-utils v0.6.1/go.mod h1:GVmlUmxZ+SUjVLXicRFjqWUUvWez0g0Y78zNV9t7KfQ=
github.com/kubernetes-csi/csi-test v2.0.0+incompatible h1:ia04uVFUM/J9n/v3LEMn3rEG6FmKV5BH9QLw7H68h44=
github.com/kubernetes-csi/csi-test v2.0.0+incompatible/go.mod h1:YxJ4UiuPWIhMBkxUKY5c267DyA0uDZ/MtAimhx/2TA0=
github.com/kubernetes-csi/drivers v1.0.2 h1:kaEAMfo+W5YFr23yedBIY+NGnNjr6/PbPzx7N4GYgiQ=
github.com/kubernetes-csi/drivers v1.0.2/go.mod h1:V6rHbbSLCZGaQoIZ8MkyDtoXtcKXZM0F7N3bkloDCOY=
github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4=
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
github.com/minio/minio-go/v7 v7.0.5 h1:I2NIJ2ojwJqD/YByemC1M59e1b4FW9kS7NlOar7HPV4=
github.com/minio/minio-go/v7 v7.0.5/go.mod h1:TA0CQCjJZHM5SJj9IjqR0NmpmQJ6bCbXifAJ3mUU6Hw=
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936 h1:kw1v0NlnN+GZcU8Ma8CLF2Zzgjfx95gs3/GN3vYAPpo=
github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/onsi/ginkgo v1.5.0 h1:uZr+v/TFDdYkdA+j02sPO1kA5owrfjBGCJAogfIyThE=
github.com/onsi/ginkgo v1.5.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.0 h1:p/ZBjQI9G/VwoPrslo/sqS6R5vHU9Od60+axIiP6WuQ=
github.com/onsi/gomega v1.4.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M=
github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860 h1:YEu4SMq7D0cmT7CBbXfcH0NZeuChAXwsHe/9XueUO6o=
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
google.golang.org/genproto v0.0.0-20180716172848-2731d4fa720b h1:mXqBiicV0B+k8wzFNkKeNBRL7LyRV5xG0s+S6ffLb/E=
google.golang.org/genproto v0.0.0-20180716172848-2731d4fa720b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc=
google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
k8s.io/apimachinery v0.0.0-20180714051327-705cfa51a97f h1:mjXiDUfs+4mhzRTLNTkAfQS9lqJCXQN/fIcMysNGW/Y=
k8s.io/apimachinery v0.0.0-20180714051327-705cfa51a97f/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
k8s.io/klog v0.2.0 h1:0ElL0OHzF3N+OhoJTL0uca20SxtYt4X4+bzHeqrB83c=
k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/kubernetes v1.13.4 h1:gQqFv/pH8hlbznLXQUsi8s5zqYnv0slmUDl/yVA0EWc=
k8s.io/kubernetes v1.13.4/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20180703210027-ab9069044f32 h1:Bhn4kFG8fxBouj05v9Y7bOYXKGYfnpmqiTXGgPCmPr8=
k8s.io/utils v0.0.0-20180703210027-ab9069044f32/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=

View file

@ -1,219 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"io"
"path"
"strings"
"github.com/yandex-cloud/k8s-csi-s3/pkg/mounter"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
"github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/container-storage-interface/spec/lib/go/csi"
csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
)
type controllerServer struct {
*csicommon.DefaultControllerServer
}
func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
params := req.GetParameters()
capacityBytes := int64(req.GetCapacityRange().GetRequiredBytes())
volumeID := sanitizeVolumeID(req.GetName())
bucketName := volumeID
prefix := ""
// check if bucket name is overridden
if params[mounter.BucketKey] != "" {
bucketName = params[mounter.BucketKey]
prefix = volumeID
volumeID = path.Join(bucketName, prefix)
}
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid create volume req: %v", req)
return nil, err
}
// Check arguments
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Name missing in request")
}
if req.GetVolumeCapabilities() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume Capabilities missing in request")
}
glog.V(4).Infof("Got a request to create volume %s", volumeID)
client, err := s3.NewClientFromSecret(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
exists, err := client.BucketExists(bucketName)
if err != nil {
return nil, fmt.Errorf("failed to check if bucket %s exists: %v", volumeID, err)
}
if !exists {
if err = client.CreateBucket(bucketName); err != nil {
return nil, fmt.Errorf("failed to create bucket %s: %v", bucketName, err)
}
}
if err = client.CreatePrefix(bucketName, prefix); err != nil {
return nil, fmt.Errorf("failed to create prefix %s: %v", prefix, err)
}
glog.V(4).Infof("create volume %s", volumeID)
// DeleteVolume lacks VolumeContext, but publish&unpublish requests have it,
// so we don't need to store additional metadata anywhere
context := make(map[string]string)
for k, v := range params {
context[k] = v
}
context["capacity"] = fmt.Sprintf("%v", capacityBytes)
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: volumeID,
CapacityBytes: capacityBytes,
VolumeContext: context,
},
}, nil
}
func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
volumeID := req.GetVolumeId()
bucketName, prefix := volumeIDToBucketPrefix(volumeID)
// Check arguments
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("Invalid delete volume req: %v", req)
return nil, err
}
glog.V(4).Infof("Deleting volume %s", volumeID)
client, err := s3.NewClientFromSecret(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
var deleteErr error
if prefix == "" {
// prefix is empty, we delete the whole bucket
if err := client.RemoveBucket(bucketName); err != nil && err.Error() != "The specified bucket does not exist" {
deleteErr = err
} else {
glog.V(4).Infof("Bucket %s removed", bucketName)
}
} else {
if err := client.RemovePrefix(bucketName, prefix); err != nil {
deleteErr = fmt.Errorf("unable to remove prefix: %w", err)
} else {
glog.V(4).Infof("Prefix %s removed", prefix)
}
}
if deleteErr != nil {
return nil, deleteErr
}
return &csi.DeleteVolumeResponse{}, nil
}
func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if req.GetVolumeCapabilities() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capabilities missing in request")
}
bucketName, _ := volumeIDToBucketPrefix(req.GetVolumeId())
client, err := s3.NewClientFromSecret(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
exists, err := client.BucketExists(bucketName)
if err != nil {
return nil, err
}
if !exists {
// return an error if the bucket of the requested volume does not exist
return nil, status.Error(codes.NotFound, fmt.Sprintf("bucket of volume with id %s does not exist", req.GetVolumeId()))
}
supportedAccessMode := &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
}
for _, capability := range req.VolumeCapabilities {
if capability.GetAccessMode().GetMode() != supportedAccessMode.GetMode() {
return &csi.ValidateVolumeCapabilitiesResponse{Message: "Only single node writer is supported"}, nil
}
}
return &csi.ValidateVolumeCapabilitiesResponse{
Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{
VolumeCapabilities: []*csi.VolumeCapability{
{
AccessMode: supportedAccessMode,
},
},
},
}, nil
}
func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
return &csi.ControllerExpandVolumeResponse{}, status.Error(codes.Unimplemented, "ControllerExpandVolume is not implemented")
}
func sanitizeVolumeID(volumeID string) string {
volumeID = strings.ToLower(volumeID)
if len(volumeID) > 63 {
h := sha1.New()
io.WriteString(h, volumeID)
volumeID = hex.EncodeToString(h.Sum(nil))
}
return volumeID
}
// volumeIDToBucketPrefix returns the bucket name and prefix based on the volumeID.
// Prefix is empty if volumeID does not have a slash in the name.
func volumeIDToBucketPrefix(volumeID string) (string, string) {
// if the volumeID has a slash in it, this volume is
// stored under a certain prefix within the bucket.
splitVolumeID := strings.SplitN(volumeID, "/", 2)
if len(splitVolumeID) > 1 {
return splitVolumeID[0], splitVolumeID[1]
}
return volumeID, ""
}
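Together, sanitizeVolumeID and volumeIDToBucketPrefix define how CSI volume names map onto S3 locations. A standalone sketch of the mapping, using hypothetical volume IDs:
package main

import (
	"fmt"
	"strings"
)

// Same logic as volumeIDToBucketPrefix above: IDs with a slash address a
// prefix inside an existing bucket, IDs without one address a whole bucket.
func volumeIDToBucketPrefix(volumeID string) (string, string) {
	splitVolumeID := strings.SplitN(volumeID, "/", 2)
	if len(splitVolumeID) > 1 {
		return splitVolumeID[0], splitVolumeID[1]
	}
	return volumeID, ""
}

func main() {
	fmt.Println(volumeIDToBucketPrefix("mybucket/pvc-1234")) // mybucket pvc-1234
	fmt.Println(volumeIDToBucketPrefix("pvc-1234"))          // pvc-1234 (no prefix)
}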

View file

@ -1,88 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
)
type driver struct {
driver *csicommon.CSIDriver
endpoint string
ids *identityServer
ns *nodeServer
cs *controllerServer
}
var (
vendorVersion = "v1.34.7"
driverName = "ru.yandex.s3.csi"
)
// New initializes the driver
func New(nodeID string, endpoint string) (*driver, error) {
d := csicommon.NewCSIDriver(driverName, vendorVersion, nodeID)
if d == nil {
glog.Fatalln("Failed to initialize CSI Driver.")
}
s3Driver := &driver{
endpoint: endpoint,
driver: d,
}
return s3Driver, nil
}
func (s3 *driver) newIdentityServer(d *csicommon.CSIDriver) *identityServer {
return &identityServer{
DefaultIdentityServer: csicommon.NewDefaultIdentityServer(d),
}
}
func (s3 *driver) newControllerServer(d *csicommon.CSIDriver) *controllerServer {
return &controllerServer{
DefaultControllerServer: csicommon.NewDefaultControllerServer(d),
}
}
func (s3 *driver) newNodeServer(d *csicommon.CSIDriver) *nodeServer {
return &nodeServer{
DefaultNodeServer: csicommon.NewDefaultNodeServer(d),
}
}
func (s3 *driver) Run() {
glog.Infof("Driver: %v ", driverName)
glog.Infof("Version: %v ", vendorVersion)
// Initialize default library driver
s3.driver.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME})
s3.driver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER})
// Create GRPC servers
s3.ids = s3.newIdentityServer(s3.driver)
s3.ns = s3.newNodeServer(s3.driver)
s3.cs = s3.newControllerServer(s3.driver)
s := csicommon.NewNonBlockingGRPCServer()
s.Start(s3.endpoint, s3.ids, s3.cs, s3.ns)
s.Wait()
}
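Run wires the identity, controller and node servers into a single gRPC endpoint. For reference, a minimal sketch of a node binary using it, mirroring how the sanity tests below construct the driver; the node ID and socket path are hypothetical:
package main

import (
	"log"

	"github.com/yandex-cloud/k8s-csi-s3/pkg/driver"
)

func main() {
	// hypothetical node ID and CSI endpoint
	d, err := driver.New("node-1", "unix:///csi/csi.sock")
	if err != nil {
		log.Fatal(err)
	}
	d.Run()
}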

View file

@ -1,123 +0,0 @@
package driver_test
import (
"log"
"os"
"github.com/yandex-cloud/k8s-csi-s3/pkg/driver"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/kubernetes-csi/csi-test/pkg/sanity"
)
var _ = Describe("S3Driver", func() {
Context("geesefs", func() {
socket := "/tmp/csi-geesefs.sock"
csiEndpoint := "unix://" + socket
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred())
}
driver, err := driver.New("test-node", csiEndpoint)
if err != nil {
log.Fatal(err)
}
go driver.Run()
Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{
TargetPath: os.TempDir() + "/geesefs-target",
StagingPath: os.TempDir() + "/geesefs-staging",
Address: csiEndpoint,
SecretsFile: "../../test/secret.yaml",
TestVolumeParameters: map[string]string{
"mounter": "geesefs",
"bucket": "testbucket0",
},
}
sanity.GinkgoTest(sanityCfg)
})
})
Context("geesefs-no-bucket", func() {
socket := "/tmp/csi-geesefs-no-bucket.sock"
csiEndpoint := "unix://" + socket
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred())
}
driver, err := driver.New("test-node", csiEndpoint)
if err != nil {
log.Fatal(err)
}
go driver.Run()
Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{
TargetPath: os.TempDir() + "/geesefs-no-bucket-target",
StagingPath: os.TempDir() + "/geesefs-no-bucket-staging",
Address: csiEndpoint,
SecretsFile: "../../test/secret.yaml",
TestVolumeParameters: map[string]string{
"mounter": "geesefs",
},
}
sanity.GinkgoTest(sanityCfg)
})
})
/* Context("s3fs", func() {
socket := "/tmp/csi-s3fs.sock"
csiEndpoint := "unix://" + socket
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred())
}
driver, err := driver.New("test-node", csiEndpoint)
if err != nil {
log.Fatal(err)
}
go driver.Run()
Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{
TargetPath: os.TempDir() + "/s3fs-target",
StagingPath: os.TempDir() + "/s3fs-staging",
Address: csiEndpoint,
SecretsFile: "../../test/secret.yaml",
TestVolumeParameters: map[string]string{
"mounter": "s3fs",
"bucket": "testbucket1",
},
}
sanity.GinkgoTest(sanityCfg)
})
})
Context("rclone", func() {
socket := "/tmp/csi-rclone.sock"
csiEndpoint := "unix://" + socket
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred())
}
driver, err := driver.New("test-node", csiEndpoint)
if err != nil {
log.Fatal(err)
}
go driver.Run()
Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{
TargetPath: os.TempDir() + "/rclone-target",
StagingPath: os.TempDir() + "/rclone-staging",
Address: csiEndpoint,
SecretsFile: "../../test/secret.yaml",
TestVolumeParameters: map[string]string{
"mounter": "rclone",
"bucket": "testbucket3",
},
}
sanity.GinkgoTest(sanityCfg)
})
})*/
})

View file

@ -1,29 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestS3Driver(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "S3Driver")
}

View file

@ -1,25 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
)
type identityServer struct {
*csicommon.DefaultIdentityServer
}

View file

@ -1,265 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"fmt"
"os"
"os/exec"
"regexp"
"strconv"
"github.com/yandex-cloud/k8s-csi-s3/pkg/mounter"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
"github.com/golang/glog"
"golang.org/x/net/context"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/kubernetes/pkg/util/mount"
csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
)
type nodeServer struct {
*csicommon.DefaultNodeServer
}
func getMeta(bucketName, prefix string, context map[string]string) *s3.FSMeta {
mountOptions := make([]string, 0)
mountOptStr := context[mounter.OptionsKey]
if mountOptStr != "" {
re := regexp.MustCompile(`([^\s"]+|"([^"\\]+|\\")*")+`)
re2 := regexp.MustCompile(`"([^"\\]+|\\")*"`)
re3 := regexp.MustCompile(`\\(.)`)
for _, opt := range re.FindAll([]byte(mountOptStr), -1) {
// Unquote options
opt = re2.ReplaceAllFunc(opt, func(q []byte) []byte {
return re3.ReplaceAll(q[1 : len(q)-1], []byte("$1"))
})
mountOptions = append(mountOptions, string(opt))
}
}
capacity, _ := strconv.ParseInt(context["capacity"], 10, 64)
return &s3.FSMeta{
BucketName: bucketName,
Prefix: prefix,
Mounter: context[mounter.TypeKey],
MountOptions: mountOptions,
CapacityBytes: capacity,
}
}
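The regular expressions above implement shell-like unquoting for the options volume parameter. A standalone sketch of the same logic on a hypothetical options string:
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// same patterns as getMeta above
	re := regexp.MustCompile(`([^\s"]+|"([^"\\]+|\\")*")+`)
	re2 := regexp.MustCompile(`"([^"\\]+|\\")*"`)
	re3 := regexp.MustCompile(`\\(.)`)
	// hypothetical value of a volume's "options" parameter
	mountOptStr := `--memory-limit 1000 --dir-mode "0777"`
	var mountOptions []string
	for _, opt := range re.FindAll([]byte(mountOptStr), -1) {
		// unquote options exactly as getMeta does
		opt = re2.ReplaceAllFunc(opt, func(q []byte) []byte {
			return re3.ReplaceAll(q[1:len(q)-1], []byte("$1"))
		})
		mountOptions = append(mountOptions, string(opt))
	}
	fmt.Println(mountOptions) // [--memory-limit 1000 --dir-mode 0777]
}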
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
volumeID := req.GetVolumeId()
targetPath := req.GetTargetPath()
stagingTargetPath := req.GetStagingTargetPath()
// Check arguments
if req.GetVolumeCapability() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
}
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(stagingTargetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Staging Target path missing in request")
}
if len(targetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
notMnt, err := checkMount(stagingTargetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if notMnt {
// Staged mount is dead for some reason, revive it
bucketName, prefix := volumeIDToBucketPrefix(volumeID)
client, err := s3.NewClientFromSecret(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
meta := getMeta(bucketName, prefix, req.VolumeContext)
// keep the client and mounter in their own variables instead of shadowing the s3 and mounter packages
mnt, err := mounter.New(meta, client.Config)
if err != nil {
return nil, err
}
if err := mnt.Mount(stagingTargetPath, volumeID); err != nil {
return nil, err
}
}
notMnt, err = checkMount(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if !notMnt {
return &csi.NodePublishVolumeResponse{}, nil
}
// TODO: Implement readOnly & mountFlags
readOnly := req.GetReadonly()
mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()
attrib := req.GetVolumeContext()
glog.V(4).Infof("target %v\nreadonly %v\nvolumeId %v\nattributes %v\nmountflags %v\n",
targetPath, readOnly, volumeID, attrib, mountFlags)
cmd := exec.Command("mount", "--bind", stagingTargetPath, targetPath)
cmd.Stderr = os.Stderr
glog.V(3).Infof("Binding volume %v from %v to %v", volumeID, stagingTargetPath, targetPath)
out, err := cmd.Output()
if err != nil {
return nil, fmt.Errorf("Error running mount --bind %v %v: %s", stagingTargetPath, targetPath, out)
}
glog.V(4).Infof("s3: volume %s successfully mounted to %s", volumeID, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
volumeID := req.GetVolumeId()
targetPath := req.GetTargetPath()
// Check arguments
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(targetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
if err := mounter.Unmount(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("s3: volume %s has been unmounted.", volumeID)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
volumeID := req.GetVolumeId()
stagingTargetPath := req.GetStagingTargetPath()
bucketName, prefix := volumeIDToBucketPrefix(volumeID)
// Check arguments
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(stagingTargetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
if req.VolumeCapability == nil {
return nil, status.Error(codes.InvalidArgument, "NodeStageVolume Volume Capability must be provided")
}
notMnt, err := checkMount(stagingTargetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if !notMnt {
return &csi.NodeStageVolumeResponse{}, nil
}
client, err := s3.NewClientFromSecret(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
meta := getMeta(bucketName, prefix, req.VolumeContext)
// mnt avoids shadowing the mounter package
mnt, err := mounter.New(meta, client.Config)
if err != nil {
return nil, err
}
if err := mnt.Mount(stagingTargetPath, volumeID); err != nil {
return nil, err
}
return &csi.NodeStageVolumeResponse{}, nil
}
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
volumeID := req.GetVolumeId()
stagingTargetPath := req.GetStagingTargetPath()
// Check arguments
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(stagingTargetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
proc, err := mounter.FindFuseMountProcess(stagingTargetPath)
if err != nil {
return nil, err
}
exists := false
if proc == nil {
exists, err = mounter.SystemdUnmount(volumeID)
if exists && err != nil {
return nil, err
}
}
if !exists {
if err = mounter.FuseUnmount(stagingTargetPath); err != nil {
return nil, err
}
}
glog.V(4).Infof("s3: volume %s has been unmounted from stage path %v.", volumeID, stagingTargetPath)
return &csi.NodeUnstageVolumeResponse{}, nil
}
// NodeGetCapabilities returns the supported capabilities of the node server
func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
// currently there is a single NodeServer capability according to the spec
nscap := &csi.NodeServiceCapability{
Type: &csi.NodeServiceCapability_Rpc{
Rpc: &csi.NodeServiceCapability_RPC{
Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
},
},
}
return &csi.NodeGetCapabilitiesResponse{
Capabilities: []*csi.NodeServiceCapability{
nscap,
},
}, nil
}
func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
return &csi.NodeExpandVolumeResponse{}, status.Error(codes.Unimplemented, "NodeExpandVolume is not implemented")
}
func checkMount(targetPath string) (bool, error) {
notMnt, err := mount.New("").IsLikelyNotMountPoint(targetPath)
if err != nil {
if os.IsNotExist(err) {
if err = os.MkdirAll(targetPath, 0750); err != nil {
return false, err
}
notMnt = true
} else {
return false, err
}
}
return notMnt, nil
}

View file

@ -1,205 +0,0 @@
package mounter
import (
"fmt"
"os"
"strings"
"time"
systemd "github.com/coreos/go-systemd/v22/dbus"
dbus "github.com/godbus/dbus/v5"
"github.com/golang/glog"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
)
const (
geesefsCmd = "geesefs"
)
// Implements Mounter
type geesefsMounter struct {
meta *s3.FSMeta
endpoint string
region string
accessKeyID string
secretAccessKey string
}
func newGeeseFSMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
return &geesefsMounter{
meta: meta,
endpoint: cfg.Endpoint,
region: cfg.Region,
accessKeyID: cfg.AccessKeyID,
secretAccessKey: cfg.SecretAccessKey,
}, nil
}
func (geesefs *geesefsMounter) CopyBinary(from, to string) error {
st, err := os.Stat(from)
if err != nil {
return fmt.Errorf("Failed to stat %s: %v", from, err)
}
st2, err := os.Stat(to)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("Failed to stat %s: %v", to, err)
}
if err != nil || st2.Size() != st.Size() || st2.ModTime() != st.ModTime() {
if err == nil {
// remove the file first to not hit "text file busy" errors
err = os.Remove(to)
if err != nil {
return fmt.Errorf("Error removing %s to update it: %v", to, err)
}
}
bin, err := os.ReadFile(from)
if err != nil {
return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
}
err = os.WriteFile(to, bin, 0755)
if err != nil {
return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
}
err = os.Chtimes(to, st.ModTime(), st.ModTime())
if err != nil {
return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
}
}
return nil
}
func (geesefs *geesefsMounter) MountDirect(target string, args []string) error {
args = append([]string{
"--endpoint", geesefs.endpoint,
"-o", "allow_other",
"--log-file", "/dev/stderr",
}, args...)
envs := []string{
"AWS_ACCESS_KEY_ID=" + geesefs.accessKeyID,
"AWS_SECRET_ACCESS_KEY=" + geesefs.secretAccessKey,
}
return fuseMount(target, geesefsCmd, args, envs)
}
type execCmd struct {
Path string
Args []string
UncleanIsFailure bool
}
func (geesefs *geesefsMounter) Mount(target, volumeID string) error {
fullPath := fmt.Sprintf("%s:%s", geesefs.meta.BucketName, geesefs.meta.Prefix)
var args []string
if geesefs.region != "" {
args = append(args, "--region", geesefs.region)
}
args = append(
args,
"--setuid", "65534", // nobody. drop root privileges
"--setgid", "65534", // nogroup
)
useSystemd := true
for i := 0; i < len(geesefs.meta.MountOptions); i++ {
opt := geesefs.meta.MountOptions[i]
if opt == "--no-systemd" {
useSystemd = false
} else if len(opt) > 0 && opt[0] == '-' {
// Remove unsafe options
s := 1
if len(opt) > 1 && opt[1] == '-' {
s++
}
key := opt[s:]
e := strings.Index(opt, "=")
if e >= 0 {
key = opt[s:e]
}
if key == "log-file" || key == "shared-config" || key == "cache" {
// Skip options accessing local FS
if e < 0 {
i++
}
} else if key != "" {
args = append(args, opt)
}
} else if len(opt) > 0 {
args = append(args, opt)
}
}
args = append(args, fullPath, target)
// Try to start geesefs using systemd so it doesn't get killed when the container exits
if !useSystemd {
return geesefs.MountDirect(target, args)
}
conn, err := systemd.New()
if err != nil {
glog.Errorf("Failed to connect to systemd dbus service: %v, starting geesefs directly", err)
return geesefs.MountDirect(target, args)
}
defer conn.Close()
// systemd is present
if err = geesefs.CopyBinary("/usr/bin/geesefs", "/csi/geesefs"); err != nil {
return err
}
pluginDir := os.Getenv("PLUGIN_DIR")
if pluginDir == "" {
pluginDir = "/var/lib/kubelet/plugins/ru.yandex.s3.csi"
}
args = append([]string{pluginDir+"/geesefs", "-f", "-o", "allow_other", "--endpoint", geesefs.endpoint}, args...)
glog.Info("Starting geesefs using systemd: "+strings.Join(args, " "))
unitName := "geesefs-"+systemd.PathBusEscape(volumeID)+".service"
newProps := []systemd.Property{
systemd.Property{
Name: "Description",
Value: dbus.MakeVariant("GeeseFS mount for Kubernetes volume "+volumeID),
},
systemd.PropExecStart(args, false),
systemd.Property{
Name: "ExecStopPost",
// force & lazy unmount to cleanup possibly dead mountpoints
Value: dbus.MakeVariant([]execCmd{ execCmd{ "/bin/umount", []string{ "/bin/umount", "-f", "-l", target }, false } }),
},
systemd.Property{
Name: "Environment",
Value: dbus.MakeVariant([]string{ "AWS_ACCESS_KEY_ID="+geesefs.accessKeyID, "AWS_SECRET_ACCESS_KEY="+geesefs.secretAccessKey }),
},
systemd.Property{
Name: "CollectMode",
Value: dbus.MakeVariant("inactive-or-failed"),
},
}
unitProps, err := conn.GetAllProperties(unitName)
if err == nil {
// Unit already exists
if s, ok := unitProps["ActiveState"].(string); ok && (s == "active" || s == "activating" || s == "reloading") {
// Unit is already active
curPath := ""
prevExec, ok := unitProps["ExecStart"].([][]interface{})
if ok && len(prevExec) > 0 && len(prevExec[0]) >= 2 {
execArgs, ok := prevExec[0][1].([]string)
if ok && len(execArgs) >= 2 {
curPath = execArgs[len(execArgs)-1]
}
}
if curPath != target {
return fmt.Errorf(
"GeeseFS for volume %v is already mounted on host, but"+
" in a different directory. We want %v, but it's in %v",
volumeID, target, curPath,
)
}
// Already mounted at right location
return nil
} else {
// Stop and garbage collect the unit if automatic collection didn't work for some reason
conn.StopUnit(unitName, "replace", nil)
conn.ResetFailedUnit(unitName)
}
}
_, err = conn.StartTransientUnit(unitName, "replace", newProps, nil)
if err != nil {
return fmt.Errorf("Error starting systemd unit %s on host: %v", unitName, err)
}
return waitForMount(target, 10*time.Second)
}
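Because the transient unit outlives the plugin container, a mount can be inspected on the host with systemctl. A small standalone sketch for deriving the unit name used above from a volume ID, assuming the same go-systemd escaping; the volume ID itself is hypothetical:
package main

import (
	"fmt"

	systemd "github.com/coreos/go-systemd/v22/dbus"
)

func main() {
	volumeID := "mybucket/pvc-1234" // hypothetical volume ID
	// same naming scheme as Mount above
	fmt.Println("geesefs-" + systemd.PathBusEscape(volumeID) + ".service")
}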

View file

@ -1,208 +0,0 @@
package mounter
import (
"errors"
"fmt"
"io/ioutil"
"math"
"os"
"os/exec"
"strings"
"syscall"
"time"
systemd "github.com/coreos/go-systemd/v22/dbus"
"github.com/golang/glog"
"github.com/mitchellh/go-ps"
"k8s.io/kubernetes/pkg/util/mount"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
)
// Mounter interface which can be implemented
// by the different mounter types
type Mounter interface {
Mount(target, volumeID string) error
}
const (
s3fsMounterType = "s3fs"
geesefsMounterType = "geesefs"
rcloneMounterType = "rclone"
TypeKey = "mounter"
BucketKey = "bucket"
OptionsKey = "options"
)
// New returns a new mounter depending on the mounterType parameter
func New(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
mounter := meta.Mounter
// Fall back to mounterType in cfg
if len(meta.Mounter) == 0 {
mounter = cfg.Mounter
}
switch mounter {
case geesefsMounterType:
return newGeeseFSMounter(meta, cfg)
case s3fsMounterType:
return newS3fsMounter(meta, cfg)
case rcloneMounterType:
return newRcloneMounter(meta, cfg)
default:
// default to GeeseFS
return newGeeseFSMounter(meta, cfg)
}
}
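The factory above dispatches on the mounter volume parameter and defaults to GeeseFS. Supporting another FUSE backend only takes a Mount implementation plus a new case in this switch; a minimal sketch of what that could look like inside pkg/mounter, where the myMounter type and the mycsfs binary are hypothetical:
// Sketch only: myMounter and the mycsfs binary are hypothetical.
type myMounter struct {
	meta *s3.FSMeta
	cfg  *s3.Config
}

func newMyMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
	return &myMounter{meta: meta, cfg: cfg}, nil
}

func (m *myMounter) Mount(target, volumeID string) error {
	args := []string{
		m.meta.BucketName + ":" + m.meta.Prefix, // bucket and prefix to mount
		target,
		"--endpoint", m.cfg.Endpoint,
	}
	envs := []string{
		"AWS_ACCESS_KEY_ID=" + m.cfg.AccessKeyID,
		"AWS_SECRET_ACCESS_KEY=" + m.cfg.SecretAccessKey,
	}
	// fuseMount is the package helper defined in this file
	return fuseMount(target, "mycsfs", args, envs)
}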
func fuseMount(path string, command string, args []string, envs []string) error {
cmd := exec.Command(command, args...)
cmd.Stderr = os.Stderr
// cmd.Environ() returns envs inherited from the current process
cmd.Env = append(cmd.Environ(), envs...)
glog.V(3).Infof("Mounting fuse with command: %s and args: %s", command, args)
out, err := cmd.Output()
if err != nil {
return fmt.Errorf("Error fuseMount command: %s\nargs: %s\noutput: %s", command, args, out)
}
return waitForMount(path, 10*time.Second)
}
func Unmount(path string) error {
if err := mount.New("").Unmount(path); err != nil {
return err
}
return nil
}
func SystemdUnmount(volumeID string) (bool, error) {
conn, err := systemd.New()
if err != nil {
glog.Errorf("Failed to connect to systemd dbus service: %v", err)
return false, err
}
defer conn.Close()
unitName := "geesefs-"+systemd.PathBusEscape(volumeID)+".service"
units, err := conn.ListUnitsByNames([]string{ unitName })
glog.Errorf("Got %v", units)
if err != nil {
glog.Errorf("Failed to list systemd unit by name %v: %v", unitName, err)
return false, err
}
if len(units) == 0 || units[0].ActiveState == "inactive" || units[0].ActiveState == "failed" {
return true, nil
}
_, err = conn.StopUnit(unitName, "replace", nil)
return true, err
}
func FuseUnmount(path string) error {
if err := mount.New("").Unmount(path); err != nil {
return err
}
// as fuse quits immediately, we will try to wait until the process is done
process, err := FindFuseMountProcess(path)
if err != nil {
glog.Errorf("Error getting PID of fuse mount: %s", err)
return nil
}
if process == nil {
glog.Warningf("Unable to find PID of fuse mount %s, it must have finished already", path)
return nil
}
glog.Infof("Found fuse pid %v of mount %s, checking if it still runs", process.Pid, path)
return waitForProcess(process, 20)
}
func waitForMount(path string, timeout time.Duration) error {
var elapsed time.Duration
var interval = 10 * time.Millisecond
for {
notMount, err := mount.New("").IsNotMountPoint(path)
if err != nil {
return err
}
if !notMount {
return nil
}
time.Sleep(interval)
elapsed = elapsed + interval
if elapsed >= timeout {
return errors.New("Timeout waiting for mount")
}
}
}
func FindFuseMountProcess(path string) (*os.Process, error) {
processes, err := ps.Processes()
if err != nil {
return nil, err
}
for _, p := range processes {
cmdLine, err := getCmdLine(p.Pid())
if err != nil {
glog.Errorf("Unable to get cmdline of PID %v: %s", p.Pid(), err)
continue
}
if strings.Contains(cmdLine, path) {
glog.Infof("Found matching pid %v on path %s", p.Pid(), path)
return os.FindProcess(p.Pid())
}
}
return nil, nil
}
func waitForProcess(p *os.Process, limit int) error {
for backoff := 0; backoff < limit; backoff++ {
cmdLine, err := getCmdLine(p.Pid)
if err != nil {
glog.Warningf("Error checking cmdline of PID %v, assuming it is dead: %s", p.Pid, err)
p.Wait()
return nil
}
if cmdLine == "" {
glog.Warning("Fuse process seems dead, returning")
p.Wait()
return nil
}
if err := p.Signal(syscall.Signal(0)); err != nil {
glog.Warningf("Fuse process does not seem active or we are unprivileged: %s", err)
p.Wait()
return nil
}
glog.Infof("Fuse process with PID %v still active, waiting...", p.Pid)
time.Sleep(time.Duration(math.Pow(1.5, float64(backoff))*100) * time.Millisecond)
}
p.Release()
return fmt.Errorf("Timeout waiting for PID %v to end", p.Pid)
}
func getCmdLine(pid int) (string, error) {
cmdLineFile := fmt.Sprintf("/proc/%v/cmdline", pid)
cmdLine, err := os.ReadFile(cmdLineFile)
if err != nil {
return "", err
}
return string(cmdLine), nil
}
func createLoopDevice(device string) error {
if _, err := os.Stat(device); !os.IsNotExist(err) {
return nil
}
args := []string{
device,
"b", "7", "0",
}
cmd := exec.Command("mknod", args...)
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Error configuring loop device: %s", out)
}
return nil
}

View file

@ -1,54 +0,0 @@
package mounter
import (
"fmt"
"path"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
)
// Implements Mounter
type rcloneMounter struct {
meta *s3.FSMeta
url string
region string
accessKeyID string
secretAccessKey string
}
const (
rcloneCmd = "rclone"
)
func newRcloneMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
return &rcloneMounter{
meta: meta,
url: cfg.Endpoint,
region: cfg.Region,
accessKeyID: cfg.AccessKeyID,
secretAccessKey: cfg.SecretAccessKey,
}, nil
}
func (rclone *rcloneMounter) Mount(target, volumeID string) error {
args := []string{
"mount",
fmt.Sprintf(":s3:%s", path.Join(rclone.meta.BucketName, rclone.meta.Prefix)),
fmt.Sprintf("%s", target),
"--daemon",
"--s3-provider=AWS",
"--s3-env-auth=true",
fmt.Sprintf("--s3-endpoint=%s", rclone.url),
"--allow-other",
"--vfs-cache-mode=writes",
}
if rclone.region != "" {
args = append(args, fmt.Sprintf("--s3-region=%s", rclone.region))
}
args = append(args, rclone.meta.MountOptions...)
envs := []string{
"AWS_ACCESS_KEY_ID=" + rclone.accessKeyID,
"AWS_SECRET_ACCESS_KEY=" + rclone.secretAccessKey,
}
return fuseMount(target, rcloneCmd, args, envs)
}

View file

@ -1,62 +0,0 @@
package mounter
import (
"fmt"
"os"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
)
// Implements Mounter
type s3fsMounter struct {
meta *s3.FSMeta
url string
region string
pwFileContent string
}
const (
s3fsCmd = "s3fs"
)
func newS3fsMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
return &s3fsMounter{
meta: meta,
url: cfg.Endpoint,
region: cfg.Region,
pwFileContent: cfg.AccessKeyID + ":" + cfg.SecretAccessKey,
}, nil
}
func (s3fs *s3fsMounter) Mount(target, volumeID string) error {
if err := writes3fsPass(s3fs.pwFileContent); err != nil {
return err
}
args := []string{
fmt.Sprintf("%s:/%s", s3fs.meta.BucketName, s3fs.meta.Prefix),
target,
"-o", "use_path_request_style",
"-o", fmt.Sprintf("url=%s", s3fs.url),
"-o", "allow_other",
"-o", "mp_umask=000",
}
if s3fs.region != "" {
args = append(args, "-o", fmt.Sprintf("endpoint=%s", s3fs.region))
}
args = append(args, s3fs.meta.MountOptions...)
return fuseMount(target, s3fsCmd, args, nil)
}
func writes3fsPass(pwFileContent string) error {
pwFileName := fmt.Sprintf("%s/.passwd-s3fs", os.Getenv("HOME"))
pwFile, err := os.OpenFile(pwFileName, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return err
}
// close the password file on every return path, not only on success
defer pwFile.Close()
_, err = pwFile.WriteString(pwFileContent)
return err
}

View file

@ -1,222 +0,0 @@
package s3
import (
"bytes"
"context"
"fmt"
"net/url"
"sync/atomic"
"github.com/golang/glog"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
const (
metadataName = ".metadata.json"
)
type s3Client struct {
Config *Config
minio *minio.Client
ctx context.Context
}
// Config holds values to configure the driver
type Config struct {
AccessKeyID string
SecretAccessKey string
Region string
Endpoint string
Mounter string
}
type FSMeta struct {
BucketName string `json:"Name"`
Prefix string `json:"Prefix"`
Mounter string `json:"Mounter"`
MountOptions []string `json:"MountOptions"`
CapacityBytes int64 `json:"CapacityBytes"`
}
func NewClient(cfg *Config) (*s3Client, error) {
var client = &s3Client{}
client.Config = cfg
u, err := url.Parse(client.Config.Endpoint)
if err != nil {
return nil, err
}
ssl := u.Scheme == "https"
endpoint := u.Hostname()
if u.Port() != "" {
endpoint = u.Hostname() + ":" + u.Port()
}
minioClient, err := minio.New(endpoint, &minio.Options{
Creds: credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, ""),
Secure: ssl,
})
if err != nil {
return nil, err
}
client.minio = minioClient
client.ctx = context.Background()
return client, nil
}
func NewClientFromSecret(secret map[string]string) (*s3Client, error) {
return NewClient(&Config{
AccessKeyID: secret["accessKeyID"],
SecretAccessKey: secret["secretAccessKey"],
Region: secret["region"],
Endpoint: secret["endpoint"],
// Mounter is set in the volume preferences, not secrets
Mounter: "",
})
}
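NewClientFromSecret maps the keys of the Kubernetes secret (accessKeyID, secretAccessKey, endpoint, region) onto Config. A standalone sketch of using the client outside the driver; the credentials and bucket name are hypothetical:
package main

import (
	"fmt"

	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
)

func main() {
	client, err := s3.NewClientFromSecret(map[string]string{
		"accessKeyID":     "EXAMPLEKEY", // hypothetical credentials
		"secretAccessKey": "EXAMPLESECRET",
		"endpoint":        "https://storage.yandexcloud.net",
		"region":          "ru-central1",
	})
	if err != nil {
		panic(err)
	}
	exists, err := client.BucketExists("testbucket")
	fmt.Println(exists, err)
}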
func (client *s3Client) BucketExists(bucketName string) (bool, error) {
return client.minio.BucketExists(client.ctx, bucketName)
}
func (client *s3Client) CreateBucket(bucketName string) error {
return client.minio.MakeBucket(client.ctx, bucketName, minio.MakeBucketOptions{Region: client.Config.Region})
}
func (client *s3Client) CreatePrefix(bucketName string, prefix string) error {
if prefix != "" {
_, err := client.minio.PutObject(client.ctx, bucketName, prefix+"/", bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
if err != nil {
return err
}
}
return nil
}
func (client *s3Client) RemovePrefix(bucketName string, prefix string) error {
var err error
if err = client.removeObjects(bucketName, prefix); err == nil {
return client.minio.RemoveObject(client.ctx, bucketName, prefix, minio.RemoveObjectOptions{})
}
glog.Warningf("removeObjects failed with: %s, will try removeObjectsOneByOne", err)
if err = client.removeObjectsOneByOne(bucketName, prefix); err == nil {
return client.minio.RemoveObject(client.ctx, bucketName, prefix, minio.RemoveObjectOptions{})
}
return err
}
func (client *s3Client) RemoveBucket(bucketName string) error {
var err error
if err = client.removeObjects(bucketName, ""); err == nil {
return client.minio.RemoveBucket(client.ctx, bucketName)
}
glog.Warningf("removeObjects failed with: %s, will try removeObjectsOneByOne", err)
if err = client.removeObjectsOneByOne(bucketName, ""); err == nil {
return client.minio.RemoveBucket(client.ctx, bucketName)
}
return err
}
func (client *s3Client) removeObjects(bucketName, prefix string) error {
objectsCh := make(chan minio.ObjectInfo)
var listErr error
go func() {
defer close(objectsCh)
for object := range client.minio.ListObjects(
client.ctx,
bucketName,
minio.ListObjectsOptions{Prefix: prefix, Recursive: true}) {
if object.Err != nil {
listErr = object.Err
return
}
objectsCh <- object
}
}()
opts := minio.RemoveObjectsOptions{
GovernanceBypass: true,
}
errorCh := client.minio.RemoveObjects(client.ctx, bucketName, objectsCh, opts)
haveErrWhenRemoveObjects := false
for e := range errorCh {
glog.Errorf("Failed to remove object %s, error: %s", e.ObjectName, e.Err)
haveErrWhenRemoveObjects = true
}
if haveErrWhenRemoveObjects {
return fmt.Errorf("failed to remove all objects of bucket %s", bucketName)
}
// objectsCh is closed at this point, so the listing goroutine has exited
// and listErr can be read without a race
if listErr != nil {
glog.Error("Error listing objects", listErr)
return listErr
}
return nil
}
// removeObjectsOneByOne deletes objects one at a time; used as a fallback
// when the bulk RemoveObjects call fails
func (client *s3Client) removeObjectsOneByOne(bucketName, prefix string) error {
parallelism := 16
objectsCh := make(chan minio.ObjectInfo, 1)
guardCh := make(chan int, parallelism)
var listErr error
totalObjects := 0
var removeErrors int64
go func() {
defer close(objectsCh)
for object := range client.minio.ListObjects(client.ctx, bucketName,
minio.ListObjectsOptions{Prefix: prefix, Recursive: true}) {
if object.Err != nil {
listErr = object.Err
return
}
totalObjects++
objectsCh <- object
}
}()
for object := range objectsCh {
guardCh <- 1
// pass the object as an argument so every worker deletes its own object,
// not whatever the loop variable points to by the time it runs
go func(object minio.ObjectInfo) {
err := client.minio.RemoveObject(client.ctx, bucketName, object.Key,
minio.RemoveObjectOptions{VersionID: object.VersionID})
if err != nil {
glog.Errorf("Failed to remove object %s, error: %s", object.Key, err)
// removeErrors is shared between workers, increment it atomically
atomic.AddInt64(&removeErrors, 1)
}
<- guardCh
}(object)
}
// filling the guard channel to capacity waits for all workers to finish
for i := 0; i < parallelism; i++ {
guardCh <- 1
}
for i := 0; i < parallelism; i++ {
<- guardCh
}
// objectsCh is closed at this point, so the listing goroutine has exited
// and listErr can be read without a race
if listErr != nil {
glog.Error("Error listing objects", listErr)
return listErr
}
if removeErrors > 0 {
return fmt.Errorf("failed to remove %v objects out of total %v from bucket %s", removeErrors, totalObjects, bucketName)
}
return nil
}
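The guard channel above is a counting semaphore: each in-flight worker holds one slot, and filling the channel to capacity blocks until every slot has been released. The same pattern in isolation:
package main

import "fmt"

func main() {
	parallelism := 4
	guardCh := make(chan int, parallelism)
	for i := 0; i < 10; i++ {
		guardCh <- 1 // acquire a slot; blocks while 4 workers are running
		go func(i int) {
			fmt.Println("processing item", i)
			<-guardCh // release the slot
		}(i)
	}
	// filling the channel to capacity blocks until every worker is done
	for i := 0; i < parallelism; i++ {
		guardCh <- 1
	}
}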

View file

@ -1,24 +0,0 @@
FROM golang:1.16-buster
LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
LABEL description="csi-s3 testing image"
# Minio download servers are TERRIBLY SLOW as of 2021-10-27
#RUN wget https://dl.min.io/server/minio/release/linux-amd64/minio && \
# chmod +x minio && \
# mv minio /usr/local/bin
RUN git clone --depth=1 https://github.com/minio/minio
RUN cd minio && go build && mv minio /usr/local/bin
WORKDIR /build
# prewarm go mod cache
COPY go.mod .
COPY go.sum .
RUN go mod download
RUN wget https://github.com/yandex-cloud/geesefs/releases/latest/download/geesefs-linux-amd64 \
-O /usr/bin/geesefs && chmod 755 /usr/bin/geesefs
ENTRYPOINT ["/build/test/test.sh"]

View file

@ -1,25 +0,0 @@
CreateVolumeSecret:
accessKeyID: FJDSJ
secretAccessKey: DSG643HGDS
endpoint: http://127.0.0.1:9000
region: ""
DeleteVolumeSecret:
accessKeyID: FJDSJ
secretAccessKey: DSG643HGDS
endpoint: http://127.0.0.1:9000
region: ""
NodeStageVolumeSecret:
accessKeyID: FJDSJ
secretAccessKey: DSG643HGDS
endpoint: http://127.0.0.1:9000
region: ""
NodePublishVolumeSecret:
accessKeyID: FJDSJ
secretAccessKey: DSG643HGDS
endpoint: http://127.0.0.1:9000
region: ""
ControllerValidateVolumeCapabilitiesSecret:
accessKeyID: FJDSJ
secretAccessKey: DSG643HGDS
endpoint: http://127.0.0.1:9000
region: ""

View file

@ -1,8 +0,0 @@
#!/bin/sh
export MINIO_ACCESS_KEY=FJDSJ
export MINIO_SECRET_KEY=DSG643HGDS
mkdir -p /tmp/minio
minio server /tmp/minio &>/dev/null &
sleep 5
go test ./... -cover -ginkgo.noisySkippings=false -ginkgo.skip="should fail when requesting to create a volume with already existing name and different capacity"