Compare commits


No commits in common. "master" and "v0.34.0" have entirely different histories.

29 changed files with 367 additions and 446 deletions


@ -1,48 +0,0 @@
name: Publish Helm chart
on:
push:
branches:
- master
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
contents: write
pages: write
id-token: write
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
group: "pages"
cancel-in-progress: false
jobs:
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup Pages
uses: actions/configure-pages@v3
- name: Publish Helm chart
uses: stefanprodan/helm-gh-pages@0ad2bb377311d61ac04ad9eb6f252fb68e207260 # v1.7.0
with:
# GitHub automatically creates a GITHUB_TOKEN secret to use in your workflow. You can use the GITHUB_TOKEN to authenticate in a workflow run.
# See https://docs.github.com/en/actions/reference/authentication-in-a-workflow#about-the-github_token-secret
token: ${{ secrets.GITHUB_TOKEN }}
branch: github-pages
charts_dir: deploy/helm
target_dir: charts
linting: off

.github/workflows/test.yml

@ -0,0 +1,16 @@
name: Test
on:
push:
tags:
- "v*"
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Test
run: make test


@ -1,18 +1,22 @@
FROM golang:1.19-alpine as gobuild
FROM golang:1.16-alpine as gobuild
WORKDIR /build
ADD go.mod go.sum /build/
RUN go mod download -x
ADD cmd /build/cmd
ADD pkg /build/pkg
RUN go get -d -v ./...
RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o ./s3driver ./cmd/s3driver
FROM alpine:3.17
FROM alpine:3.16
LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
LABEL description="csi-s3 slim image"
RUN apk add --no-cache fuse mailcap rclone
RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community s3fs-fuse
# apk add temporarily broken:
#ERROR: unable to select packages:
# so:libcrypto.so.3 (no such package):
# required by: s3fs-fuse-1.91-r1[so:libcrypto.so.3]
#RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/testing s3fs-fuse rclone
ADD https://github.com/yandex-cloud/geesefs/releases/latest/download/geesefs-linux-amd64 /usr/bin/geesefs
RUN chmod 755 /usr/bin/geesefs


@ -14,10 +14,9 @@
.PHONY: test build container push clean
REGISTRY_NAME=cr.yandex/crp9ftr22d26age3hulg
REGISTRY_NAME2=cr.il.nebius.cloud/crll7us9n6i5j3v4n92m
IMAGE_NAME=csi-s3
IMAGE_NAME2=yandex-cloud/csi-s3/csi-s3-driver
VERSION ?= 0.38.3
VERSION ?= 0.34.0
IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(VERSION)
TEST_IMAGE_TAG=$(IMAGE_NAME):test


@ -10,19 +10,7 @@ This is a Container Storage Interface ([CSI](https://github.com/container-storag
* Kubernetes has to allow privileged containers
* Docker daemon must allow shared mounts (systemd flag `MountFlags=shared`)
### Helm chart
The Helm chart is published at `https://yandex-cloud.github.io/k8s-csi-s3`:
```
helm repo add yandex-s3 https://yandex-cloud.github.io/k8s-csi-s3/charts
helm install csi-s3 yandex-s3/csi-s3
```
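Chart values can be overridden at install time with a values file. A minimal sketch, assuming the `secret.*` and `storageClass.*` keys referenced elsewhere in this diff; the file name `my-values.yaml` and all credential values are placeholders:
```yaml
# my-values.yaml -- pass with: helm install csi-s3 yandex-s3/csi-s3 -f my-values.yaml
secret:
  accessKey: "<S3 access key ID>"
  secretKey: "<S3 secret key>"
  endpoint: https://storage.yandexcloud.net
storageClass:
  name: csi-s3
  # Retain keeps the provisioned bucket/prefix when the PVC is deleted
  reclaimPolicy: Retain
```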
### Manual installation
#### 1. Create a secret with your S3 credentials
### 1. Create a secret with your S3 credentials
```yaml
apiVersion: v1
@ -42,30 +30,22 @@ stringData:
The region can be empty if you are using some other S3-compatible storage.
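The secret manifest is truncated by the diff above; for reference, a minimal sketch assuming the `accessKeyID`, `secretAccessKey` and `endpoint` keys used by the chart's secret template later in this diff (the `region` key and all values are placeholders):
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: csi-s3-secret
  namespace: kube-system
stringData:
  accessKeyID: "<S3 access key ID>"
  secretAccessKey: "<S3 secret key>"
  endpoint: https://storage.yandexcloud.net
  # May be left empty for S3-compatible storage that does not use regions
  region: ""
```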
#### 2. Deploy the driver
### 2. Deploy the driver
```bash
cd deploy/kubernetes
kubectl create -f provisioner.yaml
kubectl create -f driver.yaml
kubectl create -f attacher.yaml
kubectl create -f csi-s3.yaml
```
If you're upgrading from a previous version which had `attacher.yaml`, you can safely delete all resources created from that file:
```
wget https://raw.githubusercontent.com/yandex-cloud/k8s-csi-s3/v0.35.5/deploy/kubernetes/attacher.yaml
kubectl delete -f attacher.yaml
```
#### 3. Create the storage class
### 3. Create the storage class
```bash
kubectl create -f examples/storageclass.yaml
```
#### 4. Test the S3 driver
### 4. Test the S3 driver
1. Create a pvc using the new storage class:
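The PVC manifest itself is elided by this diff; a minimal sketch, assuming the `csi-s3` storage class created in the previous step (name and size are placeholders):
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-s3-pvc
spec:
  accessModes:
    - ReadWriteMany           # S3-backed volumes can be shared between pods
  resources:
    requests:
      storage: 10Gi           # size is advisory for object storage
  storageClassName: csi-s3
```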
@ -94,8 +74,8 @@ kubectl create -f examples/storageclass.yaml
```bash
$ kubectl exec -ti csi-s3-test-nginx bash
$ mount | grep fuse
pvc-035763df-0488-4941-9a34-f637292eb95c: on /usr/share/nginx/html/s3 type fuse.geesefs (rw,nosuid,nodev,relatime,user_id=65534,group_id=0,default_permissions,allow_other)
$ touch /usr/share/nginx/html/s3/hello_world
s3fs on /var/lib/www/html type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
$ touch /var/lib/www/html/hello_world
```
If something does not work as expected, check the troubleshooting section below.
@ -114,18 +94,11 @@ metadata:
provisioner: ru.yandex.s3.csi
parameters:
mounter: geesefs
options: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
bucket: some-existing-bucket-name
```
If a bucket is specified, it will still be created if it does not exist on the backend. Every volume gets its own prefix within the bucket, matching the volume ID; when a volume is deleted, only that prefix is removed.
### Static Provisioning
If you want to mount a pre-existing bucket, or a prefix within a pre-existing bucket, and don't want csi-s3 to delete it when the PV is deleted, you can use static provisioning.
To do that, omit `storageClassName` in the `PersistentVolumeClaim` and manually create a `PersistentVolume` with a matching `claimRef`, as in [deploy/kubernetes/examples/pvc-manual.yaml](deploy/kubernetes/examples/pvc-manual.yaml).
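The linked manifest is not reproduced in this diff; a rough sketch of the idea, with bucket, names and size as placeholders and credential secret references omitted (see the linked file for the complete example):
```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: manual-s3-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  claimRef:                         # pre-binds this PV to one specific PVC
    namespace: default
    name: manual-s3-pvc
  csi:
    driver: ru.yandex.s3.csi
    volumeHandle: manualbucket/path # pre-existing bucket and optional prefix
    volumeAttributes:
      capacity: 10Gi
      mounter: geesefs
      options: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: manual-s3-pvc
  namespace: default
spec:
  # storageClassName intentionally omitted, as described above
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
```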
### Mounter
We **strongly recommend** using the default mounter, [GeeseFS](https://github.com/yandex-cloud/geesefs).
@ -145,10 +118,6 @@ You can check POSIX compatibility matrix here: https://github.com/yandex-cloud/g
* Almost full POSIX compatibility
* Good performance for both small and big files
* Does not store file permissions and custom modification times
* By default runs **outside** the csi-s3 container via systemd, so that mountpoints do not
break with "Transport endpoint is not connected" errors when csi-s3 is upgraded
or restarted. Add `--no-systemd` to `parameters.options` of the `StorageClass`
to disable this behaviour (a sketch follows below).
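A minimal sketch of such a `StorageClass`, reusing the parameters from the example above (the class name is illustrative; credential secret parameters are omitted):
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: csi-s3-no-systemd
provisioner: ru.yandex.s3.csi
parameters:
  mounter: geesefs
  # --no-systemd keeps GeeseFS running inside the csi-s3 container
  options: "--no-systemd --memory-limit 1000 --dir-mode 0777 --file-mode 0666"
```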
#### s3fs


@ -1,9 +1,9 @@
---
apiVersion: v1
appVersion: 0.38.3
appVersion: 0.34.0
description: "Container Storage Interface (CSI) driver for S3 volumes"
name: csi-s3
version: 0.38.3
version: 0.34.0
keywords:
- s3
home: https://github.com/yandex-cloud/k8s-csi-s3


@ -26,7 +26,6 @@ The following table lists all configuration parameters and their default values.
| `storageClass.create` | Specifies whether the storage class should be created | true |
| `storageClass.name` | Storage class name | csi-s3 |
| `storageClass.singleBucket` | Use a single bucket for all dynamically provisioned persistent volumes | |
| `storageClass.mounter` | Mounter to use. Either geesefs, s3fs or rclone. geesefs recommended | geesefs |
| `storageClass.mountOptions` | GeeseFS mount options | `--memory-limit 1000 --dir-mode 0777 --file-mode 0666` |
| `storageClass.reclaimPolicy` | Volume reclaim policy | Delete |
| `storageClass.annotations` | Annotations for the storage class | |


@ -1,10 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: ru.yandex.s3.csi
spec:
attachRequired: false
podInfoOnMount: true
fsGroupPolicy: File # added in Kubernetes 1.19, this field is GA as of Kubernetes 1.23
volumeLifecycleModes: # added in Kubernetes 1.16, this field is beta
- Persistent


@ -1,9 +1,10 @@
helm_chart:
name: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3
tag: 0.38.3
tag: 0.34.0
requirements:
k8s_version: ">=1.13"
images:
- full: images.attacher
- full: images.registrar
- full: images.provisioner
- full: images.csi
@ -14,7 +15,7 @@ user_values:
ru: Создать класс хранения
description:
en: Specifies whether the storage class should be created
ru: Выберите, чтобы создать новый S3-класс хранения при развёртывании приложения.
ru: 'Если "да", при установке будет создан класс хранения S3'
boolean_value:
default_value: true
- name: secret.create
@ -23,7 +24,7 @@ user_values:
ru: Создать секрет
description:
en: Specifies whether the secret should be created
ru: Выберите, чтобы создать новый секрет для класса хранения при установке приложения, а не использовать существующий.
ru: 'Если "да", при установке будет создан секрет, иначе для класса хранения будет использован существующий'
boolean_value:
default_value: true
- name: secret.accessKey
@ -32,7 +33,7 @@ user_values:
ru: Идентификатор ключа S3
description:
en: S3 Access Key ID
ru: Идентификатор ключа S3.
ru: Идентификатор ключа S3
string_value:
default_value: ""
- name: secret.secretKey
@ -41,16 +42,16 @@ user_values:
ru: Секретный ключ S3
description:
en: S3 Secret Key
ru: Секретный ключ S3.
ru: Секретный ключ S3
string_value:
default_value: ""
- name: storageClass.singleBucket
title:
en: Single S3 bucket for volumes
ru: Общий бакет S3 для томов
ru: Общий S3 бакет для томов
description:
en: Single S3 bucket to use for all dynamically provisioned persistent volumes
ru: Общий бакет S3, в котором будут создаваться все динамически распределяемые тома. Если пусто, под каждый том будет создаваться новый бакет.
ru: Общий бакет S3, в котором будут создаваться все динамически распределяемые тома. Если пусто, под каждый том будет создаваться новый бакет
string_value:
default_value: ""
- name: secret.endpoint
@ -59,7 +60,7 @@ user_values:
ru: Адрес S3-сервиса
description:
en: S3 service endpoint to use
ru: Адрес S3-сервиса, который будет использоваться.
ru: Адрес S3-сервиса, который будет использоваться
string_value:
default_value: "https://storage.yandexcloud.net"
- name: storageClass.mountOptions
@ -67,8 +68,8 @@ user_values:
en: GeeseFS mount options
ru: Опции монтирования GeeseFS
description:
en: GeeseFS mount options to use. Refer to `geesefs --help` command output for the whole list of options (https://github.com/yandex-cloud/geesefs).
ru: Опции монтирования GeeseFS. Полный перечень и описание опций смотрите в выводе команды `geesefs --help` (https://github.com/yandex-cloud/geesefs).
en: GeeseFS mount options to use. Consult GeeseFS (https://github.com/yandex-cloud/geesefs) help for the full option list
ru: Опции монтирования GeeseFS. Смотрите справку GeeseFS (https://github.com/yandex-cloud/geesefs) для полного перечня опций
string_value:
default_value: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
- name: storageClass.reclaimPolicy
@ -77,7 +78,7 @@ user_values:
ru: Политика очистки томов
description:
en: Volume reclaim policy for the storage class (Retain or Delete)
ru: Выберите политику очистки томов PersistentVolume при удалении PersistentVolumeClaim. Retain — сохранять том, Delete — удалять том.
ru: Политика очистки PV, связанных с PVC (Retain - сохранять при удалении PVC, Delete - удалять при удалении PVC)
string_selector_value:
default_value: Delete
values:
@ -89,7 +90,7 @@ user_values:
ru: Название класса хранения
description:
en: Name of the storage class that will be created
ru: Название класса хранения, который будет создан при установке.
ru: Название класса хранения, который будет создан при установке
string_value:
default_value: csi-s3
- name: secret.name
@ -98,15 +99,15 @@ user_values:
ru: Название секрета
description:
en: Name of the secret to create or use for the storage class
ru: Название секрета, который будет создан или использован для класса хранения.
ru: Название секрета, который будет создан или использован для класса хранения
string_value:
default_value: csi-s3-secret
- name: tolerations.all
title:
en: Tolerate all taints
ru: Игнорировать все политики taint
ru: Игнорировать все taint
description:
en: Tolerate all taints by the CSI-S3 node driver (mounter)
ru: Выберите, чтобы драйвер CSI, который монтирует файловую систему на узлах, игнорировал все политики taint для узлов кластера.
ru: Игнорировать все taint-ы узлов кластера драйвером CSI-S3, монтирующим ФС на узлах
boolean_value:
default_value: false


@ -0,0 +1,99 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher-sa
namespace: {{ .Release.Namespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io
---
# needed for StatefulSet
kind: Service
apiVersion: v1
metadata:
name: csi-attacher-s3
namespace: {{ .Release.Namespace }}
labels:
app: csi-attacher-s3
spec:
selector:
app: csi-attacher-s3
ports:
- name: csi-s3-dummy
port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-attacher-s3
namespace: {{ .Release.Namespace }}
spec:
serviceName: "csi-attacher-s3"
replicas: 1
selector:
matchLabels:
app: csi-attacher-s3
template:
metadata:
labels:
app: csi-attacher-s3
spec:
serviceAccount: csi-attacher-sa
tolerations:
- key: node-role.kubernetes.io/master
operator: "Exists"
containers:
- name: csi-attacher
image: {{ .Values.images.attacher }}
args:
- "--v=4"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
type: DirectoryOrCreate


@ -8,6 +8,22 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-s3
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@ -50,6 +66,7 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccount: csi-s3
hostNetwork: true
containers:
- name: driver-registrar
image: {{ .Values.images.registrar }}
@ -61,7 +78,7 @@ spec:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
@ -93,37 +110,24 @@ spec:
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: stage-dir
mountPath: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
mountPropagation: "Bidirectional"
- name: pods-mount-dir
mountPath: {{ .Values.kubeletPath }}/pods
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- name: fuse-device
mountPath: /dev/fuse
- name: systemd-control
mountPath: /run/systemd
volumes:
- name: registration-dir
hostPath:
path: {{ .Values.kubeletPath }}/plugins_registry/
path: /var/lib/kubelet/plugins_registry/
type: DirectoryOrCreate
- name: plugin-dir
hostPath:
path: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
type: DirectoryOrCreate
- name: stage-dir
hostPath:
path: {{ .Values.kubeletPath }}/plugins/kubernetes.io/csi
path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: {{ .Values.kubeletPath }}/pods
path: /var/lib/kubelet/pods
type: Directory
- name: fuse-device
hostPath:
path: /dev/fuse
- name: systemd-control
hostPath:
path: /run/systemd
type: DirectoryOrCreate


@ -74,13 +74,12 @@ spec:
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
- operator: Exists
effect: NoExecute
tolerationSeconds: 300
{{- with .Values.tolerations.controller }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: csi-provisioner
image: {{ .Values.images.provisioner }}
@ -89,11 +88,11 @@ spec:
- "--v=4"
env:
- name: ADDRESS
value: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
- name: csi-s3
image: {{ .Values.images.csi }}
imagePullPolicy: IfNotPresent
@ -103,14 +102,14 @@ spec:
- "--v=4"
env:
- name: CSI_ENDPOINT
value: unix://{{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi/csi.sock
value: unix:///var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: {{ .Values.kubeletPath }}/plugins/ru.yandex.s3.csi
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
volumes:
- name: socket-dir
emptyDir: {}


@ -5,11 +5,7 @@ metadata:
namespace: {{ .Release.Namespace }}
name: {{ .Values.secret.name }}
stringData:
{{- if .Values.secret.accessKey }}
accessKeyID: {{ .Values.secret.accessKey }}
{{- end }}
{{- if .Values.secret.secretKey }}
secretAccessKey: {{ .Values.secret.secretKey }}
{{- end }}
endpoint: {{ .Values.secret.endpoint }}
{{- end -}}


@ -9,7 +9,7 @@ metadata:
{{- end }}
provisioner: ru.yandex.s3.csi
parameters:
mounter: "{{ .Values.storageClass.mounter }}"
mounter: geesefs
options: "{{ .Values.storageClass.mountOptions }}"
{{- if .Values.storageClass.singleBucket }}
bucket: "{{ .Values.storageClass.singleBucket }}"


@ -1,11 +1,13 @@
---
images:
# Source: quay.io/k8scsi/csi-attacher:v3.0.1
attacher: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-attacher:v3.0.1
# Source: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
registrar: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-node-driver-registrar:v1.2.0
# Source: quay.io/k8scsi/csi-provisioner:v2.1.0
provisioner: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-provisioner:v2.1.0
# Main image
csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3-driver:0.38.3
csi: cr.yandex/crp9ftr22d26age3hulg/yandex-cloud/csi-s3/csi-s3-driver:0.34.0
storageClass:
# Specifies whether the storage class should be created
@ -14,8 +16,6 @@ storageClass:
name: csi-s3
# Use a single bucket for all dynamically provisioned persistent volumes
singleBucket: ""
# mounter to use - either geesefs, s3fs or rclone (default geesefs)
mounter: geesefs
# GeeseFS mount options
mountOptions: "--memory-limit 1000 --dir-mode 0777 --file-mode 0666"
# Volume reclaim policy
@ -42,7 +42,3 @@ tolerations:
all: false
node: []
controller: []
nodeSelector: {}
kubeletPath: /var/lib/kubelet


@ -0,0 +1,104 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io
---
# needed for StatefulSet
kind: Service
apiVersion: v1
metadata:
name: csi-attacher-s3
namespace: kube-system
labels:
app: csi-attacher-s3
spec:
selector:
app: csi-attacher-s3
ports:
- name: csi-s3-dummy
port: 65535
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-attacher-s3
namespace: kube-system
spec:
serviceName: "csi-attacher-s3"
replicas: 1
selector:
matchLabels:
app: csi-attacher-s3
template:
metadata:
labels:
app: csi-attacher-s3
spec:
serviceAccount: csi-attacher-sa
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
- operator: Exists
effect: NoExecute
tolerationSeconds: 300
containers:
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v3.0.1
args:
- "--v=4"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/ru.yandex.s3.csi/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
type: DirectoryOrCreate


@ -8,6 +8,22 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-s3
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@ -43,6 +59,7 @@ spec:
effect: NoExecute
tolerationSeconds: 300
serviceAccount: csi-s3
hostNetwork: true
containers:
- name: driver-registrar
image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
@ -70,7 +87,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.34.0
imagePullPolicy: IfNotPresent
args:
- "--endpoint=$(CSI_ENDPOINT)"
@ -86,16 +103,11 @@ spec:
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: stage-dir
mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
mountPropagation: "Bidirectional"
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- name: fuse-device
mountPath: /dev/fuse
- name: systemd-control
mountPath: /run/systemd
volumes:
- name: registration-dir
hostPath:
@ -105,10 +117,6 @@ spec:
hostPath:
path: /var/lib/kubelet/plugins/ru.yandex.s3.csi
type: DirectoryOrCreate
- name: stage-dir
hostPath:
path: /var/lib/kubelet/plugins/kubernetes.io/csi
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
@ -116,7 +124,3 @@ spec:
- name: fuse-device
hostPath:
path: /dev/fuse
- name: systemd-control
hostPath:
path: /run/systemd
type: DirectoryOrCreate


@ -1,7 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: ru.yandex.s3.csi
spec:
attachRequired: false
podInfoOnMount: true


@ -29,7 +29,6 @@ spec:
volumeAttributes:
capacity: 10Gi
mounter: geesefs
options: --memory-limit 1000 --dir-mode 0777 --file-mode 0666
volumeHandle: manualbucket/path
---
apiVersion: v1


@ -70,10 +70,8 @@ spec:
spec:
serviceAccount: csi-provisioner-sa
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
operator: "Exists"
containers:
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v2.1.0
@ -88,7 +86,7 @@ spec:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/ru.yandex.s3.csi
- name: csi-s3
image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.38.3
image: cr.yandex/crp9ftr22d26age3hulg/csi-s3:0.34.0
imagePullPolicy: IfNotPresent
args:
- "--endpoint=$(CSI_ENDPOINT)"

go.mod

@ -4,8 +4,6 @@ go 1.15
require (
github.com/container-storage-interface/spec v1.1.0
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/godbus/dbus/v5 v5.0.4 // indirect
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/golang/protobuf v1.1.0 // indirect
github.com/kubernetes-csi/csi-lib-utils v0.6.1 // indirect

go.sum

@ -1,13 +1,9 @@
github.com/container-storage-interface/spec v1.1.0 h1:qPsTqtR1VUPvMPeK0UnCZMtXaKGyyLPG8gj/wG6VqMs=
github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=


@ -33,7 +33,7 @@ type driver struct {
}
var (
vendorVersion = "v1.34.7"
vendorVersion = "v1.2.0"
driverName = "ru.yandex.s3.csi"
)


@ -19,7 +19,6 @@ package driver
import (
"fmt"
"os"
"os/exec"
"regexp"
"strconv"
@ -69,6 +68,7 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
volumeID := req.GetVolumeId()
targetPath := req.GetTargetPath()
stagingTargetPath := req.GetStagingTargetPath()
bucketName, prefix := volumeIDToBucketPrefix(volumeID)
// Check arguments
if req.GetVolumeCapability() == nil {
@ -84,28 +84,7 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
notMnt, err := checkMount(stagingTargetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if notMnt {
// Staged mount is dead by some reason. Revive it
bucketName, prefix := volumeIDToBucketPrefix(volumeID)
s3, err := s3.NewClientFromSecret(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
meta := getMeta(bucketName, prefix, req.VolumeContext)
mounter, err := mounter.New(meta, s3.Config)
if err != nil {
return nil, err
}
if err := mounter.Mount(stagingTargetPath, volumeID); err != nil {
return nil, err
}
}
notMnt, err = checkMount(targetPath)
notMnt, err := checkMount(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@ -121,12 +100,18 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
glog.V(4).Infof("target %v\nreadonly %v\nvolumeId %v\nattributes %v\nmountflags %v\n",
targetPath, readOnly, volumeID, attrib, mountFlags)
cmd := exec.Command("mount", "--bind", stagingTargetPath, targetPath)
cmd.Stderr = os.Stderr
glog.V(3).Infof("Binding volume %v from %v to %v", volumeID, stagingTargetPath, targetPath)
out, err := cmd.Output()
s3, err := s3.NewClientFromSecret(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("Error running mount --bind %v %v: %s", stagingTargetPath, targetPath, out)
return nil, fmt.Errorf("failed to initialize S3 client: %s", err)
}
meta := getMeta(bucketName, prefix, req.VolumeContext)
mounter, err := mounter.New(meta, s3.Config)
if err != nil {
return nil, err
}
if err := mounter.Mount(stagingTargetPath, targetPath); err != nil {
return nil, err
}
glog.V(4).Infof("s3: volume %s successfully mounted to %s", volumeID, targetPath)
@ -146,7 +131,7 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
if err := mounter.Unmount(targetPath); err != nil {
if err := mounter.FuseUnmount(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("s3: volume %s has been unmounted.", volumeID)
@ -189,7 +174,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
if err != nil {
return nil, err
}
if err := mounter.Mount(stagingTargetPath, volumeID); err != nil {
if err := mounter.Stage(stagingTargetPath); err != nil {
return nil, err
}
@ -208,22 +193,6 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
proc, err := mounter.FindFuseMountProcess(stagingTargetPath)
if err != nil {
return nil, err
}
exists := false
if proc == nil {
exists, err = mounter.SystemdUnmount(volumeID)
if exists && err != nil {
return nil, err
}
}
if !exists {
err = mounter.FuseUnmount(stagingTargetPath)
}
glog.V(4).Infof("s3: volume %s has been unmounted from stage path %v.", volumeID, stagingTargetPath)
return &csi.NodeUnstageVolumeResponse{}, nil
}


@ -3,12 +3,6 @@ package mounter
import (
"fmt"
"os"
"strings"
"time"
systemd "github.com/coreos/go-systemd/v22/dbus"
dbus "github.com/godbus/dbus/v5"
"github.com/golang/glog"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
)
@ -36,170 +30,27 @@ func newGeeseFSMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
}, nil
}
func (geesefs *geesefsMounter) CopyBinary(from, to string) error {
st, err := os.Stat(from)
if err != nil {
return fmt.Errorf("Failed to stat %s: %v", from, err)
}
st2, err := os.Stat(to)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("Failed to stat %s: %v", to, err)
}
if err != nil || st2.Size() != st.Size() || st2.ModTime() != st.ModTime() {
if err == nil {
// remove the file first to not hit "text file busy" errors
err = os.Remove(to)
if err != nil {
return fmt.Errorf("Error removing %s to update it: %v", to, err)
}
}
bin, err := os.ReadFile(from)
if err != nil {
return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
}
err = os.WriteFile(to, bin, 0755)
if err != nil {
return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
}
err = os.Chtimes(to, st.ModTime(), st.ModTime())
if err != nil {
return fmt.Errorf("Error copying %s to %s: %v", from, to, err)
}
}
func (geesefs *geesefsMounter) Stage(stageTarget string) error {
return nil
}
func (geesefs *geesefsMounter) MountDirect(target string, args []string) error {
args = append([]string{
func (geesefs *geesefsMounter) Unstage(stageTarget string) error {
return nil
}
func (geesefs *geesefsMounter) Mount(source string, target string) error {
fullPath := fmt.Sprintf("%s:%s", geesefs.meta.BucketName, geesefs.meta.Prefix)
args := []string{
"--endpoint", geesefs.endpoint,
"-o", "allow_other",
"--log-file", "/dev/stderr",
}, args...)
envs := []string{
"AWS_ACCESS_KEY_ID=" + geesefs.accessKeyID,
"AWS_SECRET_ACCESS_KEY=" + geesefs.secretAccessKey,
}
return fuseMount(target, geesefsCmd, args, envs)
}
type execCmd struct {
Path string
Args []string
UncleanIsFailure bool
}
func (geesefs *geesefsMounter) Mount(target, volumeID string) error {
fullPath := fmt.Sprintf("%s:%s", geesefs.meta.BucketName, geesefs.meta.Prefix)
var args []string
if geesefs.region != "" {
args = append(args, "--region", geesefs.region)
}
args = append(
args,
"--setuid", "65534", // nobody. drop root privileges
"--setgid", "65534", // nogroup
)
useSystemd := true
for i := 0; i < len(geesefs.meta.MountOptions); i++ {
opt := geesefs.meta.MountOptions[i]
if opt == "--no-systemd" {
useSystemd = false
} else if len(opt) > 0 && opt[0] == '-' {
// Remove unsafe options
s := 1
if len(opt) > 1 && opt[1] == '-' {
s++
}
key := opt[s:]
e := strings.Index(opt, "=")
if e >= 0 {
key = opt[s:e]
}
if key == "log-file" || key == "shared-config" || key == "cache" {
// Skip options accessing local FS
if e < 0 {
i++
}
} else if key != "" {
args = append(args, opt)
}
} else if len(opt) > 0 {
args = append(args, opt)
}
}
args = append(args, geesefs.meta.MountOptions...)
args = append(args, fullPath, target)
// Try to start geesefs using systemd so it doesn't get killed when the container exits
if !useSystemd {
return geesefs.MountDirect(target, args)
}
conn, err := systemd.New()
if err != nil {
glog.Errorf("Failed to connect to systemd dbus service: %v, starting geesefs directly", err)
return geesefs.MountDirect(target, args)
}
defer conn.Close()
// systemd is present
if err = geesefs.CopyBinary("/usr/bin/geesefs", "/csi/geesefs"); err != nil {
return err
}
pluginDir := os.Getenv("PLUGIN_DIR")
if pluginDir == "" {
pluginDir = "/var/lib/kubelet/plugins/ru.yandex.s3.csi"
}
args = append([]string{pluginDir+"/geesefs", "-f", "-o", "allow_other", "--endpoint", geesefs.endpoint}, args...)
glog.Info("Starting geesefs using systemd: "+strings.Join(args, " "))
unitName := "geesefs-"+systemd.PathBusEscape(volumeID)+".service"
newProps := []systemd.Property{
systemd.Property{
Name: "Description",
Value: dbus.MakeVariant("GeeseFS mount for Kubernetes volume "+volumeID),
},
systemd.PropExecStart(args, false),
systemd.Property{
Name: "ExecStopPost",
// force & lazy unmount to cleanup possibly dead mountpoints
Value: dbus.MakeVariant([]execCmd{ execCmd{ "/bin/umount", []string{ "/bin/umount", "-f", "-l", target }, false } }),
},
systemd.Property{
Name: "Environment",
Value: dbus.MakeVariant([]string{ "AWS_ACCESS_KEY_ID="+geesefs.accessKeyID, "AWS_SECRET_ACCESS_KEY="+geesefs.secretAccessKey }),
},
systemd.Property{
Name: "CollectMode",
Value: dbus.MakeVariant("inactive-or-failed"),
},
}
unitProps, err := conn.GetAllProperties(unitName)
if err == nil {
// Unit already exists
if s, ok := unitProps["ActiveState"].(string); ok && (s == "active" || s == "activating" || s == "reloading") {
// Unit is already active
curPath := ""
prevExec, ok := unitProps["ExecStart"].([][]interface{})
if ok && len(prevExec) > 0 && len(prevExec[0]) >= 2 {
execArgs, ok := prevExec[0][1].([]string)
if ok && len(execArgs) >= 2 {
curPath = execArgs[len(execArgs)-1]
}
}
if curPath != target {
return fmt.Errorf(
"GeeseFS for volume %v is already mounted on host, but"+
" in a different directory. We want %v, but it's in %v",
volumeID, target, curPath,
)
}
// Already mounted at right location
return nil
} else {
// Stop and garbage collect the unit if automatic collection didn't work for some reason
conn.StopUnit(unitName, "replace", nil)
conn.ResetFailedUnit(unitName)
}
}
_, err = conn.StartTransientUnit(unitName, "replace", newProps, nil)
if err != nil {
return fmt.Errorf("Error starting systemd unit %s on host: %v", unitName, err)
}
return waitForMount(target, 10*time.Second)
os.Setenv("AWS_ACCESS_KEY_ID", geesefs.accessKeyID)
os.Setenv("AWS_SECRET_ACCESS_KEY", geesefs.secretAccessKey)
return fuseMount(target, geesefsCmd, args)
}


@ -11,18 +11,18 @@ import (
"syscall"
"time"
systemd "github.com/coreos/go-systemd/v22/dbus"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
"github.com/golang/glog"
"github.com/mitchellh/go-ps"
"k8s.io/kubernetes/pkg/util/mount"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
)
// Mounter interface which can be implemented
// by the different mounter types
type Mounter interface {
Mount(target, volumeID string) error
Stage(stagePath string) error
Unstage(stagePath string) error
Mount(source string, target string) error
}
const (
@ -57,11 +57,9 @@ func New(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
}
}
func fuseMount(path string, command string, args []string, envs []string) error {
func fuseMount(path string, command string, args []string) error {
cmd := exec.Command(command, args...)
cmd.Stderr = os.Stderr
// cmd.Environ() returns envs inherited from the current process
cmd.Env = append(cmd.Environ(), envs...)
glog.V(3).Infof("Mounting fuse with command: %s and args: %s", command, args)
out, err := cmd.Output()
@ -72,40 +70,12 @@ func fuseMount(path string, command string, args []string, envs []string) error
return waitForMount(path, 10*time.Second)
}
func Unmount(path string) error {
if err := mount.New("").Unmount(path); err != nil {
return err
}
return nil
}
func SystemdUnmount(volumeID string) (bool, error) {
conn, err := systemd.New()
if err != nil {
glog.Errorf("Failed to connect to systemd dbus service: %v", err)
return false, err
}
defer conn.Close()
unitName := "geesefs-"+systemd.PathBusEscape(volumeID)+".service"
units, err := conn.ListUnitsByNames([]string{ unitName })
glog.Errorf("Got %v", units)
if err != nil {
glog.Errorf("Failed to list systemd unit by name %v: %v", unitName, err)
return false, err
}
if len(units) == 0 || units[0].ActiveState == "inactive" || units[0].ActiveState == "failed" {
return true, nil
}
_, err = conn.StopUnit(unitName, "replace", nil)
return true, err
}
func FuseUnmount(path string) error {
if err := mount.New("").Unmount(path); err != nil {
return err
}
// as fuse quits immediately, we will try to wait until the process is done
process, err := FindFuseMountProcess(path)
process, err := findFuseMountProcess(path)
if err != nil {
glog.Errorf("Error getting PID of fuse mount: %s", err)
return nil
@ -137,7 +107,7 @@ func waitForMount(path string, timeout time.Duration) error {
}
}
func FindFuseMountProcess(path string) (*os.Process, error) {
func findFuseMountProcess(path string) (*os.Process, error) {
processes, err := ps.Processes()
if err != nil {
return nil, err


@ -2,6 +2,7 @@ package mounter
import (
"fmt"
"os"
"path"
"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
@ -30,7 +31,15 @@ func newRcloneMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
}, nil
}
func (rclone *rcloneMounter) Mount(target, volumeID string) error {
func (rclone *rcloneMounter) Stage(stageTarget string) error {
return nil
}
func (rclone *rcloneMounter) Unstage(stageTarget string) error {
return nil
}
func (rclone *rcloneMounter) Mount(source string, target string) error {
args := []string{
"mount",
fmt.Sprintf(":s3:%s", path.Join(rclone.meta.BucketName, rclone.meta.Prefix)),
@ -46,9 +55,7 @@ func (rclone *rcloneMounter) Mount(target, volumeID string) error {
args = append(args, fmt.Sprintf("--s3-region=%s", rclone.region))
}
args = append(args, rclone.meta.MountOptions...)
envs := []string{
"AWS_ACCESS_KEY_ID=" + rclone.accessKeyID,
"AWS_SECRET_ACCESS_KEY=" + rclone.secretAccessKey,
}
return fuseMount(target, rcloneCmd, args, envs)
os.Setenv("AWS_ACCESS_KEY_ID", rclone.accessKeyID)
os.Setenv("AWS_SECRET_ACCESS_KEY", rclone.secretAccessKey)
return fuseMount(target, rcloneCmd, args)
}


@ -28,7 +28,15 @@ func newS3fsMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
}, nil
}
func (s3fs *s3fsMounter) Mount(target, volumeID string) error {
func (s3fs *s3fsMounter) Stage(stageTarget string) error {
return nil
}
func (s3fs *s3fsMounter) Unstage(stageTarget string) error {
return nil
}
func (s3fs *s3fsMounter) Mount(source string, target string) error {
if err := writes3fsPass(s3fs.pwFileContent); err != nil {
return err
}
@ -44,7 +52,7 @@ func (s3fs *s3fsMounter) Mount(target, volumeID string) error {
args = append(args, "-o", fmt.Sprintf("endpoint=%s", s3fs.region))
}
args = append(args, s3fs.meta.MountOptions...)
return fuseMount(target, s3fsCmd, args, nil)
return fuseMount(target, s3fsCmd, args)
}
func writes3fsPass(pwFileContent string) error {


@ -52,7 +52,7 @@ func NewClient(cfg *Config) (*s3Client, error) {
endpoint = u.Hostname() + ":" + u.Port()
}
minioClient, err := minio.New(endpoint, &minio.Options{
Creds: credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, ""),
Creds: credentials.NewStaticV4(client.Config.AccessKeyID, client.Config.SecretAccessKey, client.Config.Region),
Secure: ssl,
})
if err != nil {