k8s-csi-s3/pkg/mounter/rclone.go
Vitaliy Filippov ecf1031dfc Implement mounting via stage directory
Previously, multiple containers with the same mounted volume resulted in multiple
FUSE processes. This behaviour broke parallel modifications from different
containers, consumed extra resources, and, after mounting via systemd was
introduced, made it impossible to mount the same volume into multiple containers
on the same host.

Now only one FUSE process is started per volume, per host.
2023-03-07 00:49:12 +03:00
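
The stage-directory mechanism the commit describes can be pictured as a single
FUSE process serving each volume at a per-volume staging path, with every
container target attached via a plain bind mount. A minimal sketch of that idea
follows; bindMountFromStage and stagePath are illustrative names, not the
driver's actual code:

package mounter

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// bindMountFromStage attaches target to a volume that a single FUSE process
// already serves at stagePath, so no second FUSE process is started.
func bindMountFromStage(stagePath, target string) error {
	if err := os.MkdirAll(target, 0o755); err != nil {
		return fmt.Errorf("create target %s: %w", target, err)
	}
	// MS_BIND makes target an alias of stagePath; tearing down the container
	// only unmounts the bind, leaving the staged FUSE mount intact.
	return unix.Mount(stagePath, target, "", unix.MS_BIND, "")
}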

package mounter

import (
	"fmt"
	"os"
	"path"

	"github.com/yandex-cloud/k8s-csi-s3/pkg/s3"
)

// rcloneMounter implements Mounter using the rclone CLI.
type rcloneMounter struct {
	meta            *s3.FSMeta
	url             string
	region          string
	accessKeyID     string
	secretAccessKey string
}

const (
	rcloneCmd = "rclone"
)

func newRcloneMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
	return &rcloneMounter{
		meta:            meta,
		url:             cfg.Endpoint,
		region:          cfg.Region,
		accessKeyID:     cfg.AccessKeyID,
		secretAccessKey: cfg.SecretAccessKey,
	}, nil
}

// Mount starts rclone as a daemon serving the bucket (and optional prefix) at target.
func (rclone *rcloneMounter) Mount(target, volumeID string) error {
	args := []string{
		"mount",
		fmt.Sprintf(":s3:%s", path.Join(rclone.meta.BucketName, rclone.meta.Prefix)),
		target,
		"--daemon",
		"--s3-provider=AWS",
		"--s3-env-auth=true",
		fmt.Sprintf("--s3-endpoint=%s", rclone.url),
		"--allow-other",
		"--vfs-cache-mode=writes",
	}
	if rclone.region != "" {
		args = append(args, fmt.Sprintf("--s3-region=%s", rclone.region))
	}
	args = append(args, rclone.meta.MountOptions...)
	// Credentials are passed via the environment; --s3-env-auth=true makes
	// rclone read them from AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY.
	os.Setenv("AWS_ACCESS_KEY_ID", rclone.accessKeyID)
	os.Setenv("AWS_SECRET_ACCESS_KEY", rclone.secretAccessKey)
	return fuseMount(target, rcloneCmd, args)
}
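
Mount delegates the actual process launch to fuseMount, which this file does
not define. A minimal sketch of what such a helper might look like, assuming
it simply runs the command and lets rclone's --daemon flag background the
mount; fuseMountSketch is a hypothetical stand-in, not the package's real
fuseMount:

package mounter

import (
	"fmt"
	"os"
	"os/exec"
)

// fuseMountSketch runs the FUSE binary with the given arguments. Because
// rclone is invoked with --daemon, the foreground process exits once the
// mount is established, so Run returning nil signals success here.
func fuseMountSketch(target, command string, args []string) error {
	cmd := exec.Command(command, args...)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("mounting %s with %s failed: %w", target, command, err)
	}
	return nil
}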