Rename Goofys mounter to GeeseFS and use it by default

This commit is contained in:
Vitaliy Filippov 2021-07-26 14:51:19 +03:00
parent 322362ab88
commit f723bccaf3
7 changed files with 90 additions and 91 deletions

View file

@ -110,31 +110,33 @@ As S3 is not a real file system there are some limitations to consider here. Dep
The driver can be configured to use one of these mounters to mount buckets: The driver can be configured to use one of these mounters to mount buckets:
* [rclone](https://rclone.org/commands/rclone_mount) * [geesefs](https://github.com/yandex-cloud/geesefs) (recommended and default)
* [s3fs](https://github.com/s3fs-fuse/s3fs-fuse) * [s3fs](https://github.com/s3fs-fuse/s3fs-fuse)
* [goofys](https://github.com/kahing/goofys) * [rclone](https://rclone.org/commands/rclone_mount)
* [s3backer](https://github.com/archiecobbs/s3backer) * [s3backer](https://github.com/archiecobbs/s3backer)
The mounter can be set as a parameter in the storage class. You can also create multiple storage classes for each mounter if you like. The mounter can be set as a parameter in the storage class. You can also create multiple storage classes for each mounter if you like.
All mounters have different strengths and weaknesses depending on your use case. Here are some characteristics which should help you choose a mounter: Characteristics of different mounters (for more detailed information consult their own documentation):
#### rclone #### GeeseFS
* Almost full POSIX compatibility (depends on caching mode) * Almost full POSIX compatibility
* Good performance for both small and big files
* Files can be viewed normally with any S3 client * Files can be viewed normally with any S3 client
#### s3fs #### s3fs
* Large subset of POSIX * Almost full POSIX compatibility
* Good performance for big files, poor performance for small files
* Files can be viewed normally with any S3 client * Files can be viewed normally with any S3 client
#### goofys #### rclone
* Weak POSIX compatibility * Less POSIX compatible than s3fs
* Performance first * Bad performance for big files, okayish performance for small files
* Files can be viewed normally with any S3 client * Files can be viewed normally with any S3 client
* Does not support appends or random writes * Doesn't create directory objects like s3fs or GeeseFS
#### s3backer (experimental*) #### s3backer (experimental*)
@ -148,8 +150,6 @@ All mounters have different strengths and weaknesses depending on your use case.
*s3backer is experimental at this point because volume corruption can occur pretty quickly in case of an unexpected shutdown of a Kubernetes node or CSI pod. *s3backer is experimental at this point because volume corruption can occur pretty quickly in case of an unexpected shutdown of a Kubernetes node or CSI pod.
The s3backer binary is not bundled with the normal docker image to keep that as small as possible. Use the `<version>-full` image tag for testing s3backer. The s3backer binary is not bundled with the normal docker image to keep that as small as possible. Use the `<version>-full` image tag for testing s3backer.
For more detailed limitations consult the documentation of the different projects.
## Troubleshooting ## Troubleshooting
### Issues while creating PVC ### Issues while creating PVC

View file

@ -10,8 +10,7 @@ FROM debian:buster-slim
LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>" LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
LABEL description="csi-s3 slim image" LABEL description="csi-s3 slim image"
# patched goofys ADD geesefs /usr/bin/
ADD goofys /usr/bin/
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y ca-certificates && \ apt-get install -y ca-certificates && \

View file

@ -5,7 +5,7 @@ metadata:
name: csi-s3 name: csi-s3
provisioner: ch.ctrox.csi.s3-driver provisioner: ch.ctrox.csi.s3-driver
parameters: parameters:
mounter: goofys mounter: geesefs
# to use an existing bucket, specify it here: # to use an existing bucket, specify it here:
# bucket: some-existing-bucket # bucket: some-existing-bucket
csi.storage.k8s.io/provisioner-secret-name: csi-s3-secret csi.storage.k8s.io/provisioner-secret-name: csi-s3-secret

View file

@ -14,8 +14,8 @@ import (
var _ = Describe("S3Driver", func() { var _ = Describe("S3Driver", func() {
Context("goofys", func() { Context("geesefs", func() {
socket := "/tmp/csi-goofys.sock" socket := "/tmp/csi-geesefs.sock"
csiEndpoint := "unix://" + socket csiEndpoint := "unix://" + socket
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) { if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -28,12 +28,12 @@ var _ = Describe("S3Driver", func() {
Describe("CSI sanity", func() { Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{ sanityCfg := &sanity.Config{
TargetPath: os.TempDir() + "/goofys-target", TargetPath: os.TempDir() + "/geesefs-target",
StagingPath: os.TempDir() + "/goofys-staging", StagingPath: os.TempDir() + "/geesefs-staging",
Address: csiEndpoint, Address: csiEndpoint,
SecretsFile: "../../test/secret.yaml", SecretsFile: "../../test/secret.yaml",
TestVolumeParameters: map[string]string{ TestVolumeParameters: map[string]string{
"mounter": "goofys", "mounter": "geesefs",
"bucket": "testbucket0", "bucket": "testbucket0",
}, },
} }
@ -41,8 +41,8 @@ var _ = Describe("S3Driver", func() {
}) })
}) })
Context("goofys-no-bucket", func() { Context("geesefs-no-bucket", func() {
socket := "/tmp/csi-goofys-no-bucket.sock" socket := "/tmp/csi-geesefs-no-bucket.sock"
csiEndpoint := "unix://" + socket csiEndpoint := "unix://" + socket
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) { if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -55,12 +55,12 @@ var _ = Describe("S3Driver", func() {
Describe("CSI sanity", func() { Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{ sanityCfg := &sanity.Config{
TargetPath: os.TempDir() + "/goofys-no-bucket-target", TargetPath: os.TempDir() + "/geesefs-no-bucket-target",
StagingPath: os.TempDir() + "/goofys-no-bucket-staging", StagingPath: os.TempDir() + "/geesefs-no-bucket-staging",
Address: csiEndpoint, Address: csiEndpoint,
SecretsFile: "../../test/secret.yaml", SecretsFile: "../../test/secret.yaml",
TestVolumeParameters: map[string]string{ TestVolumeParameters: map[string]string{
"mounter": "goofys", "mounter": "geesefs",
}, },
} }
sanity.GinkgoTest(sanityCfg) sanity.GinkgoTest(sanityCfg)

60
pkg/mounter/geesefs.go Normal file
View file

@ -0,0 +1,60 @@
package mounter
import (
"fmt"
"os"
"github.com/ctrox/csi-s3/pkg/s3"
)
const (
	// geesefsCmd is the binary name handed to fuseMount to perform the mount.
	geesefsCmd = "geesefs"
	// defaultRegion is substituted when a custom endpoint is configured but
	// no region was supplied (see newGeeseFSMounter).
	defaultRegion = "us-east-1"
)
// geesefsMounter mounts S3 buckets with the GeeseFS FUSE client.
// Implements Mounter.
type geesefsMounter struct {
	meta            *s3.FSMeta // bucket name, prefix and extra mount options for this volume
	endpoint        string     // S3 endpoint URL, forwarded via --endpoint
	region          string     // S3 region, forwarded via --region (defaulted in newGeeseFSMounter)
	accessKeyID     string     // credentials exported to the geesefs process environment
	secretAccessKey string
}
// newGeeseFSMounter builds a GeeseFS-backed Mounter from the volume
// metadata and the driver configuration. When a custom endpoint is set
// without an explicit region, the region falls back to defaultRegion.
func newGeeseFSMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
	m := &geesefsMounter{
		meta:            meta,
		endpoint:        cfg.Endpoint,
		region:          cfg.Region,
		accessKeyID:     cfg.AccessKeyID,
		secretAccessKey: cfg.SecretAccessKey,
	}
	// A custom endpoint still requires some region value.
	if m.region == "" && cfg.Endpoint != "" {
		m.region = defaultRegion
	}
	return m, nil
}
// Stage is a no-op: GeeseFS requires no node-level staging before Mount.
func (geesefs *geesefsMounter) Stage(stageTarget string) error {
	return nil
}
// Unstage is a no-op, mirroring Stage.
func (geesefs *geesefsMounter) Unstage(stageTarget string) error {
	return nil
}
// Mount runs the geesefs FUSE client so the bucket (with optional prefix,
// addressed as "bucket:prefix") appears at target. Credentials are handed
// to the child process through the standard AWS environment variables.
// The source argument is unused.
func (geesefs *geesefsMounter) Mount(source string, target string) error {
	bucketSpec := fmt.Sprintf("%s:%s", geesefs.meta.BucketName, geesefs.meta.Prefix)
	// FIXME Add memory limits
	args := make([]string, 0, 8+len(geesefs.meta.MountOptions))
	args = append(args,
		"--endpoint", geesefs.endpoint,
		"--region", geesefs.region,
		"-o", "allow_other",
		bucketSpec, target,
	)
	// User-supplied mount options go after the positional arguments.
	args = append(args, geesefs.meta.MountOptions...)
	os.Setenv("AWS_ACCESS_KEY_ID", geesefs.accessKeyID)
	os.Setenv("AWS_SECRET_ACCESS_KEY", geesefs.secretAccessKey)
	return fuseMount(target, geesefsCmd, args)
}

View file

@ -1,60 +0,0 @@
package mounter
import (
"fmt"
"os"
"github.com/ctrox/csi-s3/pkg/s3"
)
const (
	// goofysCmd is the binary name handed to fuseMount to perform the mount.
	goofysCmd = "goofys"
	// defaultRegion is substituted when a custom endpoint is configured but
	// no region was supplied (see newGoofysMounter).
	defaultRegion = "us-east-1"
)
// goofysMounter mounts S3 buckets with the goofys FUSE client.
// Implements Mounter.
type goofysMounter struct {
	meta            *s3.FSMeta // bucket name, prefix and extra mount options for this volume
	endpoint        string     // S3 endpoint URL, forwarded via --endpoint
	region          string     // S3 region, forwarded via --region (defaulted in newGoofysMounter)
	accessKeyID     string     // credentials exported to the goofys process environment
	secretAccessKey string
}
// newGoofysMounter builds a goofys-backed Mounter from the volume
// metadata and the driver configuration.
func newGoofysMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
	region := cfg.Region
	// if endpoint is set we need a default region
	if region == "" && cfg.Endpoint != "" {
		region = defaultRegion
	}
	return &goofysMounter{
		meta:            meta,
		endpoint:        cfg.Endpoint,
		region:          region,
		accessKeyID:     cfg.AccessKeyID,
		secretAccessKey: cfg.SecretAccessKey,
	}, nil
}
// Stage is a no-op: goofys requires no node-level staging before Mount.
func (goofys *goofysMounter) Stage(stageTarget string) error {
	return nil
}
// Unstage is a no-op, mirroring Stage.
func (goofys *goofysMounter) Unstage(stageTarget string) error {
	return nil
}
// Mount runs the goofys FUSE client so the bucket (with optional prefix,
// addressed as "bucket:prefix") appears at target. Credentials are handed
// to the child process through the standard AWS environment variables.
// The source argument is unused.
func (goofys *goofysMounter) Mount(source string, target string) error {
	fullPath := fmt.Sprintf("%s:%s", goofys.meta.BucketName, goofys.meta.Prefix)
	// FIXME Add memory limits
	args := []string{
		"--endpoint", goofys.endpoint,
		"--region", goofys.region,
		"-o", "allow_other",
		fullPath, target,
	}
	// User-supplied mount options go after the positional arguments.
	args = append(args, goofys.meta.MountOptions...)
	// NOTE(review): Setenv mutates the whole process environment — assumes
	// mounts with different credentials do not run concurrently; confirm.
	os.Setenv("AWS_ACCESS_KEY_ID", goofys.accessKeyID)
	os.Setenv("AWS_SECRET_ACCESS_KEY", goofys.secretAccessKey)
	return fuseMount(target, goofysCmd, args)
}

View file

@ -27,7 +27,7 @@ type Mounter interface {
const ( const (
s3fsMounterType = "s3fs" s3fsMounterType = "s3fs"
goofysMounterType = "goofys" geesefsMounterType = "geesefs"
s3backerMounterType = "s3backer" s3backerMounterType = "s3backer"
rcloneMounterType = "rclone" rcloneMounterType = "rclone"
TypeKey = "mounter" TypeKey = "mounter"
@ -43,12 +43,12 @@ func New(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
mounter = cfg.Mounter mounter = cfg.Mounter
} }
switch mounter { switch mounter {
case geesefsMounterType:
return newGeeseFSMounter(meta, cfg)
case s3fsMounterType: case s3fsMounterType:
return newS3fsMounter(meta, cfg) return newS3fsMounter(meta, cfg)
case goofysMounterType:
return newGoofysMounter(meta, cfg)
case s3backerMounterType: case s3backerMounterType:
return newS3backerMounter(meta, cfg) return newS3backerMounter(meta, cfg)
@ -56,8 +56,8 @@ func New(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
return newRcloneMounter(meta, cfg) return newRcloneMounter(meta, cfg)
default: default:
// default to s3backer // default to GeeseFS
return newS3backerMounter(meta, cfg) return newGeeseFSMounter(meta, cfg)
} }
} }