Rename Goofys mounter to GeeseFS and use it by default

Vitaliy Filippov 2021-07-26 14:51:19 +03:00
parent 322362ab88
commit f723bccaf3
7 changed files with 90 additions and 91 deletions


@@ -110,31 +110,33 @@ As S3 is not a real file system there are some limitations to consider here. Dep
 The driver can be configured to use one of these mounters to mount buckets:
-* [rclone](https://rclone.org/commands/rclone_mount)
+* [geesefs](https://github.com/yandex-cloud/geesefs) (recommended and default)
 * [s3fs](https://github.com/s3fs-fuse/s3fs-fuse)
-* [goofys](https://github.com/kahing/goofys)
+* [rclone](https://rclone.org/commands/rclone_mount)
 * [s3backer](https://github.com/archiecobbs/s3backer)
 The mounter can be set as a parameter in the storage class. You can also create multiple storage classes for each mounter if you like.
-All mounters have different strengths and weaknesses depending on your use case. Here are some characteristics which should help you choose a mounter:
+Characteristics of different mounters (for more detailed information consult their own documentation):
-#### rclone
+#### GeeseFS
-* Almost full POSIX compatibility (depends on caching mode)
+* Almost full POSIX compatibility
+* Good performance for both small and big files
 * Files can be viewed normally with any S3 client
 #### s3fs
-* Large subset of POSIX
+* Almost full POSIX compatibility
+* Good performance for big files, poor performance for small files
 * Files can be viewed normally with any S3 client
-#### goofys
+#### rclone
-* Weak POSIX compatibility
-* Performance first
+* Less POSIX compatible than s3fs
+* Bad performance for big files, okayish performance for small files
 * Files can be viewed normally with any S3 client
-* Does not support appends or random writes
+* Doesn't create directory objects like s3fs or GeeseFS
 #### s3backer (experimental*)
@@ -148,8 +150,6 @@ All mounters have different strengths and weaknesses depending on your use case.
 *s3backer is experimental at this point because volume corruption can occur pretty quickly in case of an unexpected shutdown of a Kubernetes node or CSI pod.
 The s3backer binary is not bundled with the normal docker image to keep that as small as possible. Use the `<version>-full` image tag for testing s3backer.
-Fore more detailed limitations consult the documentation of the different projects.
 ## Troubleshooting
 ### Issues while creating PVC


@@ -10,8 +10,7 @@ FROM debian:buster-slim
 LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
 LABEL description="csi-s3 slim image"
-# patched goofys
-ADD goofys /usr/bin/
+ADD geesefs /usr/bin/
 RUN apt-get update && \
     apt-get install -y ca-certificates && \


@@ -5,7 +5,7 @@ metadata:
   name: csi-s3
 provisioner: ch.ctrox.csi.s3-driver
 parameters:
-  mounter: goofys
+  mounter: geesefs
   # to use an existing bucket, specify it here:
   # bucket: some-existing-bucket
   csi.storage.k8s.io/provisioner-secret-name: csi-s3-secret


@@ -14,8 +14,8 @@ import (
 var _ = Describe("S3Driver", func() {
-	Context("goofys", func() {
-		socket := "/tmp/csi-goofys.sock"
+	Context("geesefs", func() {
+		socket := "/tmp/csi-geesefs.sock"
 		csiEndpoint := "unix://" + socket
 		if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
 			Expect(err).NotTo(HaveOccurred())
@@ -28,12 +28,12 @@ var _ = Describe("S3Driver", func() {
 		Describe("CSI sanity", func() {
 			sanityCfg := &sanity.Config{
-				TargetPath: os.TempDir() + "/goofys-target",
-				StagingPath: os.TempDir() + "/goofys-staging",
+				TargetPath: os.TempDir() + "/geesefs-target",
+				StagingPath: os.TempDir() + "/geesefs-staging",
 				Address: csiEndpoint,
 				SecretsFile: "../../test/secret.yaml",
 				TestVolumeParameters: map[string]string{
-					"mounter": "goofys",
+					"mounter": "geesefs",
 					"bucket": "testbucket0",
 				},
 			}
@@ -41,8 +41,8 @@ var _ = Describe("S3Driver", func() {
 		})
 	})
-	Context("goofys-no-bucket", func() {
-		socket := "/tmp/csi-goofys-no-bucket.sock"
+	Context("geesefs-no-bucket", func() {
+		socket := "/tmp/csi-geesefs-no-bucket.sock"
 		csiEndpoint := "unix://" + socket
 		if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
 			Expect(err).NotTo(HaveOccurred())
@@ -55,12 +55,12 @@ var _ = Describe("S3Driver", func() {
 		Describe("CSI sanity", func() {
 			sanityCfg := &sanity.Config{
-				TargetPath: os.TempDir() + "/goofys-no-bucket-target",
-				StagingPath: os.TempDir() + "/goofys-no-bucket-staging",
+				TargetPath: os.TempDir() + "/geesefs-no-bucket-target",
+				StagingPath: os.TempDir() + "/geesefs-no-bucket-staging",
 				Address: csiEndpoint,
 				SecretsFile: "../../test/secret.yaml",
 				TestVolumeParameters: map[string]string{
-					"mounter": "goofys",
+					"mounter": "geesefs",
 				},
 			}
 			sanity.GinkgoTest(sanityCfg)

pkg/mounter/geesefs.go Normal file

@@ -0,0 +1,60 @@
+package mounter
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/ctrox/csi-s3/pkg/s3"
+)
+
+const (
+	geesefsCmd = "geesefs"
+	defaultRegion = "us-east-1"
+)
+
+// Implements Mounter
+type geesefsMounter struct {
+	meta *s3.FSMeta
+	endpoint string
+	region string
+	accessKeyID string
+	secretAccessKey string
+}
+
+func newGeeseFSMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
+	region := cfg.Region
+	// if endpoint is set we need a default region
+	if region == "" && cfg.Endpoint != "" {
+		region = defaultRegion
+	}
+	return &geesefsMounter{
+		meta: meta,
+		endpoint: cfg.Endpoint,
+		region: region,
+		accessKeyID: cfg.AccessKeyID,
+		secretAccessKey: cfg.SecretAccessKey,
+	}, nil
+}
+
+func (geesefs *geesefsMounter) Stage(stageTarget string) error {
+	return nil
+}
+
+func (geesefs *geesefsMounter) Unstage(stageTarget string) error {
+	return nil
+}
+
+func (geesefs *geesefsMounter) Mount(source string, target string) error {
+	fullPath := fmt.Sprintf("%s:%s", geesefs.meta.BucketName, geesefs.meta.Prefix)
+	// FIXME Add memory limits
+	args := []string{
+		"--endpoint", geesefs.endpoint,
+		"--region", geesefs.region,
+		"-o", "allow_other",
+		fullPath, target,
+	}
+	args = append(args, geesefs.meta.MountOptions...)
+	os.Setenv("AWS_ACCESS_KEY_ID", geesefs.accessKeyID)
+	os.Setenv("AWS_SECRET_ACCESS_KEY", geesefs.secretAccessKey)
+	return fuseMount(target, geesefsCmd, args)
+}
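
The Mount method above delegates the actual FUSE mount to the package's fuseMount helper, which is not part of this commit. For orientation only, here is a minimal sketch of what such a helper might do, assuming it simply executes the mounter binary (geesefs here) with the assembled arguments; the name fuseMountSketch and the error handling are illustrative, not the repository's actual implementation:

```go
package mounter

import (
	"fmt"
	"os"
	"os/exec"
)

// fuseMountSketch is a simplified stand-in for the real fuseMount helper.
// It runs the given FUSE binary (e.g. "geesefs") with the prepared arguments.
// geesefs, like goofys, daemonizes itself once the file system is mounted,
// so the foreground process exiting successfully serves as the ready signal.
func fuseMountSketch(target string, command string, args []string) error {
	cmd := exec.Command(command, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("error mounting %s via %s: %v", target, command, err)
	}
	return nil
}
```

With the argument list built in Mount, this amounts to running geesefs with --endpoint, --region, -o allow_other, the bucket:prefix path and the target directory, while the credentials are passed through the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables set just before the call.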

pkg/mounter/goofys.go Deleted file

@@ -1,60 +0,0 @@
-package mounter
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/ctrox/csi-s3/pkg/s3"
-)
-
-const (
-	goofysCmd = "goofys"
-	defaultRegion = "us-east-1"
-)
-
-// Implements Mounter
-type goofysMounter struct {
-	meta *s3.FSMeta
-	endpoint string
-	region string
-	accessKeyID string
-	secretAccessKey string
-}
-
-func newGoofysMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
-	region := cfg.Region
-	// if endpoint is set we need a default region
-	if region == "" && cfg.Endpoint != "" {
-		region = defaultRegion
-	}
-	return &goofysMounter{
-		meta: meta,
-		endpoint: cfg.Endpoint,
-		region: region,
-		accessKeyID: cfg.AccessKeyID,
-		secretAccessKey: cfg.SecretAccessKey,
-	}, nil
-}
-
-func (goofys *goofysMounter) Stage(stageTarget string) error {
-	return nil
-}
-
-func (goofys *goofysMounter) Unstage(stageTarget string) error {
-	return nil
-}
-
-func (goofys *goofysMounter) Mount(source string, target string) error {
-	fullPath := fmt.Sprintf("%s:%s", goofys.meta.BucketName, goofys.meta.Prefix)
-	// FIXME Add memory limits
-	args := []string{
-		"--endpoint", goofys.endpoint,
-		"--region", goofys.region,
-		"-o", "allow_other",
-		fullPath, target,
-	}
-	args = append(args, goofys.meta.MountOptions...)
-	os.Setenv("AWS_ACCESS_KEY_ID", goofys.accessKeyID)
-	os.Setenv("AWS_SECRET_ACCESS_KEY", goofys.secretAccessKey)
-	return fuseMount(target, goofysCmd, args)
-}


@@ -27,7 +27,7 @@ type Mounter interface {
 const (
 	s3fsMounterType = "s3fs"
-	goofysMounterType = "goofys"
+	geesefsMounterType = "geesefs"
 	s3backerMounterType = "s3backer"
 	rcloneMounterType = "rclone"
 	TypeKey = "mounter"
@@ -43,12 +43,12 @@ func New(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
 		mounter = cfg.Mounter
 	}
 	switch mounter {
+	case geesefsMounterType:
+		return newGeeseFSMounter(meta, cfg)
 	case s3fsMounterType:
 		return newS3fsMounter(meta, cfg)
-	case goofysMounterType:
-		return newGoofysMounter(meta, cfg)
 	case s3backerMounterType:
 		return newS3backerMounter(meta, cfg)
@@ -56,8 +56,8 @@ func New(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
 		return newRcloneMounter(meta, cfg)
 	default:
-		// default to s3backer
-		return newS3backerMounter(meta, cfg)
+		// default to GeeseFS
+		return newGeeseFSMounter(meta, cfg)
 	}
 }
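
Taken together, these changes make GeeseFS both an explicitly selectable mounter type and the fallback in the default branch of New. As a hedged usage sketch only (the pkg/mounter import path, the main wrapper and all field values are assumptions for illustration; the FSMeta and Config field names are the ones used in this diff), selecting the GeeseFS mounter through the factory might look like this:

```go
package main

import (
	"log"

	"github.com/ctrox/csi-s3/pkg/mounter"
	"github.com/ctrox/csi-s3/pkg/s3"
)

func main() {
	// Volume metadata: bucket, optional prefix and extra mount options
	// (appended verbatim to the geesefs argument list by the mounter).
	meta := &s3.FSMeta{
		BucketName:   "testbucket0",
		Prefix:       "",
		MountOptions: []string{},
	}
	// Connection settings; an unrecognized or empty Mounter value falls
	// through to the default branch above, which now also returns GeeseFS.
	cfg := &s3.Config{
		Mounter:         "geesefs",
		Endpoint:        "https://storage.example.com",
		Region:          "",
		AccessKeyID:     "EXAMPLE_ACCESS_KEY",
		SecretAccessKey: "EXAMPLE_SECRET_KEY",
	}

	m, err := mounter.New(meta, cfg)
	if err != nil {
		log.Fatalf("creating mounter: %v", err)
	}
	// The source argument is unused by the geesefs mounter.
	if err := m.Mount("", "/mnt/testbucket0"); err != nil {
		log.Fatalf("mounting bucket: %v", err)
	}
}
```

Note that a leftover mounter: goofys parameter in an existing storage class no longer matches any case in the switch, so it now falls through to the same GeeseFS default rather than selecting goofys.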