Add fs prefix (directory)

This keeps the filesystem root clean and stops the filesystem
contents from interfering with the metadata object. It also allows
multiple filesystems to be created in one bucket in the future.
Cyrill Troxler 2018-07-27 21:37:32 +02:00
parent 66390f76b7
commit e46440d1fa
6 changed files with 31 additions and 8 deletions
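
Net effect on the bucket layout, sketched below with hypothetical names; only metadataName and fsPrefix come from this commit, everything else in the snippet is illustrative:

package main

import "fmt"

// Constants as introduced/used by this commit.
const (
	metadataName = ".metadata.json" // metadata object stays at the bucket root
	fsPrefix     = "csi-fs"         // filesystem data now lives below this prefix
)

func main() {
	volumeID := "pvc-1234" // hypothetical bucket created for a volume
	fmt.Printf("bucket %q now contains:\n", volumeID)
	fmt.Println("  " + metadataName)   // written by setBucket
	fmt.Println("  " + fsPrefix + "/") // empty marker object written by createPrefix
}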


@@ -61,7 +61,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 		var b *bucket
 		b, err = cs.s3.client.getBucket(volumeID)
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("failed to get bucket: %v", err)
 		}
 		// Check if volume capacity requested is bigger than the already existing capacity
 		if capacityBytes > b.CapacityBytes {
@@ -69,16 +69,19 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 		}
 	} else {
 		if err = cs.s3.client.createBucket(volumeID); err != nil {
-			glog.V(3).Infof("failed to create volume: %v", err)
-			return nil, err
+			return nil, fmt.Errorf("failed to create volume: %v", err)
+		}
+		if err = cs.s3.client.createPrefix(volumeID, fsPrefix); err != nil {
+			return nil, fmt.Errorf("failed to create prefix: %v", err)
 		}
 	}
 	b := &bucket{
 		Name:          volumeID,
 		CapacityBytes: capacityBytes,
+		FSPath:        fsPrefix,
 	}
 	if err := cs.s3.client.setBucket(b); err != nil {
-		return nil, err
+		return nil, fmt.Errorf("Error setting bucket metadata: %v", err)
 	}
 	glog.V(4).Infof("create volume %s", volumeID)
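
Since the bucket struct gains an FSPath field (see the s3-client diff further down), the persisted metadata object grows with it. A rough sketch of what .metadata.json would now contain, assuming the struct is stored as plain JSON without field tags; the values are made up:

package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of the bucket struct from this commit; used only to illustrate the metadata shape.
type bucket struct {
	Name          string
	FSPath        string
	CapacityBytes int64
}

func main() {
	b := bucket{Name: "pvc-1234", FSPath: "csi-fs", CapacityBytes: 5 * 1024 * 1024 * 1024}
	out, _ := json.Marshal(b)
	fmt.Println(string(out))
	// {"Name":"pvc-1234","FSPath":"csi-fs","CapacityBytes":5368709120}
}

Metadata written before this change carries no FSPath field, so it simply decodes to an empty string when read back.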


@@ -60,8 +60,9 @@ func (goofys *goofysMounter) Mount(source string, target string) error {
 	os.Setenv("AWS_ACCESS_KEY_ID", goofys.accessKeyID)
 	os.Setenv("AWS_SECRET_ACCESS_KEY", goofys.secretAccessKey)

-	_, _, err := goofysApi.Mount(context.Background(), goofys.bucket.Name, goofysCfg)
+	fullPath := fmt.Sprintf("%s:%s", goofys.bucket.Name, goofys.bucket.FSPath)
+	_, _, err := goofysApi.Mount(context.Background(), fullPath, goofysCfg)
 	if err != nil {
 		return fmt.Errorf("Error mounting via goofys: %s", err)


@@ -2,6 +2,7 @@ package s3
 import (
 	"fmt"
+	"net/url"
 	"os"
 	"os/exec"
 	"path"
@@ -29,6 +30,11 @@ const (
 )

 func newS3backerMounter(bucket *bucket, cfg *Config) (Mounter, error) {
+	url, err := url.Parse(cfg.Endpoint)
+	if err != nil {
+		return nil, err
+	}
+	url.Path = path.Join(url.Path, bucket.Name, bucket.FSPath)
 	// s3backer cannot work with 0 size volumes
 	if bucket.CapacityBytes == 0 {
 		bucket.CapacityBytes = s3backerDefaultSize
@@ -88,8 +94,9 @@ func (s3backer *s3backerMounter) mountInit(path string) error {
 	args := []string{
 		// baseURL must end with /
 		fmt.Sprintf("--baseURL=%s/", s3backer.url),
-		fmt.Sprintf("--blockSize=%v", s3backerBlockSize),
+		fmt.Sprintf("--blockSize=%s", s3backerBlockSize),
 		fmt.Sprintf("--size=%v", s3backer.bucket.CapacityBytes),
+		fmt.Sprintf("--prefix=%s/", s3backer.bucket.FSPath),
 		"--listBlocks",
 		s3backer.bucket.Name,
 		path,
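
The new url handling above appends the bucket and prefix to the endpoint path; the --prefix=csi-fs/ flag additionally tells s3backer to key its block objects under that prefix. A standalone sketch of just the URL construction, using a hypothetical endpoint:

package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	u, err := url.Parse("https://s3.example.com") // hypothetical cfg.Endpoint
	if err != nil {
		panic(err)
	}
	u.Path = path.Join(u.Path, "pvc-1234", "csi-fs") // bucket.Name, bucket.FSPath
	fmt.Println(u.String()) // https://s3.example.com/pvc-1234/csi-fs
}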


@@ -39,7 +39,7 @@ func (s3fs *s3fsMounter) Mount(source string, target string) error {
 		return err
 	}
 	args := []string{
-		fmt.Sprintf("%s", s3fs.bucket.Name),
+		fmt.Sprintf("%s:/%s", s3fs.bucket.Name, s3fs.bucket.FSPath),
 		fmt.Sprintf("%s", target),
 		"-o", "sigv2",
 		"-o", "use_path_request_style",


@@ -49,7 +49,9 @@ func newS3qlMounter(b *bucket, cfg *Config) (Mounter, error) {
 		ssl:     ssl,
 	}
-	url.Path = path.Join(url.Path, b.Name)
+	// s3ql requires a trailing slash or it will just
+	// prepend the fspath to the s3ql files
+	url.Path = path.Join(url.Path, b.Name, b.FSPath) + "/"
 	s3ql.bucketURL = url.String()

 	if !ssl {
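
The trailing-slash comment is the important detail here: path.Join cleans its result and never emits a trailing slash, so it has to be appended by hand. A minimal stdlib demonstration:

package main

import (
	"fmt"
	"path"
)

func main() {
	fmt.Println(path.Join("", "pvc-1234", "csi-fs"))       // pvc-1234/csi-fs  (no trailing slash)
	fmt.Println(path.Join("", "pvc-1234", "csi-fs") + "/") // pvc-1234/csi-fs/ (what s3ql expects)
}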


@@ -13,6 +13,7 @@ import (
 const (
 	metadataName = ".metadata.json"
+	fsPrefix     = "csi-fs"
 )

 type s3Client struct {
@@ -22,6 +23,7 @@ type s3Client struct {
 type bucket struct {
 	Name          string
+	FSPath        string
 	CapacityBytes int64
 }
@@ -54,6 +56,14 @@ func (client *s3Client) createBucket(bucketName string) error {
 	return client.minio.MakeBucket(bucketName, client.cfg.Region)
 }

+func (client *s3Client) createPrefix(bucketName string, prefix string) error {
+	_, err := client.minio.PutObject(bucketName, prefix+"/", bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
 func (client *s3Client) removeBucket(bucketName string) error {
 	if err := client.emptyBucket(bucketName); err != nil {
 		return err