Remove s3backer mounter as it's mostly unusable

This commit is contained in:
Vitaliy Filippov 2021-07-26 14:54:44 +03:00
parent f723bccaf3
commit 1f29b29459
7 changed files with 2 additions and 264 deletions

View file

@@ -18,21 +18,17 @@ REGISTRY_NAME=ctrox
IMAGE_NAME=csi-s3
VERSION ?= dev
IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(VERSION)
FULL_IMAGE_TAG=$(IMAGE_TAG)-full
TEST_IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):test
build:
CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/s3driver ./cmd/s3driver
test:
docker build -t $(FULL_IMAGE_TAG) -f cmd/s3driver/Dockerfile.full .
docker build -t $(TEST_IMAGE_TAG) -f test/Dockerfile .
docker run --rm --privileged -v $(PWD):$(PROJECT_DIR) --device /dev/fuse $(TEST_IMAGE_TAG)
container:
docker build -t $(IMAGE_TAG) -f cmd/s3driver/Dockerfile .
docker build -t $(FULL_IMAGE_TAG) -f cmd/s3driver/Dockerfile.full .
push: container
docker push $(IMAGE_TAG)
docker push $(FULL_IMAGE_TAG)
clean:
go clean -r -x
-rm -rf _output

View file

@@ -113,7 +113,6 @@ The driver can be configured to use one of these mounters to mount buckets:
* [geesefs](https://github.com/yandex-cloud/geesefs) (recommended and default)
* [s3fs](https://github.com/s3fs-fuse/s3fs-fuse)
* [rclone](https://rclone.org/commands/rclone_mount)
* [s3backer](https://github.com/archiecobbs/s3backer)
The mounter can be set as a parameter in the storage class. You can also create multiple storage classes for each mounter if you like.
@@ -138,18 +137,6 @@ Characteristics of different mounters (for more detailed information consult the
* Files can be viewed normally with any S3 client
* Doesn't create directory objects like s3fs or GeeseFS
#### s3backer (experimental*)
* Represents a block device stored on S3
* Allows to use a real filesystem
* Files are not readable with other S3 clients
* Supports appends
* Supports compression before upload (Not yet implemented in this driver)
* Supports encryption before upload (Not yet implemented in this driver)
*s3backer is experimental at this point because volume corruption can occur pretty quickly in case of an unexpected shutdown of a Kubernetes node or CSI pod.
The s3backer binary is not bundled with the normal docker image to keep that as small as possible. Use the `<version>-full` image tag for testing s3backer.
## Troubleshooting
### Issues while creating PVC

View file

@@ -1,56 +0,0 @@
FROM golang:1.16-alpine as gobuild
WORKDIR /build
ADD . /build
RUN go get -d -v ./...
RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o ./s3driver ./cmd/s3driver
FROM debian:buster-slim as s3backer
ARG S3BACKER_VERSION=1.5.0
RUN apt-get update && apt-get install -y \
build-essential \
autoconf \
libcurl4-openssl-dev \
libfuse-dev \
libexpat1-dev \
libssl-dev \
zlib1g-dev \
psmisc \
pkg-config \
git && \
rm -rf /var/lib/apt/lists/*
# Compile & install s3backer
RUN git clone https://github.com/archiecobbs/s3backer.git /src/s3backer
WORKDIR /src/s3backer
RUN git checkout tags/${S3BACKER_VERSION}
RUN ./autogen.sh && \
./configure && \
make && \
make install
FROM debian:buster-slim
LABEL maintainers="Cyrill Troxler <cyrilltroxler@gmail.com>"
LABEL description="csi-s3 image"
COPY --from=s3backer /usr/bin/s3backer /usr/bin/s3backer
# s3fs and some other dependencies
RUN apt-get update && \
apt-get install -y \
libfuse2 gcc sqlite3 libsqlite3-dev \
s3fs psmisc procps libcurl4 xfsprogs curl unzip && \
rm -rf /var/lib/apt/lists/*
# install rclone
ARG RCLONE_VERSION=v1.54.1
RUN cd /tmp \
&& curl -O https://downloads.rclone.org/${RCLONE_VERSION}/rclone-${RCLONE_VERSION}-linux-amd64.zip \
&& unzip /tmp/rclone-${RCLONE_VERSION}-linux-amd64.zip \
&& mv /tmp/rclone-*-linux-amd64/rclone /usr/bin \
&& rm -r /tmp/rclone*
COPY --from=gobuild /build/s3driver /s3driver
ENTRYPOINT ["/s3driver"]

View file

@@ -94,36 +94,6 @@ var _ = Describe("S3Driver", func() {
})
})
Context("s3backer", func() {
socket := "/tmp/csi-s3backer.sock"
csiEndpoint := "unix://" + socket
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred())
}
// Clear loop device so we cover the creation of it
os.Remove(mounter.S3backerLoopDevice)
driver, err := driver.New("test-node", csiEndpoint)
if err != nil {
log.Fatal(err)
}
go driver.Run()
Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{
TargetPath: os.TempDir() + "/s3backer-target",
StagingPath: os.TempDir() + "/s3backer-staging",
Address: csiEndpoint,
SecretsFile: "../../test/secret.yaml",
TestVolumeParameters: map[string]string{
"mounter": "s3backer",
"bucket": "testbucket2",
},
}
sanity.GinkgoTest(sanityCfg)
})
})
Context("rclone", func() {
socket := "/tmp/csi-rclone.sock"
csiEndpoint := "unix://" + socket

View file

@@ -28,7 +28,6 @@ type Mounter interface {
const (
s3fsMounterType = "s3fs"
geesefsMounterType = "geesefs"
s3backerMounterType = "s3backer"
rcloneMounterType = "rclone"
TypeKey = "mounter"
BucketKey = "bucket"
@@ -49,9 +48,6 @@ func New(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
case s3fsMounterType:
return newS3fsMounter(meta, cfg)
case s3backerMounterType:
return newS3backerMounter(meta, cfg)
case rcloneMounterType:
return newRcloneMounter(meta, cfg)

View file

@@ -1,155 +0,0 @@
package mounter
import (
"fmt"
"net/url"
"os"
"os/exec"
"path"
"github.com/ctrox/csi-s3/pkg/s3"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/mount"
)
// s3backerMounter implements Mounter by storing volume data as an s3backer
// block device: the bucket is fuse-mounted as one backing "file", which is
// then formatted and mounted as a regular filesystem.
type s3backerMounter struct {
	meta            *s3.FSMeta // volume metadata: bucket, prefix, capacity
	url             string     // raw S3 endpoint, passed to s3backer as --baseURL
	region          string     // S3 region; when set, --baseURL is omitted (AWS)
	accessKeyID     string
	secretAccessKey string
	ssl             bool // true when the endpoint scheme is https
}

const (
	s3backerCmd    = "s3backer"
	s3backerFsType = "xfs"
	// s3backerDevice is the name of the single backing "file" that s3backer
	// exposes inside its fuse mount.
	s3backerDevice = "file"
	// blockSize to use in k
	s3backerBlockSize = "128k"
	// default capacity when the request specifies none; s3backer cannot
	// work with zero-size volumes.
	s3backerDefaultSize = 1024 * 1024 * 1024 // 1GiB
	// S3backerLoopDevice the loop device required by s3backer
	S3backerLoopDevice = "/dev/loop0"
)
// newS3backerMounter builds a Mounter that stores volume data as an s3backer
// block device inside the bucket described by meta. As a side effect it
// writes the ~/.s3backer_passwd credentials file that the s3backer binary
// reads at mount time.
func newS3backerMounter(meta *s3.FSMeta, cfg *s3.Config) (Mounter, error) {
	// Parse the endpoint only to detect whether TLS is in use. The raw
	// cfg.Endpoint string is what s3backer later receives as --baseURL.
	// (NOTE: the previous local was named `url`, shadowing the imported
	// net/url package; it also joined bucket/prefix into the parsed path
	// without ever using the result — dead code, removed.)
	endpoint, err := url.Parse(cfg.Endpoint)
	if err != nil {
		return nil, err
	}
	// s3backer cannot work with 0 size volumes
	if meta.CapacityBytes == 0 {
		meta.CapacityBytes = s3backerDefaultSize
	}
	s3backer := &s3backerMounter{
		meta:            meta,
		url:             cfg.Endpoint,
		region:          cfg.Region,
		accessKeyID:     cfg.AccessKeyID,
		secretAccessKey: cfg.SecretAccessKey,
		ssl:             endpoint.Scheme == "https",
	}
	return s3backer, s3backer.writePasswd()
}
// String identifies the volume as "<bucket>/<prefix>" for logging.
func (s3backer *s3backerMounter) String() string {
	m := s3backer.meta
	return path.Join(m.BucketName, m.Prefix)
}
// Stage prepares the fuse half of the two-step s3backer mount: it fuse
// mounts the bucket as a single backing "file" under stageTarget and makes
// sure that file carries an xfs filesystem.
func (s3backer *s3backerMounter) Stage(stageTarget string) error {
	// s3backer needs the loop device to exist before it can attach.
	if err := createLoopDevice(S3backerLoopDevice); err != nil {
		return err
	}
	// First of the two mounts: expose the bucket contents as one block file.
	if err := s3backer.mountInit(stageTarget); err != nil {
		return err
	}
	// Format the backing file if needed; undo the fuse mount on failure.
	if err := formatFs(s3backerFsType, path.Join(stageTarget, s3backerDevice)); err != nil {
		FuseUnmount(stageTarget)
		return err
	}
	return nil
}
// Unstage tears down the fuse mount that backs the block "file";
// the second-level filesystem mount is handled separately by the caller.
func (s3backer *s3backerMounter) Unstage(stageTarget string) error {
	err := FuseUnmount(stageTarget)
	return err
}
// Mount performs the second mount step: the backing "file" created during
// Stage is mounted at target as a regular xfs filesystem.
func (s3backer *s3backerMounter) Mount(source string, target string) error {
	device := path.Join(source, s3backerDevice)
	if err := mount.New("").Mount(device, target, s3backerFsType, []string{}); err != nil {
		// The fuse layer is useless without the fs mount; tear it down.
		FuseUnmount(target)
		return err
	}
	return nil
}
// mountInit launches the s3backer fuse process so that the bucket shows up
// under p as a single block "file". The argv order (flags, bucket,
// mountpoint, then endpoint flags) is kept exactly as s3backer expects it.
func (s3backer *s3backerMounter) mountInit(p string) error {
	args := []string{
		"--blockSize=" + s3backerBlockSize,
		fmt.Sprintf("--size=%v", s3backer.meta.CapacityBytes),
		fmt.Sprintf("--prefix=%s/", s3backer.meta.Prefix),
		"--listBlocks",
		s3backer.meta.BucketName,
		p,
	}
	if s3backer.region != "" {
		args = append(args, "--region="+s3backer.region)
	} else {
		// only set baseURL if not on AWS (region is not set)
		// baseURL must end with /
		args = append(args, fmt.Sprintf("--baseURL=%s/", s3backer.url))
	}
	if s3backer.ssl {
		args = append(args, "--ssl")
	}
	return fuseMount(p, s3backerCmd, args)
}
// writePasswd persists the S3 credentials to ~/.s3backer_passwd in the
// "accessKey:secretKey" format expected by the s3backer binary.
// os.WriteFile truncates any previous content — so a shorter new credential
// cannot leave stale trailing bytes behind — and always closes the file;
// the former OpenFile/WriteString sequence leaked the descriptor on a write
// error and opened without O_TRUNC.
// NOTE(review): if HOME is unset this writes to "/.s3backer_passwd" —
// preserved from the original behavior.
func (s3backer *s3backerMounter) writePasswd() error {
	pwFileName := fmt.Sprintf("%s/.s3backer_passwd", os.Getenv("HOME"))
	creds := s3backer.accessKeyID + ":" + s3backer.secretAccessKey
	return os.WriteFile(pwFileName, []byte(creds), 0600)
}
// formatFs formats device with mkfs.<fsType> unless it already carries a
// filesystem, in which case it is a no-op. Returns an error describing both
// the mkfs output and the underlying exec error on failure.
func formatFs(fsType string, device string) error {
	diskMounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: mount.NewOsExec()}
	format, err := diskMounter.GetDiskFormat(device)
	if err != nil {
		return err
	}
	if format != "" {
		// Never reformat: that would destroy existing volume data.
		glog.Infof("Disk %s is already formatted with format %s", device, format)
		return nil
	}
	// Log before running mkfs (the old code logged after the work was done).
	glog.Infof("Formatting %s with fs type %s", device, fsType)
	cmd := exec.Command("mkfs."+fsType, device)
	out, err := cmd.CombinedOutput()
	if err != nil {
		// Keep the underlying error too; it was previously discarded.
		return fmt.Errorf("formatting disk with mkfs.%s: %v: %s", fsType, err, out)
	}
	return nil
}

View file

@@ -1,5 +1,5 @@
FROM ctrox/csi-s3:dev-full
LABEL maintainers="Cyrill Troxler <cyrilltroxler@gmail.com>"
FROM ctrox/csi-s3:dev
LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
LABEL description="csi-s3 testing image"
RUN apt-get update && \