k8s-csi-s3/pkg/s3/s3-driver_suite_test.go

package s3_test

import (
	"io/ioutil"
	"log"
	"os"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/ctrox/csi-s3-driver/pkg/s3"
	"github.com/kubernetes-csi/csi-test/pkg/sanity"
)
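// The suite below runs the CSI sanity tests from kubernetes-csi/csi-test once
// per supported mounter. All contexts share a single temporary mount directory
// (removed in AfterSuite) and use dummy credentials against an S3 endpoint at
// 127.0.0.1:9000, which is assumed to be a local S3-compatible server such as
// Minio started outside of this test.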
var _ = Describe("S3Driver", func() {
	mntDir, err := ioutil.TempDir("", "mnt")
	if err != nil {
		Expect(err).NotTo(HaveOccurred())
	}

	AfterSuite(func() {
		os.RemoveAll(mntDir)
	})
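	// Every Context follows the same pattern: remove a stale socket left over
	// from a previous run, start the driver for one mounter on its own Unix
	// socket in a goroutine, and point the sanity suite at that endpoint.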
Context("goofys", func() {
socket := "/tmp/csi-goofys.sock"
csiEndpoint := "unix://" + socket
cfg := &s3.Config{
AccessKeyID: "FJDSJ",
SecretAccessKey: "DSG643HGDS",
Endpoint: "http://127.0.0.1:9000",
Mounter: "goofys",
}
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred())
}
driver, err := s3.NewS3("test-node", csiEndpoint, cfg)
if err != nil {
log.Fatal(err)
}
go driver.Run()
Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{
TargetPath: mntDir,
Address: csiEndpoint,
TestVolumeSize: 1,
}
sanity.GinkgoTest(sanityCfg)
})
})
Context("s3fs", func() {
socket := "/tmp/csi-s3fs.sock"
csiEndpoint := "unix://" + socket
cfg := &s3.Config{
AccessKeyID: "FJDSJ",
SecretAccessKey: "DSG643HGDS",
Endpoint: "http://127.0.0.1:9000",
Mounter: "s3fs",
}
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred())
}
driver, err := s3.NewS3("test-node", csiEndpoint, cfg)
if err != nil {
log.Fatal(err)
}
go driver.Run()
defer os.RemoveAll(mntDir)
Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{
TargetPath: mntDir,
Address: csiEndpoint,
TestVolumeSize: 1,
}
sanity.GinkgoTest(sanityCfg)
})
})
Context("s3ql", func() {
socket := "/tmp/csi-s3ql.sock"
csiEndpoint := "unix://" + socket
cfg := &s3.Config{
AccessKeyID: "FJDSJ",
SecretAccessKey: "DSG643HGDS",
Endpoint: "http://127.0.0.1:9000",
Mounter: "s3ql",
}
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred())
}
driver, err := s3.NewS3("test-node", csiEndpoint, cfg)
if err != nil {
log.Fatal(err)
}
go driver.Run()
defer os.RemoveAll(mntDir)
Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{
TargetPath: mntDir,
Address: csiEndpoint,
TestVolumeSize: 1,
}
sanity.GinkgoTest(sanityCfg)
})
})
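	// s3backer is the experimental mounter. It differs from the FUSE file
	// systems above in that it exposes the bucket as a block device rather
	// than a plain file system, but it is exercised through the same flow.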
Context("s3backer", func() {
socket := "/tmp/csi-s3backer.sock"
csiEndpoint := "unix://" + socket
cfg := &s3.Config{
AccessKeyID: "FJDSJ",
SecretAccessKey: "DSG643HGDS",
Endpoint: "http://127.0.0.1:9000",
Mounter: "s3backer",
}
if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
Expect(err).NotTo(HaveOccurred())
}
driver, err := s3.NewS3("test-node", csiEndpoint, cfg)
if err != nil {
log.Fatal(err)
}
go driver.Run()
defer os.RemoveAll(mntDir)
Describe("CSI sanity", func() {
sanityCfg := &sanity.Config{
TargetPath: mntDir,
Address: csiEndpoint,
TestVolumeSize: 1,
}
sanity.GinkgoTest(sanityCfg)
})
})
})