feat(storage): finish initial basic implementation

- support both S3 & file storage backends (exactly one per Storage resource; see the sketch below this list)
- support imgproxy for image scaling
- manually tested with MinIO & local storage
- fixed service discovery issue where the APIGateway reconciler did not
  detect service changes
- refactored defaults and env variable code to make it manageable again
- add repo link to docs
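
The central rule these changes enforce (in the validating webhook further down) is that exactly one storage backend is configured. A minimal, self-contained sketch of that check, using stand-in types rather than the real API structs:

package main

import (
	"errors"
	"fmt"
)

// Stand-in types for illustration only; the real check operates on
// storage.Spec.Api.FileBackend and storage.Spec.Api.S3Backend.
type fileBackendSpec struct{}

type s3BackendSpec struct{}

// validateBackends mirrors the exclusivity check added in the validating webhook:
// (file == nil) == (s3 == nil) is true exactly when both backends are set or
// neither is, i.e. the two invalid configurations.
func validateBackends(file *fileBackendSpec, s3 *s3BackendSpec) error {
	if (file == nil) == (s3 == nil) {
		return errors.New("configure either the file backend or the S3 backend, not both and not neither")
	}
	return nil
}

func main() {
	fmt.Println(validateBackends(nil, nil))                // error: no backend configured
	fmt.Println(validateBackends(&fileBackendSpec{}, nil)) // <nil>: exactly one backend configured
}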
Peter 2025-01-23 18:00:05 +01:00
parent 604525de38
commit 0014927ca9
Signed by: prskr
GPG key ID: F56BED6903BC5E37
46 changed files with 16170 additions and 606 deletions


@@ -57,7 +57,7 @@ func SetupDashboardWebhookWithManager(mgr ctrl.Manager) error {
 // SetupStorageWebhookWithManager registers the webhook for Storage in the manager.
 func SetupStorageWebhookWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewWebhookManagedBy(mgr).For(&supabasev1alpha1.Storage{}).
-		WithValidator(&StorageCustomValidator{}).
+		WithValidator(&StorageCustomValidator{Client: mgr.GetClient()}).
 		WithDefaulter(&StorageCustomDefaulter{Client: mgr.GetClient()}).
 		Complete()
 }


@@ -19,18 +19,12 @@ package v1alpha1
 import (
 	"context"
 	"fmt"
-	"maps"
 
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
 
 	supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
-	"code.icb4dc0.de/prskr/supabase-operator/internal/meta"
-	"code.icb4dc0.de/prskr/supabase-operator/internal/pw"
 )
 
 // +kubebuilder:webhook:path=/mutate-supabase-k8s-icb4dc0-de-v1alpha1-storage,mutating=true,failurePolicy=fail,sideEffects=None,groups=supabase.k8s.icb4dc0.de,resources=storages,verbs=create;update,versions=v1alpha1,name=mstorage-v1alpha1.kb.io,admissionReviewVersions=v1
@@ -55,55 +49,19 @@ func (d *StorageCustomDefaulter) Default(ctx context.Context, obj runtime.Object) error {
 	}
 
 	storagelog.Info("Defaulting for Storage", "name", storage.GetName())
 
-	if err := d.defaultS3Protocol(ctx, storage); err != nil {
-		return err
-	}
+	d.defaultS3Protocol(storage)
 
 	return nil
 }
 
-func (d *StorageCustomDefaulter) defaultS3Protocol(ctx context.Context, storage *supabasev1alpha1.Storage) error {
-	if storage.Spec.S3 == nil {
-		storage.Spec.S3 = new(supabasev1alpha1.S3ProtocolSpec)
+func (d *StorageCustomDefaulter) defaultS3Protocol(storage *supabasev1alpha1.Storage) {
+	if storage.Spec.Api.S3Protocol == nil {
+		storage.Spec.Api.S3Protocol = new(supabasev1alpha1.S3ProtocolSpec)
 	}
 
-	if storage.Spec.S3.CredentialsSecretRef == nil {
-		storage.Spec.S3.CredentialsSecretRef = &supabasev1alpha1.S3CredentialsRef{
-			AccessKeyIdKey:     "accessKeyId",
-			AccessSecretKeyKey: "secretAccessKey",
-			SecretName:         fmt.Sprintf("%s-storage-protocol-s3-credentials", storage.Name),
+	if storage.Spec.Api.S3Protocol.CredentialsSecretRef == nil {
+		storage.Spec.Api.S3Protocol.CredentialsSecretRef = &supabasev1alpha1.S3CredentialsRef{
+			SecretName: fmt.Sprintf("%s-storage-protocol-s3-credentials", storage.Name),
 		}
 	}
-
-	credentialsSecret := corev1.Secret{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      storage.Spec.S3.CredentialsSecretRef.SecretName,
-			Namespace: storage.Namespace,
-		},
-	}
-
-	_, err := controllerutil.CreateOrUpdate(ctx, d.Client, &credentialsSecret, func() error {
-		credentialsSecret.Labels = maps.Clone(storage.Labels)
-		if credentialsSecret.Labels == nil {
-			credentialsSecret.Labels = make(map[string]string)
-		}
-		credentialsSecret.Labels[meta.SupabaseLabel.Reload] = ""
-
-		if credentialsSecret.Data == nil {
-			credentialsSecret.Data = make(map[string][]byte, 2)
-		}
-
-		if _, ok := credentialsSecret.Data[storage.Spec.S3.CredentialsSecretRef.AccessKeyIdKey]; !ok {
-			credentialsSecret.Data[storage.Spec.S3.CredentialsSecretRef.AccessKeyIdKey] = pw.GeneratePW(32, nil)
-		}
-
-		if _, ok := credentialsSecret.Data[storage.Spec.S3.CredentialsSecretRef.AccessSecretKeyKey]; !ok {
-			credentialsSecret.Data[storage.Spec.S3.CredentialsSecretRef.AccessSecretKeyKey] = pw.GeneratePW(64, nil)
-		}
-
-		return nil
-	})
-
-	return err
 }
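
With this change the defaulter only allocates the S3 protocol spec and derives a conventional credentials Secret name; it no longer creates the Secret itself. A rough sketch of that defaulting behaviour with simplified stand-in types (the real code works on storage.Spec.Api.S3Protocol):

package main

import "fmt"

// Simplified stand-ins for supabasev1alpha1.S3CredentialsRef and S3ProtocolSpec.
type s3CredentialsRef struct {
	SecretName string
}

type s3ProtocolSpec struct {
	CredentialsSecretRef *s3CredentialsRef
}

// defaultS3Protocol mirrors the trimmed-down defaulter: allocate the protocol
// spec if it is missing and derive the credentials Secret name from the
// Storage object's name.
func defaultS3Protocol(storageName string, s3 *s3ProtocolSpec) *s3ProtocolSpec {
	if s3 == nil {
		s3 = new(s3ProtocolSpec)
	}
	if s3.CredentialsSecretRef == nil {
		s3.CredentialsSecretRef = &s3CredentialsRef{
			SecretName: fmt.Sprintf("%s-storage-protocol-s3-credentials", storageName),
		}
	}
	return s3
}

func main() {
	defaulted := defaultS3Protocol("example", nil)
	fmt.Println(defaulted.CredentialsSecretRef.SecretName) // example-storage-protocol-s3-credentials
}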


@@ -18,9 +18,13 @@ package v1alpha1
 import (
 	"context"
+	"errors"
 	"fmt"
 
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -42,35 +46,43 @@ var storagelog = logf.Log.WithName("storage-resource")
 // NOTE: The +kubebuilder:object:generate=false marker prevents controller-gen from generating DeepCopy methods,
 // as this struct is used only for temporary operations and does not need to be deeply copied.
 type StorageCustomValidator struct {
-	// TODO(user): Add more fields as needed for validation
+	client.Client
 }
 
 var _ webhook.CustomValidator = &StorageCustomValidator{}
 
 // ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Storage.
-func (v *StorageCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
+func (v *StorageCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
 	storage, ok := obj.(*supabasev1alpha1.Storage)
 	if !ok {
 		return nil, fmt.Errorf("expected a Storage object but got %T", obj)
 	}
 	storagelog.Info("Validation for Storage upon creation", "name", storage.GetName())
 
-	// TODO(user): fill in your validation logic upon object creation.
+	if ws, err := v.validateStorageApi(ctx, storage); err != nil {
+		return ws, err
+	} else {
+		warnings = append(warnings, ws...)
+	}
 
-	return nil, nil
+	return warnings, nil
 }
 
 // ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Storage.
-func (v *StorageCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
+func (v *StorageCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings admission.Warnings, err error) {
 	storage, ok := newObj.(*supabasev1alpha1.Storage)
 	if !ok {
 		return nil, fmt.Errorf("expected a Storage object for the newObj but got %T", newObj)
 	}
 	storagelog.Info("Validation for Storage upon update", "name", storage.GetName())
 
-	// TODO(user): fill in your validation logic upon object update.
+	if ws, err := v.validateStorageApi(ctx, storage); err != nil {
+		return ws, err
+	} else {
+		warnings = append(warnings, ws...)
+	}
 
-	return nil, nil
+	return warnings, nil
 }
 
 // ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Storage.
@@ -81,7 +93,49 @@ func (v *StorageCustomValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
 	}
 	storagelog.Info("Validation for Storage upon deletion", "name", storage.GetName())
 
 	// TODO(user): fill in your validation logic upon object deletion.
 
 	return nil, nil
 }
+
+func (v *StorageCustomValidator) validateStorageApi(ctx context.Context, storage *supabasev1alpha1.Storage) (admission.Warnings, error) {
+	var warnings admission.Warnings
+
+	apiSpec := storage.Spec.Api
+
+	if (apiSpec.FileBackend == nil) == (apiSpec.S3Backend == nil) {
+		return nil, errors.New("it is not possible to configure both backends or no backend at all - please configure either the file or the S3 backend")
+	}
+
+	if apiSpec.S3Backend != nil {
+		if apiSpec.S3Backend.CredentialsSecretRef == nil {
+			return nil, errors.New(".spec.api.s3Backend.credentialsSecretRef cannot be empty")
+		}
+
+		s3CredentialsSecret := &corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: apiSpec.S3Backend.CredentialsSecretRef.SecretName,
+			},
+		}
+
+		if err := v.Get(ctx, client.ObjectKeyFromObject(s3CredentialsSecret), s3CredentialsSecret); err != nil {
+			if client.IgnoreNotFound(err) == nil {
+				warnings = append(warnings, fmt.Sprintf("Secret %q could not be found", apiSpec.S3Backend.CredentialsSecretRef.SecretName))
+			} else {
+				return nil, err
+			}
+		} else {
+			if accessKeyId, ok := s3CredentialsSecret.Data[apiSpec.S3Backend.CredentialsSecretRef.AccessKeyIdKey]; !ok {
+				return warnings, fmt.Errorf("secret %q does not contain an access key id at specified key %q", apiSpec.S3Backend.CredentialsSecretRef.SecretName, apiSpec.S3Backend.CredentialsSecretRef.AccessKeyIdKey)
+			} else if len(accessKeyId) == 0 {
+				return warnings, fmt.Errorf("access key id in Secret %q with key %q is empty", apiSpec.S3Backend.CredentialsSecretRef.SecretName, apiSpec.S3Backend.CredentialsSecretRef.AccessKeyIdKey)
+			}
+
+			if accessSecretKey, ok := s3CredentialsSecret.Data[apiSpec.S3Backend.CredentialsSecretRef.AccessSecretKeyKey]; !ok {
+				return warnings, fmt.Errorf("secret %q does not contain an access secret key at specified key %q", apiSpec.S3Backend.CredentialsSecretRef.SecretName, apiSpec.S3Backend.CredentialsSecretRef.AccessSecretKeyKey)
+			} else if len(accessSecretKey) == 0 {
+				return warnings, fmt.Errorf("access secret key in Secret %q with key %q is empty", apiSpec.S3Backend.CredentialsSecretRef.SecretName, apiSpec.S3Backend.CredentialsSecretRef.AccessSecretKeyKey)
+			}
+		}
+	}
+
+	return warnings, nil
+}
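
For the S3 backend, validateStorageApi also fetches the referenced credentials Secret and requires both configured keys to be present and non-empty. A standalone sketch of just that data check; the key names mirror the defaults visible elsewhere in this commit, while the real code reads them from the CredentialsSecretRef:

package main

import "fmt"

// checkCredentialData mimics the per-key checks the validator performs on the
// referenced Secret's data: every expected key must exist and hold a non-empty value.
func checkCredentialData(data map[string][]byte, keys ...string) error {
	for _, key := range keys {
		value, ok := data[key]
		if !ok {
			return fmt.Errorf("secret does not contain an entry at key %q", key)
		}
		if len(value) == 0 {
			return fmt.Errorf("secret entry at key %q is empty", key)
		}
	}
	return nil
}

func main() {
	data := map[string][]byte{
		"accessKeyId":     []byte("AKIA..."),
		"secretAccessKey": {}, // present but empty, so the check fails
	}
	fmt.Println(checkCredentialData(data, "accessKeyId", "secretAccessKey"))
}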