feat(storage): finish initial basic implementation

- support both s3 & file storage backends
- support imgproxy to scale images
- manually tested with MinIO & local storage
- fixed service discovery issue in APIGateway reconciler not detecting
  service changes
- refactored defaults and env variable code to make it manageable again
- add repo link to docs
This commit is contained in:
Peter 2025-01-23 18:00:05 +01:00
parent 604525de38
commit 0014927ca9
Signed by: prskr
GPG key ID: F56BED6903BC5E37
46 changed files with 16170 additions and 606 deletions

View file

@ -35,8 +35,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
"code.icb4dc0.de/prskr/supabase-operator/internal/meta"
@ -129,12 +131,23 @@ func (r *APIGatewayReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma
return fmt.Errorf("constructor selector for watching secrets: %w", err)
}
apiGatewayTargetSelector, err := predicate.LabelSelectorPredicate(metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{
Key: meta.SupabaseLabel.ApiGatewayTarget,
Operator: metav1.LabelSelectorOpExists,
}},
})
if err != nil {
return fmt.Errorf("failed to build selector for watching API target services: %w", err)
}
return ctrl.NewControllerManagedBy(mgr).
For(&supabasev1alpha1.APIGateway{}).
Named("apigateway").
Owns(new(corev1.ConfigMap)).
Owns(new(appsv1.Deployment)).
Owns(new(corev1.Service)).
// watch JWKS secret
Watches(
new(corev1.Secret),
FieldSelectorEventHandler[*supabasev1alpha1.APIGateway, *supabasev1alpha1.APIGatewayList](r.Client,
@ -145,9 +158,49 @@ func (r *APIGatewayReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma
reloadSelector,
),
).
Watches(
new(corev1.Service),
r.apiTargetServiceEventHandler(),
builder.WithPredicates(apiGatewayTargetSelector),
).
Complete(r)
}
// apiTargetServiceEventHandler maps Service events to APIGateway reconcile
// requests.
//
// The value of the API-gateway-target label on the Service decides the
// mapping: a non-empty value enqueues only the gateway with that name, an
// empty value enqueues every APIGateway in the Service's namespace.
func (r *APIGatewayReconciler) apiTargetServiceEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] {
	return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
		var (
			list   supabasev1alpha1.APIGatewayList
			logger = log.FromContext(ctx, "object", obj.GetName(), "namespace", obj.GetNamespace())
		)

		targetName, ok := obj.GetLabels()[meta.SupabaseLabel.ApiGatewayTarget]
		if !ok {
			logger.Info("Service is not APIGateway target")
			return nil
		}

		if err := r.Client.List(ctx, &list, client.InNamespace(obj.GetNamespace())); err != nil {
			logger.Error(err, "Failed to list APIGateways to map updates to APIGateway reconciliation requests")
			return nil
		}

		if targetName != "" {
			// Non-empty label value: enqueue only the named gateway.
			for gw := range list.Iter() {
				if gw.Name == targetName {
					return []reconcile.Request{{NamespacedName: client.ObjectKeyFromObject(gw)}}
				}
			}
			return nil
		}

		// Empty label value: enqueue all gateways in the namespace.
		// Bug fix: the previous implementation built this slice but then
		// unconditionally returned nil, so these events were dropped.
		requests := make([]reconcile.Request, 0, len(list.Items))
		for gw := range list.Iter() {
			requests = append(requests, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(gw)})
		}
		return requests
	})
}
func (r *APIGatewayReconciler) reconcileJwksSecret(
ctx context.Context,
gateway *supabasev1alpha1.APIGateway,

View file

@ -19,7 +19,7 @@ package controller
import (
"bytes"
"context"
"crypto/sha256"
"hash/fnv"
"maps"
"net/url"
"time"
@ -173,7 +173,7 @@ func (r *CoreDbReconciler) ensureDbRolesSecrets(
core.Status.Database.Roles = make(map[string][]byte)
}
hash := sha256.New()
hash := fnv.New64a()
for secretName, role := range roles {
secretLogger := logger.WithValues("secret_name", secretName, "role_name", role.String())

View file

@ -71,7 +71,7 @@ func (r *CoreAuthReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r
// SetupWithManager sets up the controller with the Manager.
func (r *CoreAuthReconciler) SetupWithManager(mgr ctrl.Manager) error {
// TODO watch changes in DB credentials secret
// TODO watch changes in DB credentials secret & JWT secret
return ctrl.NewControllerManagedBy(mgr).
For(new(supabasev1alpha1.Core)).
Owns(new(appsv1.Deployment)).

View file

@ -91,10 +91,7 @@ func (r *CorePostgrestReconiler) reconilePostgrestDeployment(
postgrestDeployment = &appsv1.Deployment{
ObjectMeta: serviceCfg.ObjectMeta(core),
}
postgrestSpec = core.Spec.Postgrest
)
var (
postgrestSpec = core.Spec.Postgrest
anonRole = ValueOrFallback(postgrestSpec.AnonRole, serviceCfg.Defaults.AnonRole)
postgrestSchemas = ValueOrFallback(postgrestSpec.Schemas, serviceCfg.Defaults.Schemas)
jwtSecretHash string
@ -178,12 +175,12 @@ func (r *CorePostgrestReconiler) reconilePostgrestDeployment(
Env: postgrestSpec.WorkloadTemplate.MergeEnv(postgrestEnv),
Ports: []corev1.ContainerPort{
{
Name: "rest",
Name: serviceCfg.Defaults.ServerPortName,
ContainerPort: serviceCfg.Defaults.ServerPort,
Protocol: corev1.ProtocolTCP,
},
{
Name: "admin",
Name: serviceCfg.Defaults.AdminPortName,
ContainerPort: serviceCfg.Defaults.AdminPort,
Protocol: corev1.ProtocolTCP,
},
@ -234,13 +231,16 @@ func (r *CorePostgrestReconiler) reconcilePostgrestService(
ctx context.Context,
core *supabasev1alpha1.Core,
) error {
postgrestService := &corev1.Service{
ObjectMeta: supabase.ServiceConfig.Postgrest.ObjectMeta(core),
}
var (
serviceCfg = supabase.ServiceConfig.Postgrest
postgrestService = &corev1.Service{
ObjectMeta: supabase.ServiceConfig.Postgrest.ObjectMeta(core),
}
)
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, postgrestService, func() error {
postgrestService.Labels = core.Spec.Postgrest.WorkloadTemplate.MergeLabels(
objectLabels(core, supabase.ServiceConfig.Postgrest.Name, "core", supabase.Images.Postgrest.Tag),
objectLabels(core, serviceCfg.Name, "core", supabase.Images.Postgrest.Tag),
core.Labels,
)
@ -249,14 +249,14 @@ func (r *CorePostgrestReconiler) reconcilePostgrestService(
}
postgrestService.Spec = corev1.ServiceSpec{
Selector: selectorLabels(core, supabase.ServiceConfig.Postgrest.Name),
Selector: selectorLabels(core, serviceCfg.Name),
Ports: []corev1.ServicePort{
{
Name: "rest",
Name: serviceCfg.Defaults.ServerPortName,
Protocol: corev1.ProtocolTCP,
AppProtocol: ptrOf("http"),
Port: 3000,
TargetPort: intstr.IntOrString{IntVal: 3000},
Port: serviceCfg.Defaults.ServerPort,
TargetPort: intstr.IntOrString{IntVal: serviceCfg.Defaults.ServerPort},
},
},
}

View file

@ -0,0 +1,309 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"encoding/base64"
"fmt"
"slices"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
"code.icb4dc0.de/prskr/supabase-operator/internal/meta"
"code.icb4dc0.de/prskr/supabase-operator/internal/supabase"
)
// StorageApiReconciler reconciles a Storage object by managing the storage
// API Deployment and Service (see reconcileStorageApiDeployment and
// reconcileStorageApiService).
type StorageApiReconciler struct {
	client.Client
	// Scheme is used to resolve type information, e.g. when setting owner
	// references on managed objects.
	Scheme *runtime.Scheme
}
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.4/pkg/reconcile
// Reconcile fetches the Storage instance and drives its storage API
// Deployment and Service towards the desired state.
//
// A missing Storage object ends reconciliation without error; any other
// retrieval error is returned so the request gets retried.
func (r *StorageApiReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	var (
		storage supabasev1alpha1.Storage
		logger  = log.FromContext(ctx)
	)

	if err := r.Get(ctx, req.NamespacedName, &storage); err != nil {
		// Bug fix: the condition was inverted before - real errors were
		// swallowed while not-found errors were returned for retry.
		if client.IgnoreNotFound(err) == nil {
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}

	logger.Info("Reconciling Storage API")

	if err := r.reconcileStorageApiDeployment(ctx, &storage); err != nil {
		return ctrl.Result{}, err
	}

	if err := r.reconcileStorageApiService(ctx, &storage); err != nil {
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *StorageApiReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&supabasev1alpha1.Storage{}).
Named("storage-api").
Owns(new(corev1.Secret)).
Owns(new(appsv1.Deployment)).
Owns(new(corev1.Service)).
Complete(r)
}
// reconcileStorageApiDeployment creates or updates the storage API Deployment
// for the given Storage instance.
//
// It first reads the JWT secret and the S3-protocol credentials secret and
// encodes a hash of the relevant keys into pod template annotations, so that
// a change to either secret alters the pod template and triggers a rollout.
// Returns any error from reading the secrets or applying the Deployment.
func (r *StorageApiReconciler) reconcileStorageApiDeployment(
	ctx context.Context,
	storage *supabasev1alpha1.Storage,
) error {
	var (
		serviceCfg           = supabase.ServiceConfig.Storage
		apiSpec              = storage.Spec.Api
		storageApiDeployment = &appsv1.Deployment{
			ObjectMeta: serviceCfg.ObjectMeta(storage),
		}
		// Secret referenced by the API spec that holds the JWT material.
		jwtSecret = &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      apiSpec.JwtAuth.SecretName,
				Namespace: storage.Namespace,
			},
		}
		// Secret holding the S3-protocol access key id/secret pair.
		s3ProtocolSecret = &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      apiSpec.S3Protocol.CredentialsSecretRef.SecretName,
				Namespace: storage.Namespace,
			},
		}
		jwtStateHash, s3ProtoCredentialsStateHash string
	)

	if err := r.Get(ctx, client.ObjectKeyFromObject(jwtSecret), jwtSecret); err != nil {
		return err
	}

	// Hash over JWT secret & JWKS values: stored as a pod annotation below so
	// credential rotation restarts the pods.
	jwtStateHash = base64.StdEncoding.EncodeToString(HashBytes(
		jwtSecret.Data[apiSpec.JwtAuth.SecretKey],
		jwtSecret.Data[apiSpec.JwtAuth.JwksKey],
	))

	if err := r.Get(ctx, client.ObjectKeyFromObject(s3ProtocolSecret), s3ProtocolSecret); err != nil {
		return err
	}

	// Same trick for the S3-protocol credentials.
	s3ProtoCredentialsStateHash = base64.StdEncoding.EncodeToString(HashBytes(
		s3ProtocolSecret.Data[apiSpec.S3Protocol.CredentialsSecretRef.AccessKeyIdKey],
		s3ProtocolSecret.Data[apiSpec.S3Protocol.CredentialsSecretRef.AccessSecretKeyKey],
	))

	_, err := controllerutil.CreateOrUpdate(ctx, r.Client, storageApiDeployment, func() error {
		storageApiDeployment.Labels = apiSpec.WorkloadTemplate.MergeLabels(
			objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.Storage.Tag),
			storage.Labels,
		)

		// NOTE(review): "storagApiEnv" is missing an 'e' - left unchanged
		// here to keep this a comment-only change.
		storagApiEnv := []corev1.EnvVar{
			{
				// DB username comes from the referenced credentials secret so
				// it is never stored in the pod spec directly.
				Name: "DB_USERNAME",
				ValueFrom: &corev1.EnvVarSource{
					SecretKeyRef: &corev1.SecretKeySelector{
						LocalObjectReference: corev1.LocalObjectReference{
							Name: apiSpec.DBSpec.DBCredentialsRef.SecretName,
						},
						Key: apiSpec.DBSpec.DBCredentialsRef.UsernameKey,
					},
				},
			},
			{
				Name: "DB_PASSWORD",
				ValueFrom: &corev1.EnvVarSource{
					SecretKeyRef: &corev1.SecretKeySelector{
						LocalObjectReference: corev1.LocalObjectReference{
							Name: apiSpec.DBSpec.DBCredentialsRef.SecretName,
						},
						Key: apiSpec.DBSpec.DBCredentialsRef.PasswordKey,
					},
				},
			},
			// The DSN references DB_USERNAME/DB_PASSWORD via Kubernetes
			// $(VAR) env interpolation, resolved in the container.
			serviceCfg.EnvKeys.DatabaseDSN.Var(fmt.Sprintf("postgres://$(DB_USERNAME):$(DB_PASSWORD)@%s:%d/%s", apiSpec.DBSpec.Host, apiSpec.DBSpec.Port, apiSpec.DBSpec.DBName)),
			serviceCfg.EnvKeys.ServiceKey.Var(apiSpec.JwtAuth.ServiceKeySelector()),
			serviceCfg.EnvKeys.JwtSecret.Var(apiSpec.JwtAuth.SecretKeySelector()),
			serviceCfg.EnvKeys.JwtJwks.Var(apiSpec.JwtAuth.JwksKeySelector()),
			serviceCfg.EnvKeys.S3ProtocolPrefix.Var(),
			serviceCfg.EnvKeys.S3ProtocolAllowForwardedHeader.Var(apiSpec.S3Protocol.AllowForwardedHeader),
			serviceCfg.EnvKeys.S3ProtocolAccessKeyId.Var(apiSpec.S3Protocol.CredentialsSecretRef.AccessKeyIdSelector()),
			serviceCfg.EnvKeys.S3ProtocolAccessKeySecret.Var(apiSpec.S3Protocol.CredentialsSecretRef.AccessSecretKeySelector()),
			serviceCfg.EnvKeys.TusUrlPath.Var(),
			// The same configured limit is applied to all three size knobs.
			serviceCfg.EnvKeys.FileSizeLimit.Var(apiSpec.FileSizeLimit),
			serviceCfg.EnvKeys.UploadFileSizeLimit.Var(apiSpec.FileSizeLimit),
			serviceCfg.EnvKeys.UploadFileSizeLimitStandard.Var(apiSpec.FileSizeLimit),
			serviceCfg.EnvKeys.AnonKey.Var(apiSpec.JwtAuth.AnonKeySelector()),
			// TODO: https://github.com/supabase/storage-api/issues/55
			serviceCfg.EnvKeys.FileStorageRegion.Var(),
		}

		// Point the storage API at the in-cluster imgproxy service when the
		// image proxy is enabled.
		if storage.Spec.ImageProxy != nil && storage.Spec.ImageProxy.Enable {
			storagApiEnv = append(storagApiEnv, serviceCfg.EnvKeys.ImgProxyURL.Var(fmt.Sprintf("http://%s.%s.svc:%d", supabase.ServiceConfig.ImgProxy.ObjectName(storage), storage.Namespace, supabase.ServiceConfig.ImgProxy.Defaults.ApiPort)))
		}

		// The selector is immutable - only set it on initial creation.
		if storageApiDeployment.CreationTimestamp.IsZero() {
			storageApiDeployment.Spec.Selector = &metav1.LabelSelector{
				MatchLabels: selectorLabels(storage, serviceCfg.Name),
			}
		}

		storageApiDeployment.Spec.Replicas = apiSpec.WorkloadTemplate.ReplicaCount()
		storageApiDeployment.Spec.Template = corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{
				// Secret-state hashes: changing credentials changes these
				// annotations and thereby rolls the pods.
				Annotations: map[string]string{
					fmt.Sprintf("%s/%s", supabasev1alpha1.GroupVersion.Group, "jwt-hash"):            jwtStateHash,
					fmt.Sprintf("%s/%s", supabasev1alpha1.GroupVersion.Group, "s3-credentials-hash"): s3ProtoCredentialsStateHash,
				},
				Labels: objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.Storage.Tag),
			},
			Spec: corev1.PodSpec{
				ImagePullSecrets: apiSpec.WorkloadTemplate.PullSecrets(),
				Containers: []corev1.Container{{
					Name:            "supabase-storage",
					Image:           apiSpec.WorkloadTemplate.Image(supabase.Images.Storage.String()),
					ImagePullPolicy: apiSpec.WorkloadTemplate.ImagePullPolicy(),
					// Merge the computed env with file- and S3-backend
					// specific variables and any user-supplied overrides.
					Env: apiSpec.WorkloadTemplate.MergeEnv(append(storagApiEnv, slices.Concat(apiSpec.FileBackend.Env(), apiSpec.S3Backend.Env())...)),
					Ports: []corev1.ContainerPort{{
						Name:          serviceCfg.Defaults.ApiPortName,
						ContainerPort: serviceCfg.Defaults.ApiPort,
						Protocol:      corev1.ProtocolTCP,
					}},
					SecurityContext: apiSpec.WorkloadTemplate.ContainerSecurityContext(serviceCfg.Defaults.UID, serviceCfg.Defaults.GID),
					Resources:       apiSpec.WorkloadTemplate.Resources(),
					VolumeMounts: apiSpec.WorkloadTemplate.AdditionalVolumeMounts(
						// Scratch space for uploads, backed by the volume below.
						corev1.VolumeMount{
							Name:      "tmp",
							MountPath: "/tmp",
						},
					),
					ReadinessProbe: &corev1.Probe{
						InitialDelaySeconds: 5,
						PeriodSeconds:       3,
						TimeoutSeconds:      1,
						SuccessThreshold:    2,
						ProbeHandler: corev1.ProbeHandler{
							HTTPGet: &corev1.HTTPGetAction{
								Path: "/status",
								Port: intstr.IntOrString{IntVal: serviceCfg.Defaults.ApiPort},
							},
						},
					},
					LivenessProbe: &corev1.Probe{
						InitialDelaySeconds: 10,
						PeriodSeconds:       5,
						TimeoutSeconds:      3,
						ProbeHandler: corev1.ProbeHandler{
							HTTPGet: &corev1.HTTPGetAction{
								Path: "/status",
								Port: intstr.IntOrString{IntVal: serviceCfg.Defaults.ApiPort},
							},
						},
					},
				}},
				SecurityContext: apiSpec.WorkloadTemplate.PodSecurityContext(),
				Volumes: apiSpec.WorkloadTemplate.Volumes(
					corev1.Volume{
						Name: "tmp",
						VolumeSource: corev1.VolumeSource{
							EmptyDir: apiSpec.UploadTemp.VolumeSource(),
						},
					},
				),
			},
		}

		if err := controllerutil.SetControllerReference(storage, storageApiDeployment, r.Scheme); err != nil {
			return err
		}

		return nil
	})

	return err
}
// reconcileStorageApiService creates or updates the Service exposing the
// storage API deployment on its configured API port.
//
// It also ensures the API-gateway-target label is present (with an empty
// value unless set otherwise) so the API gateway reconciler picks the
// service up as a routing target.
func (r *StorageApiReconciler) reconcileStorageApiService(
	ctx context.Context,
	storage *supabasev1alpha1.Storage,
) error {
	var (
		serviceCfg        = supabase.ServiceConfig.Storage
		storageApiService = &corev1.Service{
			// Consistency: use the already-bound serviceCfg instead of
			// spelling out supabase.ServiceConfig.Storage again.
			ObjectMeta: serviceCfg.ObjectMeta(storage),
		}
	)

	_, err := controllerutil.CreateOrUpdate(ctx, r.Client, storageApiService, func() error {
		storageApiService.Labels = storage.Spec.Api.WorkloadTemplate.MergeLabels(
			objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.Storage.Tag),
			storage.Labels,
		)

		// Mark the service as an API gateway target, but never overwrite an
		// explicitly set target value.
		if _, ok := storageApiService.Labels[meta.SupabaseLabel.ApiGatewayTarget]; !ok {
			storageApiService.Labels[meta.SupabaseLabel.ApiGatewayTarget] = ""
		}

		storageApiService.Spec = corev1.ServiceSpec{
			Selector: selectorLabels(storage, serviceCfg.Name),
			Ports: []corev1.ServicePort{
				{
					Name:        serviceCfg.Defaults.ApiPortName,
					Protocol:    corev1.ProtocolTCP,
					AppProtocol: ptrOf("http"),
					Port:        serviceCfg.Defaults.ApiPort,
					TargetPort:  intstr.IntOrString{IntVal: serviceCfg.Defaults.ApiPort},
				},
			},
		}

		if err := controllerutil.SetControllerReference(storage, storageApiService, r.Scheme); err != nil {
			return err
		}

		return nil
	})

	return err
}

View file

@ -1,55 +0,0 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
)
// StorageReconciler reconciles a Storage object.
type StorageReconciler struct {
	client.Client
	// Scheme is used to resolve type information for managed objects.
	Scheme *runtime.Scheme
}
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.4/pkg/reconcile
func (r *StorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// Scaffolded stub: the logger is retrieved but unused until real
	// reconciliation logic is implemented.
	_ = log.FromContext(ctx)

	// TODO(user): your logic here

	return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager, reconciling
// Storage objects under the controller name "storage".
func (r *StorageReconciler) SetupWithManager(mgr ctrl.Manager) error {
	controllerBuilder := ctrl.NewControllerManagedBy(mgr).
		For(new(supabasev1alpha1.Storage)).
		Named("storage")

	return controllerBuilder.Complete(r)
}

View file

@ -0,0 +1,218 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
"code.icb4dc0.de/prskr/supabase-operator/internal/supabase"
)
// StorageImgProxyReconciler reconciles the optional imgproxy Deployment and
// Service belonging to a Storage object.
type StorageImgProxyReconciler struct {
	client.Client
	// Scheme is used to resolve type information, e.g. when setting owner
	// references on managed objects.
	Scheme *runtime.Scheme
}
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.4/pkg/reconcile
// Reconcile fetches the Storage instance and, if the image proxy is enabled,
// drives the imgproxy Deployment and Service towards the desired state.
//
// A missing Storage object or a disabled image proxy ends reconciliation
// without error.
func (r *StorageImgProxyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	var (
		storage supabasev1alpha1.Storage
		logger  = log.FromContext(ctx)
	)

	if err := r.Get(ctx, req.NamespacedName, &storage); err != nil {
		// Bug fix: the condition was inverted before - real errors were
		// swallowed while not-found errors were returned for retry.
		if client.IgnoreNotFound(err) == nil {
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}

	// Bug fix: message previously said "Reconciling Storage API" although
	// this is the imgproxy reconciler.
	logger.Info("Reconciling Storage ImgProxy")

	if storage.Spec.ImageProxy == nil || !storage.Spec.ImageProxy.Enable {
		logger.Info("ImgProxy is not enabled - skipping")
		return ctrl.Result{}, nil
	}

	if err := r.reconcileImgProxyDeployment(ctx, &storage); err != nil {
		return ctrl.Result{}, err
	}

	if err := r.reconcileImgProxyService(ctx, &storage); err != nil {
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *StorageImgProxyReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&supabasev1alpha1.Storage{}).
Named("storage-imgproxy").
Owns(new(appsv1.Deployment)).
Owns(new(corev1.Service)).
Complete(r)
}
// reconcileImgProxyDeployment creates or updates the imgproxy Deployment for
// the given Storage instance.
//
// The caller (Reconcile) guarantees storage.Spec.ImageProxy is non-nil when
// this is invoked. Returns any error from applying the Deployment.
func (r *StorageImgProxyReconciler) reconcileImgProxyDeployment(
	ctx context.Context,
	storage *supabasev1alpha1.Storage,
) error {
	var (
		serviceCfg         = supabase.ServiceConfig.ImgProxy
		imgProxySpec       = storage.Spec.ImageProxy
		imgProxyDeployment = &appsv1.Deployment{
			ObjectMeta: serviceCfg.ObjectMeta(storage),
		}
	)

	_, err := controllerutil.CreateOrUpdate(ctx, r.Client, imgProxyDeployment, func() error {
		imgProxyDeployment.Labels = imgProxySpec.WorkloadTemplate.MergeLabels(
			objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.ImgProxy.Tag),
			storage.Labels,
		)

		imgProxyEnv := []corev1.EnvVar{
			serviceCfg.EnvKeys.Bind.Var(),
			serviceCfg.EnvKeys.UseETag.Var(),
			serviceCfg.EnvKeys.EnableWebPDetection.Var(imgProxySpec.EnabledWebPDetection),
		}

		// When the storage API serves files from a local path, imgproxy needs
		// the same filesystem root to resolve local:// sources.
		if storage.Spec.Api.FileBackend != nil {
			imgProxyEnv = append(imgProxyEnv, serviceCfg.EnvKeys.LocalFileSystemRoot.Var(storage.Spec.Api.FileBackend.Path))
		}

		// The selector is immutable - only set it on initial creation.
		if imgProxyDeployment.CreationTimestamp.IsZero() {
			imgProxyDeployment.Spec.Selector = &metav1.LabelSelector{
				MatchLabels: selectorLabels(storage, serviceCfg.Name),
			}
		}

		imgProxyDeployment.Spec.Replicas = imgProxySpec.WorkloadTemplate.ReplicaCount()
		imgProxyDeployment.Spec.Template = corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{
				Labels: objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.ImgProxy.Tag),
			},
			Spec: corev1.PodSpec{
				ImagePullSecrets: imgProxySpec.WorkloadTemplate.PullSecrets(),
				Containers: []corev1.Container{{
					Name:            "supabase-imgproxy",
					Image:           imgProxySpec.WorkloadTemplate.Image(supabase.Images.ImgProxy.String()),
					ImagePullPolicy: imgProxySpec.WorkloadTemplate.ImagePullPolicy(),
					Env:             imgProxySpec.WorkloadTemplate.MergeEnv(imgProxyEnv),
					Ports: []corev1.ContainerPort{{
						Name:          serviceCfg.Defaults.ApiPortName,
						ContainerPort: serviceCfg.Defaults.ApiPort,
						Protocol:      corev1.ProtocolTCP,
					}},
					SecurityContext: imgProxySpec.WorkloadTemplate.ContainerSecurityContext(serviceCfg.Defaults.UID, serviceCfg.Defaults.GID),
					Resources:       imgProxySpec.WorkloadTemplate.Resources(),
					VolumeMounts:    imgProxySpec.WorkloadTemplate.AdditionalVolumeMounts(),
					// Probes use imgproxy's built-in health subcommand rather
					// than an HTTP endpoint.
					ReadinessProbe: &corev1.Probe{
						InitialDelaySeconds: 5,
						PeriodSeconds:       3,
						TimeoutSeconds:      1,
						SuccessThreshold:    2,
						ProbeHandler: corev1.ProbeHandler{
							Exec: &corev1.ExecAction{
								Command: []string{"imgproxy", "health"},
							},
						},
					},
					LivenessProbe: &corev1.Probe{
						InitialDelaySeconds: 10,
						PeriodSeconds:       5,
						TimeoutSeconds:      3,
						ProbeHandler: corev1.ProbeHandler{
							Exec: &corev1.ExecAction{
								Command: []string{"imgproxy", "health"},
							},
						},
					},
				}},
				SecurityContext: imgProxySpec.WorkloadTemplate.PodSecurityContext(),
				Volumes:         imgProxySpec.WorkloadTemplate.Volumes(),
			},
		}

		if err := controllerutil.SetControllerReference(storage, imgProxyDeployment, r.Scheme); err != nil {
			return err
		}

		return nil
	})

	return err
}
// reconcileImgProxyService creates or patches the Service exposing the
// imgproxy deployment on its configured API port.
//
// The caller (Reconcile) guarantees storage.Spec.ImageProxy is non-nil when
// this is invoked.
func (r *StorageImgProxyReconciler) reconcileImgProxyService(
	ctx context.Context,
	storage *supabasev1alpha1.Storage,
) error {
	var (
		serviceCfg      = supabase.ServiceConfig.ImgProxy
		imgProxyService = &corev1.Service{
			// Bug fix: the ObjectMeta was previously built from
			// supabase.ServiceConfig.Storage, which would make this service
			// collide with the storage API service instead of creating a
			// dedicated imgproxy service.
			ObjectMeta: serviceCfg.ObjectMeta(storage),
		}
	)

	_, err := controllerutil.CreateOrPatch(ctx, r.Client, imgProxyService, func() error {
		// Consistency fix: merge labels via the imgproxy workload template
		// (matching reconcileImgProxyDeployment) rather than the storage API
		// template.
		imgProxyService.Labels = storage.Spec.ImageProxy.WorkloadTemplate.MergeLabels(
			objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.ImgProxy.Tag),
			storage.Labels,
		)

		imgProxyService.Spec = corev1.ServiceSpec{
			Selector: selectorLabels(storage, serviceCfg.Name),
			Ports: []corev1.ServicePort{
				{
					Name:        serviceCfg.Defaults.ApiPortName,
					Protocol:    corev1.ProtocolTCP,
					AppProtocol: ptrOf("http"),
					Port:        serviceCfg.Defaults.ApiPort,
					TargetPort:  intstr.IntOrString{IntVal: serviceCfg.Defaults.ApiPort},
				},
			},
		}

		if err := controllerutil.SetControllerReference(storage, imgProxyService, r.Scheme); err != nil {
			return err
		}

		return nil
	})

	return err
}

View file

@ -0,0 +1,147 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"errors"
"maps"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
"code.icb4dc0.de/prskr/supabase-operator/internal/meta"
"code.icb4dc0.de/prskr/supabase-operator/internal/pw"
)
// StorageS3CredentialsReconciler manages the credential Secrets of a Storage
// object: it generates S3-protocol credentials and adopts an existing
// S3-backend credentials secret.
type StorageS3CredentialsReconciler struct {
	client.Client
	// Scheme is used to resolve type information, e.g. when setting owner
	// references on managed objects.
	Scheme *runtime.Scheme
}
// Reconcile fetches the Storage instance and ensures its S3-protocol
// credentials secret exists; when an S3 storage backend is configured it
// additionally reconciles the backend credentials secret.
func (r *StorageS3CredentialsReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {
	logger := log.FromContext(ctx)

	var storageInstance supabasev1alpha1.Storage
	if getErr := r.Get(ctx, req.NamespacedName, &storageInstance); getErr != nil {
		if client.IgnoreNotFound(getErr) != nil {
			return ctrl.Result{}, getErr
		}
		logger.Info("Storage instance does not exist")
		return ctrl.Result{}, nil
	}

	if protoErr := r.reconcileS3ProtoSecret(ctx, &storageInstance); protoErr != nil {
		return ctrl.Result{}, protoErr
	}

	if storageInstance.Spec.Api.S3Backend != nil {
		if backendErr := r.reconcileS3StorageSecret(ctx, &storageInstance); backendErr != nil {
			return ctrl.Result{}, backendErr
		}
	}

	return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *StorageS3CredentialsReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(new(supabasev1alpha1.Storage)).
Owns(new(corev1.Secret)).
Named("storage-s3-creds").
Complete(r)
}
// reconcileS3StorageSecret adopts the user-provided S3 backend credentials
// secret by setting the Storage object as its controller/owner.
//
// The caller guarantees storage.Spec.Api.S3Backend is non-nil; a missing
// CredentialsSecretRef is reported as an error.
func (r *StorageS3CredentialsReconciler) reconcileS3StorageSecret(
	ctx context.Context,
	storage *supabasev1alpha1.Storage,
) error {
	credentialsRef := storage.Spec.Api.S3Backend.CredentialsSecretRef
	if credentialsRef == nil {
		return errors.New("S3 storage credentials secret is empty")
	}

	credentialsSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      credentialsRef.SecretName,
			Namespace: storage.Namespace,
		},
	}

	if err := r.Get(ctx, client.ObjectKeyFromObject(credentialsSecret), credentialsSecret); err != nil {
		return err
	}

	if err := controllerutil.SetControllerReference(storage, credentialsSecret, r.Scheme); err != nil {
		return err
	}

	return r.Update(ctx, credentialsSecret)
}
// reconcileS3ProtoSecret ensures the S3-protocol credentials secret exists,
// generating a random access key id (32 chars) and secret (64 chars) on
// first creation while keeping any values already present.
func (r *StorageS3CredentialsReconciler) reconcileS3ProtoSecret(
	ctx context.Context,
	storage *supabasev1alpha1.Storage,
) error {
	// One entry for the access key id, one for the access key secret.
	const credentialEntryCount = 2

	credentialsRef := storage.Spec.Api.S3Protocol.CredentialsSecretRef
	protoSecret := corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      credentialsRef.SecretName,
			Namespace: storage.Namespace,
		},
	}

	_, err := controllerutil.CreateOrUpdate(ctx, r.Client, &protoSecret, func() error {
		secretLabels := maps.Clone(storage.Labels)
		if secretLabels == nil {
			secretLabels = make(map[string]string)
		}
		// Mark the secret so dependent workloads get reloaded on change.
		secretLabels[meta.SupabaseLabel.Reload] = ""
		protoSecret.Labels = secretLabels

		if err := controllerutil.SetControllerReference(storage, &protoSecret, r.Scheme); err != nil {
			return err
		}

		if protoSecret.Data == nil {
			protoSecret.Data = make(map[string][]byte, credentialEntryCount)
		}

		// Generate credentials only for keys that are not populated yet so
		// existing credentials are never rotated implicitly.
		if _, present := protoSecret.Data[credentialsRef.AccessKeyIdKey]; !present {
			protoSecret.Data[credentialsRef.AccessKeyIdKey] = pw.GeneratePW(32, nil)
		}

		if _, present := protoSecret.Data[credentialsRef.AccessSecretKeyKey]; !present {
			protoSecret.Data[credentialsRef.AccessSecretKeyKey] = pw.GeneratePW(64, nil)
		}

		return nil
	})

	return err
}

View file

@ -18,7 +18,7 @@ package controller
import (
"context"
"crypto/sha256"
"hash/fnv"
"maps"
"reflect"
@ -93,7 +93,7 @@ func ValueOrFallback[T any](value, fallback T) T {
}
func HashStrings(vals ...string) []byte {
h := sha256.New()
h := fnv.New64a()
for _, v := range vals {
h.Write([]byte(v))
@ -103,7 +103,7 @@ func HashStrings(vals ...string) []byte {
}
func HashBytes(vals ...[]byte) []byte {
h := sha256.New()
h := fnv.New64a()
for _, v := range vals {
h.Write(v)

View file

@ -19,9 +19,9 @@ package controlplane
import (
"bytes"
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"hash/fnv"
"strconv"
"strings"
"time"
@ -87,7 +87,7 @@ func (r *APIGatewayReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, fmt.Errorf("failed to prepare config hash: %w", err)
}
serviceHash := sha256.New().Sum(rawServices)
serviceHash := fnv.New64a().Sum(rawServices)
if bytes.Equal(serviceHash, gateway.Status.Envoy.ResourceHash) {
logger.Info("Resource hash did not change - skipping reconciliation")
return ctrl.Result{}, nil

View file

@ -36,7 +36,9 @@ func (c *GoTrueCluster) Cluster(instance string) []*clusterv3.Cluster {
return nil
}
return []*clusterv3.Cluster{c.ServiceCluster.Cluster(fmt.Sprintf("auth@%s", instance), 9999)}
serviceCfg := supabase.ServiceConfig.Auth
return []*clusterv3.Cluster{c.ServiceCluster.Cluster(fmt.Sprintf("%s@%s", serviceCfg.Name, instance), uint32(serviceCfg.Defaults.APIPort))}
}
func (c *GoTrueCluster) Routes(instance string) []*routev3.Route {

View file

@ -34,8 +34,11 @@ func (c *PostgrestCluster) Cluster(instance string) []*clusterv3.Cluster {
if c == nil {
return nil
}
serviceCfg := supabase.ServiceConfig.Postgrest
return []*clusterv3.Cluster{
c.ServiceCluster.Cluster(fmt.Sprintf("%s@%s", supabase.ServiceConfig.Postgrest.Name, instance), 3000),
c.ServiceCluster.Cluster(fmt.Sprintf("%s@%s", serviceCfg.Name, instance), uint32(serviceCfg.Defaults.ServerPort)),
}
}
@ -44,6 +47,8 @@ func (c *PostgrestCluster) Routes(instance string) []*routev3.Route {
return nil
}
serviceCfg := supabase.ServiceConfig.Postgrest
return []*routev3.Route{
{
Name: "PostgREST: /rest/v1/* -> http://rest:3000/*",
@ -55,7 +60,7 @@ func (c *PostgrestCluster) Routes(instance string) []*routev3.Route {
Action: &routev3.Route_Route{
Route: &routev3.RouteAction{
ClusterSpecifier: &routev3.RouteAction_Cluster{
Cluster: fmt.Sprintf("%s@%s", supabase.ServiceConfig.Postgrest.Name, instance),
Cluster: fmt.Sprintf("%s@%s", serviceCfg.Name, instance),
},
PrefixRewrite: "/",
},
@ -71,7 +76,7 @@ func (c *PostgrestCluster) Routes(instance string) []*routev3.Route {
Action: &routev3.Route_Route{
Route: &routev3.RouteAction{
ClusterSpecifier: &routev3.RouteAction_Cluster{
Cluster: fmt.Sprintf("%s@%s", supabase.ServiceConfig.Postgrest.Name, instance),
Cluster: fmt.Sprintf("%s@%s", serviceCfg.Name, instance),
},
PrefixRewrite: "/rpc/graphql",
},

View file

@ -36,11 +36,12 @@ import (
)
type EnvoyServices struct {
ServiceLabelKey string `json:"-"`
Postgrest *PostgrestCluster `json:"postgrest,omitempty"`
GoTrue *GoTrueCluster `json:"auth,omitempty"`
PGMeta *PGMetaCluster `json:"pgmeta,omitempty"`
Studio *StudioCluster `json:"studio,omitempty"`
ServiceLabelKey string `json:"-"`
Postgrest *PostgrestCluster `json:"postgrest,omitempty"`
GoTrue *GoTrueCluster `json:"auth,omitempty"`
StorageApi *StorageApiCluster `json:"storageApi,omitempty"`
PGMeta *PGMetaCluster `json:"pgmeta,omitempty"`
Studio *StudioCluster `json:"studio,omitempty"`
}
func (s *EnvoyServices) UpsertEndpointSlices(endpointSlices ...discoveryv1.EndpointSlice) {
@ -56,6 +57,11 @@ func (s *EnvoyServices) UpsertEndpointSlices(endpointSlices ...discoveryv1.Endpo
s.GoTrue = new(GoTrueCluster)
}
s.GoTrue.AddOrUpdateEndpoints(eps)
case supabase.ServiceConfig.Storage.Name:
if s.StorageApi == nil {
s.StorageApi = new(StorageApiCluster)
}
s.StorageApi.AddOrUpdateEndpoints(eps)
case supabase.ServiceConfig.PGMeta.Name:
if s.PGMeta == nil {
s.PGMeta = new(PGMetaCluster)
@ -76,6 +82,10 @@ func (s EnvoyServices) Targets() map[string][]string {
targets[supabase.ServiceConfig.Auth.Name] = s.GoTrue.Targets()
}
if s.StorageApi != nil {
targets[supabase.ServiceConfig.Storage.Name] = s.StorageApi.Targets()
}
if s.PGMeta != nil {
targets[supabase.ServiceConfig.PGMeta.Name] = s.PGMeta.Targets()
}
@ -179,6 +189,7 @@ func (s *EnvoyServices) snapshot(ctx context.Context, instance, version string)
Routes: slices.Concat(
s.Postgrest.Routes(instance),
s.GoTrue.Routes(instance),
s.StorageApi.Routes(instance),
s.PGMeta.Routes(instance),
),
}},
@ -252,6 +263,7 @@ func (s *EnvoyServices) snapshot(ctx context.Context, instance, version string)
slices.Concat(
s.Postgrest.Cluster(instance),
s.GoTrue.Cluster(instance),
s.StorageApi.Cluster(instance),
s.PGMeta.Cluster(instance),
)...),
resource.RouteType: {apiRouteCfg},

View file

@ -0,0 +1,72 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controlplane
import (
"fmt"
clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
"google.golang.org/protobuf/types/known/anypb"
"code.icb4dc0.de/prskr/supabase-operator/internal/supabase"
)
// StorageApiCluster renders the Envoy cluster and route configuration for the
// Supabase storage API service, reusing the generic ServiceCluster for
// endpoint bookkeeping.
type StorageApiCluster struct {
	ServiceCluster
}
// Cluster renders the Envoy cluster definition for the storage API of the
// given instance. A nil receiver yields no clusters.
func (c *StorageApiCluster) Cluster(instance string) []*clusterv3.Cluster {
	if c == nil {
		return nil
	}

	cfg := supabase.ServiceConfig.Storage
	clusterName := fmt.Sprintf("%s@%s", cfg.Name, instance)

	return []*clusterv3.Cluster{
		c.ServiceCluster.Cluster(clusterName, uint32(cfg.Defaults.ApiPort)),
	}
}
// Routes renders the Envoy route that forwards /storage/v1/* traffic to the
// storage API cluster of the given instance. A nil receiver yields no routes.
func (c *StorageApiCluster) Routes(instance string) []*routev3.Route {
	if c == nil {
		return nil
	}

	cfg := supabase.ServiceConfig.Storage

	storageRoute := &routev3.Route{
		Name: "Storage: /storage/v1/* -> http://storage:5000/*",
		Match: &routev3.RouteMatch{
			PathSpecifier: &routev3.RouteMatch_Prefix{
				Prefix: "/storage/v1/",
			},
		},
		Action: &routev3.Route_Route{
			Route: &routev3.RouteAction{
				ClusterSpecifier: &routev3.RouteAction_Cluster{
					Cluster: fmt.Sprintf("%s@%s", cfg.Name, instance),
				},
				// strip the /storage/v1 prefix before forwarding upstream
				PrefixRewrite: "/",
			},
		},
		// NOTE(review): RBAC & JWT filters are configured allow-all for this
		// route - presumably the storage API enforces auth itself; confirm.
		TypedPerFilterConfig: map[string]*anypb.Any{
			FilterNameRBAC:     MustAny(RBACPerRoute(RBACAllowAllConfig())),
			FilterNameJwtAuthn: MustAny(JWTAllowAll()),
		},
	}

	return []*routev3.Route{storageRoute}
}

86
internal/supabase/auth.go Normal file
View file

@ -0,0 +1,86 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
// authEnvKeys maps the GoTrue (auth service) configuration options to their
// container environment variable names. fixedEnv entries carry a hard-coded
// value (see authServiceConfig), the other env kinds are resolved from the
// operator's specs/secrets at render time.
type authEnvKeys struct {
	ApiHost                fixedEnv
	ApiPort                fixedEnv
	ApiExternalUrl         stringEnv
	DBDriver               fixedEnv
	DatabaseUrl            string
	SiteUrl                stringEnv
	AdditionalRedirectURLs stringSliceEnv
	DisableSignup          boolEnv
	JWTIssuer              fixedEnv
	JWTAdminRoles          fixedEnv
	JWTAudience            fixedEnv
	JwtDefaultGroup        fixedEnv
	JwtExpiry              intEnv[int]
	JwtSecret              secretEnv
	// NOTE(review): despite the "Disabled" name this field maps to
	// GOTRUE_EXTERNAL_EMAIL_ENABLED (see authServiceConfig) - confirm the
	// value is inverted wherever it is rendered.
	EmailSignupDisabled        boolEnv
	MailerUrlPathsInvite       stringEnv
	MailerUrlPathsConfirmation stringEnv
	MailerUrlPathsRecovery     stringEnv
	MailerUrlPathsEmailChange  stringEnv
	AnonymousUsersEnabled      boolEnv
}
// authConfigDefaults holds the default values applied to an auth service
// deployment when the corresponding spec fields are unset.
type authConfigDefaults struct {
	// mailer URL paths default to "/auth/v1/verify" (see authServiceConfig)
	MailerUrlPathsInvite       string
	MailerUrlPathsConfirmation string
	MailerUrlPathsRecovery     string
	MailerUrlPathsEmailChange  string
	// APIPort is the container port the auth API listens on
	APIPort int32
	// UID/GID the auth container runs as
	UID, GID int64
}
// authServiceConfig assembles the static service configuration for the
// GoTrue auth service: its canonical name, the environment variable mapping
// and the deployment defaults.
func authServiceConfig() serviceConfig[authEnvKeys, authConfigDefaults] {
	keys := authEnvKeys{
		ApiHost:                fixedEnvOf("GOTRUE_API_HOST", "0.0.0.0"),
		ApiPort:                fixedEnvOf("GOTRUE_API_PORT", "9999"),
		ApiExternalUrl:         "API_EXTERNAL_URL",
		DBDriver:               fixedEnvOf("GOTRUE_DB_DRIVER", "postgres"),
		DatabaseUrl:            "GOTRUE_DB_DATABASE_URL",
		SiteUrl:                "GOTRUE_SITE_URL",
		AdditionalRedirectURLs: stringSliceEnv{key: "GOTRUE_URI_ALLOW_LIST", separator: ","},
		DisableSignup:          "GOTRUE_DISABLE_SIGNUP",
		JWTIssuer:              fixedEnvOf("GOTRUE_JWT_ISSUER", "supabase"),
		JWTAdminRoles:          fixedEnvOf("GOTRUE_JWT_ADMIN_ROLES", "service_role"),
		JWTAudience:            fixedEnvOf("GOTRUE_JWT_AUD", "authenticated"),
		JwtDefaultGroup:        fixedEnvOf("GOTRUE_JWT_DEFAULT_GROUP_NAME", "authenticated"),
		JwtExpiry:              "GOTRUE_JWT_EXP",
		JwtSecret:              "GOTRUE_JWT_SECRET",
		// NOTE(review): field name says "disabled" but the variable is
		// *_ENABLED - confirm polarity where the value is rendered.
		EmailSignupDisabled:        "GOTRUE_EXTERNAL_EMAIL_ENABLED",
		MailerUrlPathsInvite:       "MAILER_URLPATHS_INVITE",
		MailerUrlPathsConfirmation: "MAILER_URLPATHS_CONFIRMATION",
		MailerUrlPathsRecovery:     "MAILER_URLPATHS_RECOVERY",
		MailerUrlPathsEmailChange:  "MAILER_URLPATHS_EMAIL_CHANGE",
		AnonymousUsersEnabled:      "GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED",
	}

	defaults := authConfigDefaults{
		MailerUrlPathsInvite:       "/auth/v1/verify",
		MailerUrlPathsConfirmation: "/auth/v1/verify",
		MailerUrlPathsRecovery:     "/auth/v1/verify",
		MailerUrlPathsEmailChange:  "/auth/v1/verify",
		APIPort:                    9999,
		UID:                        1000,
		GID:                        1000,
	}

	return serviceConfig[authEnvKeys, authConfigDefaults]{
		Name:     "auth",
		EnvKeys:  keys,
		Defaults: defaults,
	}
}

View file

@ -36,294 +36,22 @@ func (cfg serviceConfig[TEnvKeys, TDefaults]) ObjectMeta(obj metav1.Object) meta
return metav1.ObjectMeta{Name: cfg.ObjectName(obj), Namespace: obj.GetNamespace()}
}
type postgrestEnvKeys struct {
Host fixedEnv
DBUri string
Schemas stringSliceEnv
AnonRole stringEnv
JWTSecret secretEnv
UseLegacyGucs boolEnv
ExtraSearchPath stringSliceEnv
AppSettingsJWTSecret secretEnv
AppSettingsJWTExpiry intEnv[int]
AdminServerPort intEnv[int32]
MaxRows intEnv[int]
OpenAPIProxyURI stringEnv
}
type postgrestConfigDefaults struct {
AnonRole string
Schemas []string
ExtraSearchPath []string
UID, GID int64
ServerPort, AdminPort int32
}
type authEnvKeys struct {
ApiHost fixedEnv
ApiPort fixedEnv
ApiExternalUrl stringEnv
DBDriver fixedEnv
DatabaseUrl string
SiteUrl stringEnv
AdditionalRedirectURLs stringSliceEnv
DisableSignup boolEnv
JWTIssuer fixedEnv
JWTAdminRoles fixedEnv
JWTAudience fixedEnv
JwtDefaultGroup fixedEnv
JwtExpiry intEnv[int]
JwtSecret secretEnv
EmailSignupDisabled boolEnv
MailerUrlPathsInvite stringEnv
MailerUrlPathsConfirmation stringEnv
MailerUrlPathsRecovery stringEnv
MailerUrlPathsEmailChange stringEnv
AnonymousUsersEnabled boolEnv
}
type authConfigDefaults struct {
MailerUrlPathsInvite string
MailerUrlPathsConfirmation string
MailerUrlPathsRecovery string
MailerUrlPathsEmailChange string
APIPort int32
UID, GID int64
}
type pgMetaEnvKeys struct {
APIPort intEnv[int32]
DBHost stringEnv
DBPort intEnv[int]
DBName stringEnv
DBUser secretEnv
DBPassword secretEnv
}
type pgMetaDefaults struct {
APIPort int32
DBPort string
NodeUID int64
NodeGID int64
}
type studioEnvKeys struct {
PGMetaURL stringEnv
DBPassword secretEnv
ApiUrl stringEnv
APIExternalURL stringEnv
JwtSecret secretEnv
AnonKey secretEnv
ServiceKey secretEnv
Host fixedEnv
LogsEnabled fixedEnv
}
type studioDefaults struct {
NodeUID int64
NodeGID int64
APIPort int32
}
type storageEnvApiKeys struct {
AnonKey secretEnv
ServiceKey secretEnv
JwtSecret secretEnv
JwtJwks secretEnv
DatabaseDSN stringEnv
FileSizeLimit intEnv[uint64]
UploadFileSizeLimit intEnv[uint64]
UploadFileSizeLimitStandard intEnv[uint64]
StorageBackend stringEnv
TenantID fixedEnv
StorageS3Region stringEnv
GlobalS3Bucket fixedEnv
EnableImaageTransformation boolEnv
ImgProxyURL stringEnv
TusUrlPath fixedEnv
S3AccessKeyId secretEnv
S3AccessKeySecret secretEnv
S3ProtocolPrefix fixedEnv
S3AllowForwardedHeader boolEnv
}
type storageApiDefaults struct{}
type envoyDefaults struct {
ConfigKey string
UID, GID int64
}
type envoyServiceConfig struct {
Defaults envoyDefaults
}
func (envoyServiceConfig) ObjectName(obj metav1.Object) string {
return fmt.Sprintf("%s-envoy", obj.GetName())
}
type jwtDefaults struct {
SecretKey string
JwksKey string
AnonKey string
ServiceKey string
SecretLength int
Expiry int
}
type jwtConfig struct {
Defaults jwtDefaults
}
func (jwtConfig) ObjectName(obj metav1.Object) string {
return fmt.Sprintf("%s-jwt", obj.GetName())
}
var ServiceConfig = struct {
Postgrest serviceConfig[postgrestEnvKeys, postgrestConfigDefaults]
Auth serviceConfig[authEnvKeys, authConfigDefaults]
PGMeta serviceConfig[pgMetaEnvKeys, pgMetaDefaults]
Studio serviceConfig[studioEnvKeys, studioDefaults]
Storage serviceConfig[storageEnvApiKeys, storageApiDefaults]
ImgProxy serviceConfig[imgProxyEnvKeys, imgProxyDefaults]
Envoy envoyServiceConfig
JWT jwtConfig
}{
Postgrest: serviceConfig[postgrestEnvKeys, postgrestConfigDefaults]{
Name: "postgrest",
EnvKeys: postgrestEnvKeys{
Host: fixedEnvOf("PGRST_SERVER_HOST", "*"),
DBUri: "PGRST_DB_URI",
Schemas: stringSliceEnv{key: "PGRST_DB_SCHEMAS", separator: ","},
AnonRole: "PGRST_DB_ANON_ROLE",
JWTSecret: "PGRST_JWT_SECRET",
UseLegacyGucs: "PGRST_DB_USE_LEGACY_GUCS",
AppSettingsJWTSecret: "PGRST_APP_SETTINGS_JWT_SECRET",
AppSettingsJWTExpiry: "PGRST_APP_SETTINGS_JWT_EXP",
AdminServerPort: "PGRST_ADMIN_SERVER_PORT",
ExtraSearchPath: stringSliceEnv{key: "PGRST_DB_EXTRA_SEARCH_PATH", separator: ","},
MaxRows: "PGRST_DB_MAX_ROWS",
OpenAPIProxyURI: "PGRST_OPENAPI_SERVER_PROXY_URI",
},
Defaults: postgrestConfigDefaults{
AnonRole: "anon",
Schemas: []string{"public", "graphql_public"},
ExtraSearchPath: []string{"public", "extensions"},
UID: 1000,
GID: 1000,
ServerPort: 3000,
AdminPort: 3001,
},
},
Auth: serviceConfig[authEnvKeys, authConfigDefaults]{
Name: "auth",
EnvKeys: authEnvKeys{
ApiHost: fixedEnvOf("GOTRUE_API_HOST", "0.0.0.0"),
ApiPort: fixedEnvOf("GOTRUE_API_PORT", "9999"),
ApiExternalUrl: "API_EXTERNAL_URL",
DBDriver: fixedEnvOf("GOTRUE_DB_DRIVER", "postgres"),
DatabaseUrl: "GOTRUE_DB_DATABASE_URL",
SiteUrl: "GOTRUE_SITE_URL",
AdditionalRedirectURLs: stringSliceEnv{key: "GOTRUE_URI_ALLOW_LIST", separator: ","},
DisableSignup: "GOTRUE_DISABLE_SIGNUP",
JWTIssuer: fixedEnvOf("GOTRUE_JWT_ISSUER", "supabase"),
JWTAdminRoles: fixedEnvOf("GOTRUE_JWT_ADMIN_ROLES", "service_role"),
JWTAudience: fixedEnvOf("GOTRUE_JWT_AUD", "authenticated"),
JwtDefaultGroup: fixedEnvOf("GOTRUE_JWT_DEFAULT_GROUP_NAME", "authenticated"),
JwtExpiry: "GOTRUE_JWT_EXP",
JwtSecret: "GOTRUE_JWT_SECRET",
EmailSignupDisabled: "GOTRUE_EXTERNAL_EMAIL_ENABLED",
MailerUrlPathsInvite: "MAILER_URLPATHS_INVITE",
MailerUrlPathsConfirmation: "MAILER_URLPATHS_CONFIRMATION",
MailerUrlPathsRecovery: "MAILER_URLPATHS_RECOVERY",
MailerUrlPathsEmailChange: "MAILER_URLPATHS_EMAIL_CHANGE",
AnonymousUsersEnabled: "GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED",
},
Defaults: authConfigDefaults{
MailerUrlPathsInvite: "/auth/v1/verify",
MailerUrlPathsConfirmation: "/auth/v1/verify",
MailerUrlPathsRecovery: "/auth/v1/verify",
MailerUrlPathsEmailChange: "/auth/v1/verify",
APIPort: 9999,
UID: 1000,
GID: 1000,
},
},
PGMeta: serviceConfig[pgMetaEnvKeys, pgMetaDefaults]{
Name: "pg-meta",
EnvKeys: pgMetaEnvKeys{
APIPort: "PG_META_PORT",
DBHost: "PG_META_DB_HOST",
DBPort: "PG_META_DB_PORT",
DBName: "PG_META_DB_NAME",
DBUser: "PG_META_DB_USER",
DBPassword: "PG_META_DB_PASSWORD",
},
Defaults: pgMetaDefaults{
APIPort: 8080,
DBPort: "5432",
NodeUID: 1000,
NodeGID: 1000,
},
},
Studio: serviceConfig[studioEnvKeys, studioDefaults]{
Name: "studio",
EnvKeys: studioEnvKeys{
PGMetaURL: "STUDIO_PG_META_URL",
DBPassword: "POSTGRES_PASSWORD",
ApiUrl: "SUPABASE_URL",
APIExternalURL: "SUPABASE_PUBLIC_URL",
JwtSecret: "AUTH_JWT_SECRET",
AnonKey: "SUPABASE_ANON_KEY",
ServiceKey: "SUPABASE_SERVICE_KEY",
Host: fixedEnvOf("HOSTNAME", "0.0.0.0"),
LogsEnabled: fixedEnvOf("NEXT_PUBLIC_ENABLE_LOGS", "true"),
},
Defaults: studioDefaults{
NodeUID: 1000,
NodeGID: 1000,
APIPort: 3000,
},
},
Storage: serviceConfig[storageEnvApiKeys, storageApiDefaults]{
Name: "storage-api",
EnvKeys: storageEnvApiKeys{
AnonKey: "ANON_KEY",
ServiceKey: "SERVICE_KEY",
JwtSecret: "AUTH_JWT_SECRET",
JwtJwks: "AUTH_JWT_JWKS",
StorageBackend: "STORAGE_BACKEND",
DatabaseDSN: "DATABASE_URL",
FileSizeLimit: "FILE_SIZE_LIMIT",
UploadFileSizeLimit: "UPLOAD_FILE_SIZE_LIMIT",
UploadFileSizeLimitStandard: "UPLOAD_FILE_SIZE_LIMIT_STANDARD",
TenantID: fixedEnvOf("TENANT_ID", "stub"),
StorageS3Region: "STORAGE_S3_REGION",
GlobalS3Bucket: fixedEnvOf("GLOBAL_S3_BUCKET", "stub"),
EnableImaageTransformation: "ENABLE_IMAGE_TRANSFORMATION",
ImgProxyURL: "IMGPROXY_URL",
TusUrlPath: fixedEnvOf("TUS_URL_PATH", "/storage/v1/upload/resumable"),
S3AccessKeyId: "S3_PROTOCOL_ACCESS_KEY_ID",
S3AccessKeySecret: "S3_PROTOCOL_ACCESS_KEY_SECRET",
S3ProtocolPrefix: fixedEnvOf("S3_PROTOCOL_PREFIX", "/storage/v1"),
S3AllowForwardedHeader: "S3_ALLOW_FORWARDED_HEADER",
},
Defaults: storageApiDefaults{},
},
Envoy: envoyServiceConfig{
Defaults: envoyDefaults{
ConfigKey: "config.yaml",
UID: 65532,
GID: 65532,
},
},
JWT: jwtConfig{
Defaults: jwtDefaults{
SecretKey: "secret",
JwksKey: "jwks.json",
AnonKey: "anon_key",
ServiceKey: "service_key",
SecretLength: 40,
Expiry: 3600,
},
},
Postgrest: postgrestServiceConfig(),
Auth: authServiceConfig(),
PGMeta: pgMetaServiceConfig(),
Studio: studioServiceConfig(),
Storage: storageServiceConfig(),
ImgProxy: imgProxyServiceConfig(),
Envoy: newEnvoyServiceConfig(),
JWT: newJwtConfig(),
}

View file

@ -0,0 +1,46 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// newEnvoyServiceConfig assembles the static defaults for the Envoy
// API gateway deployment.
func newEnvoyServiceConfig() envoyServiceConfig {
	defaults := envoyDefaults{
		ConfigKey: "config.yaml",
		// 65532 is the conventional nonroot user of distroless images -
		// TODO confirm against the Envoy image in use
		UID: 65532,
		GID: 65532,
	}

	return envoyServiceConfig{Defaults: defaults}
}
// envoyDefaults holds the static defaults for the Envoy deployment.
type envoyDefaults struct {
	// ConfigKey is the ConfigMap key that stores the Envoy bootstrap config
	ConfigKey string
	// UID/GID the Envoy container runs as
	UID, GID int64
}
// envoyServiceConfig bundles the defaults for the Envoy API gateway service.
type envoyServiceConfig struct {
	Defaults envoyDefaults
}
// ObjectName derives the name of the Envoy-related objects owned by the
// given object, i.e. "<owner-name>-envoy".
func (envoyServiceConfig) ObjectName(obj metav1.Object) string {
	const suffix = "envoy"
	return fmt.Sprintf("%s-%s", obj.GetName(), suffix)
}

View file

@ -0,0 +1,48 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
// imgProxyEnvKeys maps the imgproxy configuration options to their
// environment variable names.
type imgProxyEnvKeys struct {
	// Bind is the fixed listen address (":5001", see imgProxyServiceConfig)
	Bind                fixedEnv
	LocalFileSystemRoot stringEnv
	UseETag             fixedEnv
	EnableWebPDetection boolEnv
}
// imgProxyDefaults holds the deployment defaults for the imgproxy service.
type imgProxyDefaults struct {
	// ApiPort is the container port imgproxy listens on (must match Bind)
	ApiPort int32
	// ApiPortName is the port name used in the Service/container specs
	ApiPortName string
	// UID/GID the imgproxy container runs as
	UID, GID int64
}
// imgProxyServiceConfig assembles the static service configuration for the
// imgproxy service (used by the storage API for image transformation).
func imgProxyServiceConfig() serviceConfig[imgProxyEnvKeys, imgProxyDefaults] {
	keys := imgProxyEnvKeys{
		// bind address must stay in sync with Defaults.ApiPort below
		Bind:                fixedEnvOf("IMGPROXY_BIND", ":5001"),
		LocalFileSystemRoot: "IMGPROXY_LOCAL_FILESYSTEM_ROOT",
		UseETag:             fixedEnvOf("IMGPROXY_USE_ETAG", "true"),
		EnableWebPDetection: "IMGPROXY_ENABLE_WEBP_DETECTION",
	}

	defaults := imgProxyDefaults{
		ApiPort:     5001,
		ApiPortName: "api",
		UID:         999,
		GID:         999,
	}

	return serviceConfig[imgProxyEnvKeys, imgProxyDefaults]{
		Name:     "imgproxy",
		EnvKeys:  keys,
		Defaults: defaults,
	}
}

View file

@ -19,8 +19,41 @@ package supabase
import (
"crypto/rand"
"encoding/hex"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// jwtDefaults holds the well-known secret data keys and defaults for the
// per-instance JWT secret.
type jwtDefaults struct {
	// SecretKey is the secret data key holding the JWT signing secret
	SecretKey string
	// JwksKey is the secret data key holding the JWKS document
	JwksKey string
	// AnonKey is the secret data key for the anon role token
	AnonKey string
	// ServiceKey is the secret data key for the service role token
	ServiceKey string
	// SecretLength is the number of random bytes of a generated JWT secret
	// (consumed by RandomJWTSecret)
	SecretLength int
	// Expiry is the default token expiry - presumably seconds (3600 = 1h);
	// TODO confirm
	Expiry int
}
// jwtConfig bundles the JWT-related defaults.
type jwtConfig struct {
	Defaults jwtDefaults
}
// newJwtConfig assembles the static defaults for JWT secret handling.
func newJwtConfig() jwtConfig {
	defaults := jwtDefaults{
		SecretKey:    "secret",
		JwksKey:      "jwks.json",
		AnonKey:      "anon_key",
		ServiceKey:   "service_key",
		SecretLength: 40,
		Expiry:       3600,
	}

	return jwtConfig{Defaults: defaults}
}
// ObjectName derives the name of the JWT secret object owned by the given
// object, i.e. "<owner-name>-jwt".
func (jwtConfig) ObjectName(obj metav1.Object) string {
	const suffix = "jwt"
	return fmt.Sprintf("%s-%s", obj.GetName(), suffix)
}
func RandomJWTSecret() ([]byte, error) {
jwtSecretBytes := make([]byte, ServiceConfig.JWT.Defaults.SecretLength)

View file

@ -0,0 +1,53 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
// pgMetaEnvKeys maps the pg-meta configuration options to their environment
// variable names.
type pgMetaEnvKeys struct {
	APIPort    intEnv[int32]
	DBHost     stringEnv
	DBPort     intEnv[int]
	DBName     stringEnv
	DBUser     secretEnv
	DBPassword secretEnv
}
// pgMetaDefaults holds the deployment defaults for the pg-meta service.
type pgMetaDefaults struct {
	// APIPort is the container port the pg-meta API listens on
	APIPort int32
	// DBPort is the default Postgres port - kept as a string, presumably
	// because it is rendered directly into an env variable
	DBPort string
	// NodeUID/NodeGID the pg-meta container runs as
	NodeUID int64
	NodeGID int64
}
// pgMetaServiceConfig assembles the static service configuration for the
// pg-meta service.
func pgMetaServiceConfig() serviceConfig[pgMetaEnvKeys, pgMetaDefaults] {
	keys := pgMetaEnvKeys{
		APIPort:    "PG_META_PORT",
		DBHost:     "PG_META_DB_HOST",
		DBPort:     "PG_META_DB_PORT",
		DBName:     "PG_META_DB_NAME",
		DBUser:     "PG_META_DB_USER",
		DBPassword: "PG_META_DB_PASSWORD",
	}

	defaults := pgMetaDefaults{
		APIPort: 8080,
		DBPort:  "5432",
		NodeUID: 1000,
		NodeGID: 1000,
	}

	return serviceConfig[pgMetaEnvKeys, pgMetaDefaults]{
		Name:     "pg-meta",
		EnvKeys:  keys,
		Defaults: defaults,
	}
}

View file

@ -0,0 +1,72 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
// postgrestEnvKeys maps the PostgREST configuration options to their
// environment variable names.
type postgrestEnvKeys struct {
	Host                 fixedEnv
	DBUri                string
	Schemas              stringSliceEnv
	AnonRole             stringEnv
	JWTSecret            secretEnv
	UseLegacyGucs        boolEnv
	ExtraSearchPath      stringSliceEnv
	AppSettingsJWTSecret secretEnv
	AppSettingsJWTExpiry intEnv[int]
	AdminServerPort      intEnv[int32]
	MaxRows              intEnv[int]
	OpenAPIProxyURI      stringEnv
}
// postgrestConfigDefaults holds the deployment defaults for PostgREST.
type postgrestConfigDefaults struct {
	AnonRole        string
	Schemas         []string
	ExtraSearchPath []string
	// UID/GID the PostgREST container runs as
	UID, GID int64
	// ServerPort serves the REST API, AdminPort the PostgREST admin server
	ServerPort, AdminPort int32
	// port names used in the Service/container specs
	ServerPortName, AdminPortName string
}
// postgrestServiceConfig assembles the static service configuration for the
// PostgREST service.
func postgrestServiceConfig() serviceConfig[postgrestEnvKeys, postgrestConfigDefaults] {
	keys := postgrestEnvKeys{
		Host:                 fixedEnvOf("PGRST_SERVER_HOST", "*"),
		DBUri:                "PGRST_DB_URI",
		Schemas:              stringSliceEnv{key: "PGRST_DB_SCHEMAS", separator: ","},
		AnonRole:             "PGRST_DB_ANON_ROLE",
		JWTSecret:            "PGRST_JWT_SECRET",
		UseLegacyGucs:        "PGRST_DB_USE_LEGACY_GUCS",
		AppSettingsJWTSecret: "PGRST_APP_SETTINGS_JWT_SECRET",
		AppSettingsJWTExpiry: "PGRST_APP_SETTINGS_JWT_EXP",
		AdminServerPort:      "PGRST_ADMIN_SERVER_PORT",
		ExtraSearchPath:      stringSliceEnv{key: "PGRST_DB_EXTRA_SEARCH_PATH", separator: ","},
		MaxRows:              "PGRST_DB_MAX_ROWS",
		OpenAPIProxyURI:      "PGRST_OPENAPI_SERVER_PROXY_URI",
	}

	defaults := postgrestConfigDefaults{
		AnonRole:        "anon",
		Schemas:         []string{"public", "graphql_public"},
		ExtraSearchPath: []string{"public", "extensions"},
		UID:             1000,
		GID:             1000,
		ServerPort:      3000,
		AdminPort:       3001,
		ServerPortName:  "rest",
		AdminPortName:   "admin",
	}

	return serviceConfig[postgrestEnvKeys, postgrestConfigDefaults]{
		Name:     "postgrest",
		EnvKeys:  keys,
		Defaults: defaults,
	}
}

View file

@ -0,0 +1,92 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
// storageEnvApiKeys maps the storage API configuration options to their
// environment variable names, covering the file backend, the S3 backend and
// the S3-compatible protocol the storage API itself exposes.
type storageEnvApiKeys struct {
	AnonKey                     secretEnv
	ServiceKey                  secretEnv
	JwtSecret                   secretEnv
	JwtJwks                     secretEnv
	DatabaseDSN                 stringEnv
	FileSizeLimit               intEnv[uint64]
	UploadFileSizeLimit         intEnv[uint64]
	UploadFileSizeLimitStandard intEnv[uint64]
	StorageBackend              stringEnv
	FileStorageBackendPath      stringEnv
	FileStorageRegion           fixedEnv
	TenantID                    fixedEnv
	StorageS3Bucket             stringEnv
	StorageS3MaxSockets         intEnv[uint8]
	StorageS3Endpoint           stringEnv
	StorageS3ForcePathStyle     boolEnv
	StorageS3Region             stringEnv
	StorageS3AccessKeyId        secretEnv
	StorageS3AccessSecretKey    secretEnv
	// NOTE(review): "Imaage" is a typo ("Image"); renaming the field would
	// touch all referencing call sites, so it is only flagged here.
	EnableImaageTransformation bool
	ImgProxyURL                stringEnv
	TusUrlPath                 fixedEnv
	S3ProtocolAccessKeyId      secretEnv
	S3ProtocolAccessKeySecret  secretEnv
	// NOTE(review): maps to S3_ALLOW_FORWARDED_HEADER (no PROTOCOL in the
	// env name, see storageServiceConfig) - confirm that is intentional.
	S3ProtocolAllowForwardedHeader boolEnv
	S3ProtocolPrefix               fixedEnv
}
// storageApiDefaults holds the deployment defaults for the storage API.
type storageApiDefaults struct {
	// ApiPort is the container port the storage API listens on
	ApiPort int32
	// ApiPortName is the port name used in the Service/container specs
	ApiPortName string
	// UID/GID the storage API container runs as
	UID, GID int64
}
// storageServiceConfig assembles the static service configuration for the
// Supabase storage API: its canonical name, the environment variable mapping
// and the deployment defaults.
func storageServiceConfig() serviceConfig[storageEnvApiKeys, storageApiDefaults] {
	keys := storageEnvApiKeys{
		AnonKey:                     "ANON_KEY",
		ServiceKey:                  "SERVICE_KEY",
		JwtSecret:                   "AUTH_JWT_SECRET",
		JwtJwks:                     "AUTH_JWT_JWKS",
		StorageBackend:              "STORAGE_BACKEND",
		FileStorageBackendPath:      "FILE_STORAGE_BACKEND_PATH",
		FileStorageRegion:           fixedEnvOf("REGION", "stub"),
		DatabaseDSN:                 "DATABASE_URL",
		FileSizeLimit:               "FILE_SIZE_LIMIT",
		UploadFileSizeLimit:         "UPLOAD_FILE_SIZE_LIMIT",
		UploadFileSizeLimitStandard: "UPLOAD_FILE_SIZE_LIMIT_STANDARD",
		TenantID:                    fixedEnvOf("TENANT_ID", "stub"),
		StorageS3Bucket:             "STORAGE_S3_BUCKET",
		StorageS3MaxSockets:         "STORAGE_S3_MAX_SOCKETS",
		StorageS3Endpoint:           "STORAGE_S3_ENDPOINT",
		StorageS3ForcePathStyle:     "STORAGE_S3_FORCE_PATH_STYLE",
		StorageS3Region:             "STORAGE_S3_REGION",
		StorageS3AccessKeyId:        "AWS_ACCESS_KEY_ID",
		StorageS3AccessSecretKey:    "AWS_SECRET_ACCESS_KEY",
		// NOTE(review): "Imaage" typo inherited from the field name
		EnableImaageTransformation: "ENABLE_IMAGE_TRANSFORMATION",
		ImgProxyURL:                "IMGPROXY_URL",
		TusUrlPath:                 fixedEnvOf("TUS_URL_PATH", "/storage/v1/upload/resumable"),
		S3ProtocolAccessKeyId:      "S3_PROTOCOL_ACCESS_KEY_ID",
		S3ProtocolAccessKeySecret:  "S3_PROTOCOL_ACCESS_KEY_SECRET",
		S3ProtocolPrefix:           fixedEnvOf("S3_PROTOCOL_PREFIX", "/storage/v1"),
		// NOTE(review): env name has no PROTOCOL segment - confirm intended
		S3ProtocolAllowForwardedHeader: "S3_ALLOW_FORWARDED_HEADER",
	}

	defaults := storageApiDefaults{
		ApiPort:     5000,
		ApiPortName: "api",
		UID:         1000,
		GID:         1000,
	}

	return serviceConfig[storageEnvApiKeys, storageApiDefaults]{
		Name:     "storage-api",
		EnvKeys:  keys,
		Defaults: defaults,
	}
}

View file

@ -0,0 +1,57 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
// studioEnvKeys maps the Supabase Studio configuration options to their
// environment variable names.
type studioEnvKeys struct {
	PGMetaURL      stringEnv
	DBPassword     secretEnv
	ApiUrl         stringEnv
	APIExternalURL stringEnv
	JwtSecret      secretEnv
	AnonKey        secretEnv
	ServiceKey     secretEnv
	Host           fixedEnv
	LogsEnabled    fixedEnv
}
// studioDefaults holds the deployment defaults for Supabase Studio.
type studioDefaults struct {
	// NodeUID/NodeGID the Studio container runs as
	NodeUID int64
	NodeGID int64
	// APIPort is the container port Studio listens on
	APIPort int32
}
// studioServiceConfig assembles the static service configuration for
// Supabase Studio.
func studioServiceConfig() serviceConfig[studioEnvKeys, studioDefaults] {
	keys := studioEnvKeys{
		PGMetaURL:      "STUDIO_PG_META_URL",
		DBPassword:     "POSTGRES_PASSWORD",
		ApiUrl:         "SUPABASE_URL",
		APIExternalURL: "SUPABASE_PUBLIC_URL",
		JwtSecret:      "AUTH_JWT_SECRET",
		AnonKey:        "SUPABASE_ANON_KEY",
		ServiceKey:     "SUPABASE_SERVICE_KEY",
		Host:           fixedEnvOf("HOSTNAME", "0.0.0.0"),
		LogsEnabled:    fixedEnvOf("NEXT_PUBLIC_ENABLE_LOGS", "true"),
	}

	defaults := studioDefaults{
		NodeUID: 1000,
		NodeGID: 1000,
		APIPort: 3000,
	}

	return serviceConfig[studioEnvKeys, studioDefaults]{
		Name:     "studio",
		EnvKeys:  keys,
		Defaults: defaults,
	}
}

View file

@ -57,7 +57,7 @@ func SetupDashboardWebhookWithManager(mgr ctrl.Manager) error {
// SetupStorageWebhookWithManager registers the webhook for Storage in the manager.
func SetupStorageWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).For(&supabasev1alpha1.Storage{}).
WithValidator(&StorageCustomValidator{}).
WithValidator(&StorageCustomValidator{Client: mgr.GetClient()}).
WithDefaulter(&StorageCustomDefaulter{Client: mgr.GetClient()}).
Complete()
}

View file

@ -19,18 +19,12 @@ package v1alpha1
import (
"context"
"fmt"
"maps"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/webhook"
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
"code.icb4dc0.de/prskr/supabase-operator/internal/meta"
"code.icb4dc0.de/prskr/supabase-operator/internal/pw"
)
// +kubebuilder:webhook:path=/mutate-supabase-k8s-icb4dc0-de-v1alpha1-storage,mutating=true,failurePolicy=fail,sideEffects=None,groups=supabase.k8s.icb4dc0.de,resources=storages,verbs=create;update,versions=v1alpha1,name=mstorage-v1alpha1.kb.io,admissionReviewVersions=v1
@ -55,55 +49,19 @@ func (d *StorageCustomDefaulter) Default(ctx context.Context, obj runtime.Object
}
storagelog.Info("Defaulting for Storage", "name", storage.GetName())
if err := d.defaultS3Protocol(ctx, storage); err != nil {
return err
}
d.defaultS3Protocol(storage)
return nil
}
func (d *StorageCustomDefaulter) defaultS3Protocol(ctx context.Context, storage *supabasev1alpha1.Storage) error {
if storage.Spec.S3 == nil {
storage.Spec.S3 = new(supabasev1alpha1.S3ProtocolSpec)
func (d *StorageCustomDefaulter) defaultS3Protocol(storage *supabasev1alpha1.Storage) {
if storage.Spec.Api.S3Protocol == nil {
storage.Spec.Api.S3Protocol = new(supabasev1alpha1.S3ProtocolSpec)
}
if storage.Spec.S3.CredentialsSecretRef == nil {
storage.Spec.S3.CredentialsSecretRef = &supabasev1alpha1.S3CredentialsRef{
AccessKeyIdKey: "accessKeyId",
AccessSecretKeyKey: "secretAccessKey",
SecretName: fmt.Sprintf("%s-storage-protocol-s3-credentials", storage.Name),
if storage.Spec.Api.S3Protocol.CredentialsSecretRef == nil {
storage.Spec.Api.S3Protocol.CredentialsSecretRef = &supabasev1alpha1.S3CredentialsRef{
SecretName: fmt.Sprintf("%s-storage-protocol-s3-credentials", storage.Name),
}
}
credentialsSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: storage.Spec.S3.CredentialsSecretRef.SecretName,
Namespace: storage.Namespace,
},
}
_, err := controllerutil.CreateOrUpdate(ctx, d.Client, &credentialsSecret, func() error {
credentialsSecret.Labels = maps.Clone(storage.Labels)
if credentialsSecret.Labels == nil {
credentialsSecret.Labels = make(map[string]string)
}
credentialsSecret.Labels[meta.SupabaseLabel.Reload] = ""
if credentialsSecret.Data == nil {
credentialsSecret.Data = make(map[string][]byte, 2)
}
if _, ok := credentialsSecret.Data[storage.Spec.S3.CredentialsSecretRef.AccessKeyIdKey]; !ok {
credentialsSecret.Data[storage.Spec.S3.CredentialsSecretRef.AccessKeyIdKey] = pw.GeneratePW(32, nil)
}
if _, ok := credentialsSecret.Data[storage.Spec.S3.CredentialsSecretRef.AccessSecretKeyKey]; !ok {
credentialsSecret.Data[storage.Spec.S3.CredentialsSecretRef.AccessSecretKeyKey] = pw.GeneratePW(64, nil)
}
return nil
})
return err
}

View file

@ -18,9 +18,13 @@ package v1alpha1
import (
"context"
"errors"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@ -42,35 +46,43 @@ var storagelog = logf.Log.WithName("storage-resource")
// NOTE: The +kubebuilder:object:generate=false marker prevents controller-gen from generating DeepCopy methods,
// as this struct is used only for temporary operations and does not need to be deeply copied.
type StorageCustomValidator struct {
// TODO(user): Add more fields as needed for validation
client.Client
}
var _ webhook.CustomValidator = &StorageCustomValidator{}
// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Storage.
func (v *StorageCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
func (v *StorageCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
storage, ok := obj.(*supabasev1alpha1.Storage)
if !ok {
return nil, fmt.Errorf("expected a Storage object but got %T", obj)
}
storagelog.Info("Validation for Storage upon creation", "name", storage.GetName())
// TODO(user): fill in your validation logic upon object creation.
if ws, err := v.validateStorageApi(ctx, storage); err != nil {
return ws, err
} else {
warnings = append(warnings, ws...)
}
return nil, nil
return warnings, nil
}
// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Storage.
func (v *StorageCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
func (v *StorageCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings admission.Warnings, err error) {
storage, ok := newObj.(*supabasev1alpha1.Storage)
if !ok {
return nil, fmt.Errorf("expected a Storage object for the newObj but got %T", newObj)
}
storagelog.Info("Validation for Storage upon update", "name", storage.GetName())
// TODO(user): fill in your validation logic upon object update.
if ws, err := v.validateStorageApi(ctx, storage); err != nil {
return ws, err
} else {
warnings = append(warnings, ws...)
}
return nil, nil
return warnings, nil
}
// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Storage.
@ -81,7 +93,49 @@ func (v *StorageCustomValidator) ValidateDelete(ctx context.Context, obj runtime
}
storagelog.Info("Validation for Storage upon deletion", "name", storage.GetName())
// TODO(user): fill in your validation logic upon object deletion.
return nil, nil
}
// validateStorageApi validates the storage API spec:
//   - exactly one of file or S3 backend must be configured
//   - when the S3 backend is used, a credentials Secret ref must be set; a
//     missing Secret only yields a warning (it might be created later), but an
//     existing Secret must contain non-empty access key id & secret values at
//     the configured keys.
func (v *StorageCustomValidator) validateStorageApi(ctx context.Context, storage *supabasev1alpha1.Storage) (admission.Warnings, error) {
	var warnings admission.Warnings

	apiSpec := storage.Spec.Api

	// XOR check: exactly one backend has to be configured
	if (apiSpec.FileBackend == nil) == (apiSpec.S3Backend == nil) {
		return nil, errors.New("it is not possible to configure both or no backend at all - please configure either file or S3 backend")
	}

	if apiSpec.S3Backend == nil {
		// file backend needs no further validation here
		return warnings, nil
	}

	credsRef := apiSpec.S3Backend.CredentialsSecretRef
	if credsRef == nil {
		return nil, errors.New(".spec.api.s3Backend.credentialsSecretRef cannot be empty")
	}

	s3CredentialsSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: credsRef.SecretName,
			// bug fix: the lookup previously omitted the namespace and could
			// therefore never find the (namespaced) Secret
			Namespace: storage.Namespace,
		},
	}

	if err := v.Get(ctx, client.ObjectKeyFromObject(s3CredentialsSecret), s3CredentialsSecret); err != nil {
		if client.IgnoreNotFound(err) != nil {
			return nil, err
		}
		// not-found is only a warning - the Secret might be created later
		warnings = append(warnings, fmt.Sprintf("Secret %q could not be found", credsRef.SecretName))
		return warnings, nil
	}

	if accessKeyId, ok := s3CredentialsSecret.Data[credsRef.AccessKeyIdKey]; !ok {
		return warnings, fmt.Errorf("secret %q does not contain an access key id at specified key %q", credsRef.SecretName, credsRef.AccessKeyIdKey)
	} else if len(accessKeyId) == 0 {
		return warnings, fmt.Errorf("access key id in Secret %q with key %q is empty", credsRef.SecretName, credsRef.AccessKeyIdKey)
	}

	if accessSecretKey, ok := s3CredentialsSecret.Data[credsRef.AccessSecretKeyKey]; !ok {
		return warnings, fmt.Errorf("secret %q does not contain an access secret key at specified key %q", credsRef.SecretName, credsRef.AccessSecretKeyKey)
	} else if len(accessSecretKey) == 0 {
		return warnings, fmt.Errorf("access secret key in Secret %q with key %q is empty", credsRef.SecretName, credsRef.AccessSecretKeyKey)
	}

	return warnings, nil
}