feat(storage): finish initial basic implementation
Some checks failed
E2E Tests / Run on Ubuntu (push) Has been cancelled
Tests / Run on Ubuntu (push) Has been cancelled
Lint / Run on Ubuntu (push) Has been cancelled
Docs / deploy (push) Successful in 1m51s
release / release (push) Successful in 6m36s

- support both s3 & file storage backends
- support imgproxy to scale images
- manually tested with MinIO & local storage
- fixed service discovery issue in APIGateway reconciler not detecting
  service changes
- refactored defaults and env variable code to make it manageable again
- add repo link to docs
This commit is contained in:
Peter 2025-01-23 18:00:05 +01:00
parent 604525de38
commit 0014927ca9
Signed by: prskr
GPG key ID: F56BED6903BC5E37
46 changed files with 16170 additions and 606 deletions

View file

@ -22,4 +22,4 @@ jobs:
- name: Running Tests
run: |
go mod tidy
make test
go run mage.go Test

View file

@ -3,7 +3,6 @@ load('ext://restart_process', 'docker_build_with_restart')
allow_k8s_contexts('kind-kind')
local('./dev/prepare-dev-cluster.sh')
k8s_yaml(kustomize('config/dev'))
k8s_yaml(kustomize('config/samples'))
@ -49,6 +48,8 @@ k8s_resource(
port_forwards=5432
)
k8s_resource(workload='minio', port_forwards=[9000,9090])
k8s_resource(
objects=["core-sample:Core:supabase-demo"],
new_name='Supabase Core',

View file

@ -48,6 +48,15 @@ func (s JwtSpec) SecretKeySelector() *corev1.SecretKeySelector {
}
}
func (s JwtSpec) JwksKeySelector() *corev1.SecretKeySelector {
return &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: s.SecretName,
},
Key: s.JwksKey,
}
}
func (s JwtSpec) AnonKeySelector() *corev1.SecretKeySelector {
return &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@ -74,7 +83,8 @@ type ImageSpec struct {
type ContainerTemplate struct {
ImageSpec `json:",inline"`
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// SecurityContext -
// SecurityContext - override the container SecurityContext
// use with caution, by default the operator already uses sane defaults
SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
@ -83,10 +93,11 @@ type ContainerTemplate struct {
type WorkloadTemplate struct {
Replicas *int32 `json:"replicas,omitempty"`
SecurityContext *corev1.PodSecurityContext `json:"securityContext"`
SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
AdditionalLabels map[string]string `json:"additionalLabels,omitempty"`
// Workload - customize the container template of the workload
Workload *ContainerTemplate `json:"workload,omitempty"`
Workload *ContainerTemplate `json:"workload,omitempty"`
AdditionalVolumes []corev1.Volume `json:"additionalVolumes,omitempty"`
}
func (t *WorkloadTemplate) ReplicaCount() *int32 {
@ -185,6 +196,14 @@ func (t *WorkloadTemplate) AdditionalVolumeMounts(defaultMounts ...corev1.Volume
return defaultMounts
}
func (t *WorkloadTemplate) Volumes(defaultVolumes ...corev1.Volume) []corev1.Volume {
if t == nil {
return defaultVolumes
}
return append(defaultVolumes, t.AdditionalVolumes...)
}
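
A note on the accessor style used throughout this file: methods like Volumes and ReplicaCount are deliberately written with nil-receiver guards, so callers never have to check whether an optional template section was set. A minimal, self-contained sketch of the pattern (trimmed-down type, names illustrative only):

package main

import "fmt"

// workloadTemplate is a trimmed-down stand-in for WorkloadTemplate.
type workloadTemplate struct {
    replicas *int32
}

// Calling a method on a nil pointer receiver is legal in Go as long as the
// receiver is not dereferenced - the guard makes an omitted spec section
// behave like "all defaults".
func (t *workloadTemplate) replicaCount() *int32 {
    if t == nil {
        return nil
    }
    return t.replicas
}

func main() {
    var tpl *workloadTemplate // e.g. spec.workloadTemplate was omitted
    fmt.Println(tpl.replicaCount() == nil) // true, and no panic
}
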
func (t *WorkloadTemplate) PodSecurityContext() *corev1.PodSecurityContext {
if t != nil && t.SecurityContext != nil {
return t.SecurityContext

View file

@ -18,14 +18,17 @@ package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"code.icb4dc0.de/prskr/supabase-operator/internal/supabase"
)
type StorageBackend string
type BackendStorageType string
const (
StorageBackendFile StorageBackend = "file"
StorageBackendS3 StorageBackend = "s3"
BackendStorageTypeFile BackendStorageType = "file"
BackendStorageTypeS3 BackendStorageType = "s3"
)
type StorageApiDbSpec struct {
@ -67,11 +70,25 @@ type S3CredentialsRef struct {
AccessSecretKeyKey string `json:"accessSecretKeyKey,omitempty"`
}
type S3ProtocolSpec struct {
// Region - S3 region to use in the API
// +kubebuilder:default="us-east-1"
Region string `json:"region,omitempty"`
func (r S3CredentialsRef) AccessKeyIdSelector() *corev1.SecretKeySelector {
return &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: r.SecretName,
},
Key: r.AccessKeyIdKey,
}
}
func (r S3CredentialsRef) AccessSecretKeySelector() *corev1.SecretKeySelector {
return &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: r.SecretName,
},
Key: r.AccessSecretKeyKey,
}
}
type S3ProtocolSpec struct {
// AllowForwardedHeader
// +kubebuilder:default=true
AllowForwardedHeader bool `json:"allowForwardedHeader,omitempty"`
@ -80,11 +97,85 @@ type S3ProtocolSpec struct {
CredentialsSecretRef *S3CredentialsRef `json:"credentialsSecretRef,omitempty"`
}
// StorageSpec defines the desired state of Storage.
type StorageSpec struct {
// BackendType - backend storage type to use
// +kubebuilder:validation:Enum={s3,file}
BackendType StorageBackend `json:"backendType"`
type FileBackendSpec struct {
// Path - path to where files will be stored
Path string `json:"path"`
}
func (s *FileBackendSpec) Env() []corev1.EnvVar {
if s == nil {
return nil
}
svcCfg := supabase.ServiceConfig.Storage
return []corev1.EnvVar{
svcCfg.EnvKeys.StorageBackend.Var("file"),
svcCfg.EnvKeys.TenantID.Var(),
svcCfg.EnvKeys.FileStorageBackendPath.Var(s.Path),
svcCfg.EnvKeys.StorageS3Region.Var("local"),
svcCfg.EnvKeys.StorageS3Bucket.Var("stub"),
}
}
type S3BackendSpec struct {
// Region - S3 region of the backend
Region string `json:"region"`
// Endpoint - hostname and port **with** http/https
Endpoint string `json:"endpoint"`
// ForcePathStyle - whether to use path style (e.g. for MinIO) or domain style
// for bucket addressing
ForcePathStyle bool `json:"forcePathStyle,omitempty"`
// Bucket - bucket to use; if the file backend is used, the default value is sufficient
// +kubebuilder:default="stub"
Bucket string `json:"bucket"`
// CredentialsSecretRef - reference to the Secret where access key id and access secret key are stored
CredentialsSecretRef *S3CredentialsRef `json:"credentialsSecretRef"`
}
func (s *S3BackendSpec) Env() []corev1.EnvVar {
if s == nil {
return nil
}
svcCfg := supabase.ServiceConfig.Storage
return []corev1.EnvVar{
svcCfg.EnvKeys.StorageBackend.Var("s3"),
svcCfg.EnvKeys.StorageS3Endpoint.Var(s.Endpoint),
svcCfg.EnvKeys.StorageS3ForcePathStyle.Var(s.ForcePathStyle),
svcCfg.EnvKeys.StorageS3Bucket.Var(s.Bucket),
svcCfg.EnvKeys.StorageS3Region.Var(s.Region),
svcCfg.EnvKeys.StorageS3AccessKeyId.Var(s.CredentialsSecretRef.AccessKeyIdSelector()),
svcCfg.EnvKeys.StorageS3AccessSecretKey.Var(s.CredentialsSecretRef.AccessSecretKeySelector()),
}
}
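
Because each backend's Env() returns nil for a nil receiver and only one backend is expected to be configured, a reconciler can concatenate both results unconditionally, which is exactly what the Storage API reconciler later does via slices.Concat. A hedged sketch of that interplay, with simplified types and illustrative env var names standing in for the operator's EnvKeys:

package main

import (
    "fmt"
    "slices"
)

type envVar struct{ Name, Value string }

type fileBackend struct{ path string }

func (f *fileBackend) env() []envVar {
    if f == nil {
        return nil // an unconfigured backend contributes nothing
    }
    return []envVar{{"STORAGE_BACKEND", "file"}, {"FILE_BACKEND_PATH", f.path}}
}

type s3Backend struct{ endpoint string }

func (s *s3Backend) env() []envVar {
    if s == nil {
        return nil
    }
    return []envVar{{"STORAGE_BACKEND", "s3"}, {"S3_ENDPOINT", s.endpoint}}
}

func main() {
    // only the file backend is set, mirroring a valid Storage spec
    file := &fileBackend{path: "/tmp"}
    var s3 *s3Backend
    env := slices.Concat(file.env(), s3.env())
    fmt.Println(len(env)) // 2 - only the configured backend's variables
}
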
type UploadTempSpec struct {
// Medium of the empty dir to cache uploads
Medium corev1.StorageMedium `json:"medium,omitempty"`
SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"`
}
func (s *UploadTempSpec) VolumeSource() *corev1.EmptyDirVolumeSource {
if s == nil {
return &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumDefault,
}
}
return &corev1.EmptyDirVolumeSource{
Medium: s.Medium,
SizeLimit: s.SizeLimit,
}
}
type StorageApiSpec struct {
S3Backend *S3BackendSpec `json:"s3Backend,omitempty"`
// FileBackend - configure the file backend
// either S3 or file backend **MUST** be configured
FileBackend *FileBackendSpec `json:"fileBackend,omitempty"`
// FileSizeLimit - maximum file upload size in bytes
// +kubebuilder:default=52428800
FileSizeLimit uint64 `json:"fileSizeLimit,omitempty"`
@ -95,11 +186,30 @@ type StorageSpec struct {
// DBSpec - Configure access to the Postgres database
// In most cases this will reference the supabase-storage-admin credentials secret provided by the Core resource
DBSpec StorageApiDbSpec `json:"db"`
// S3 - Configure S3 protocol
S3 *S3ProtocolSpec `json:"s3,omitempty"`
// EnableImageTransformation - whether to deploy the image proxy
// S3Protocol - Configure S3 access to the Storage API allowing clients to use any S3 client
S3Protocol *S3ProtocolSpec `json:"s3,omitempty"`
// UploadTemp - configure the emptyDir for storing intermediate files during uploads
UploadTemp *UploadTempSpec `json:"uploadTemp,omitempty"`
// WorkloadTemplate - customize the Storage API workload
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
}
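
The FileBackend doc comment states that either the S3 or the file backend MUST be configured, but nothing in this hunk enforces that at admission time. One possible way to encode the contract, sketched as a hypothetical kubebuilder CEL marker that is not part of this commit (it reuses the spec types defined above):

package v1alpha1

// Hypothetical marker, not in this commit: rejects specs where zero or both
// backends are set, turning the doc-comment contract into an admission error.
// +kubebuilder:validation:XValidation:rule="has(self.s3Backend) != has(self.fileBackend)",message="exactly one of s3Backend or fileBackend must be configured"
type storageBackendsSketch struct {
    S3Backend   *S3BackendSpec   `json:"s3Backend,omitempty"`
    FileBackend *FileBackendSpec `json:"fileBackend,omitempty"`
}
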
type ImageProxySpec struct {
// Enable - whether to deploy the image proxy or not
Enable bool `json:"enable,omitempty"`
EnableWebPDetection bool `json:"enableWebPDetection,omitempty"`
// WorkloadTemplate - customize the image proxy workload
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
}
// StorageSpec defines the desired state of Storage.
type StorageSpec struct {
// Api - configure the Storage API
Api StorageApiSpec `json:"api,omitempty"`
// ImageProxy - optionally enable and configure the image proxy
// the image proxy scales images to lower resolutions on demand to reduce traffic, for instance for mobile devices
EnableImageTransformation bool `json:"enableImageTransformation,omitempty"`
ImageProxy *ImageProxySpec `json:"imageProxy,omitempty"`
}
// StorageStatus defines the observed state of Storage.

View file

@ -21,7 +21,7 @@ limitations under the License.
package v1alpha1
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@ -773,6 +773,21 @@ func (in *EnvoyStatus) DeepCopy() *EnvoyStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FileBackendSpec) DeepCopyInto(out *FileBackendSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileBackendSpec.
func (in *FileBackendSpec) DeepCopy() *FileBackendSpec {
if in == nil {
return nil
}
out := new(FileBackendSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GithubAuthProvider) DeepCopyInto(out *GithubAuthProvider) {
*out = *in
@ -790,6 +805,26 @@ func (in *GithubAuthProvider) DeepCopy() *GithubAuthProvider {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageProxySpec) DeepCopyInto(out *ImageProxySpec) {
*out = *in
if in.WorkloadTemplate != nil {
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
*out = new(WorkloadTemplate)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageProxySpec.
func (in *ImageProxySpec) DeepCopy() *ImageProxySpec {
if in == nil {
return nil
}
out := new(ImageProxySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
*out = *in
@ -927,6 +962,26 @@ func (in *PostgrestSpec) DeepCopy() *PostgrestSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *S3BackendSpec) DeepCopyInto(out *S3BackendSpec) {
*out = *in
if in.CredentialsSecretRef != nil {
in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef
*out = new(S3CredentialsRef)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BackendSpec.
func (in *S3BackendSpec) DeepCopy() *S3BackendSpec {
if in == nil {
return nil
}
out := new(S3BackendSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *S3CredentialsRef) DeepCopyInto(out *S3CredentialsRef) {
*out = *in
@ -1024,6 +1079,48 @@ func (in *StorageApiDbSpec) DeepCopy() *StorageApiDbSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageApiSpec) DeepCopyInto(out *StorageApiSpec) {
*out = *in
if in.S3Backend != nil {
in, out := &in.S3Backend, &out.S3Backend
*out = new(S3BackendSpec)
(*in).DeepCopyInto(*out)
}
if in.FileBackend != nil {
in, out := &in.FileBackend, &out.FileBackend
*out = new(FileBackendSpec)
**out = **in
}
out.JwtAuth = in.JwtAuth
in.DBSpec.DeepCopyInto(&out.DBSpec)
if in.S3Protocol != nil {
in, out := &in.S3Protocol, &out.S3Protocol
*out = new(S3ProtocolSpec)
(*in).DeepCopyInto(*out)
}
if in.UploadTemp != nil {
in, out := &in.UploadTemp, &out.UploadTemp
*out = new(UploadTempSpec)
(*in).DeepCopyInto(*out)
}
if in.WorkloadTemplate != nil {
in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
*out = new(WorkloadTemplate)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageApiSpec.
func (in *StorageApiSpec) DeepCopy() *StorageApiSpec {
if in == nil {
return nil
}
out := new(StorageApiSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageList) DeepCopyInto(out *StorageList) {
*out = *in
@ -1059,11 +1156,10 @@ func (in *StorageList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageSpec) DeepCopyInto(out *StorageSpec) {
*out = *in
out.JwtAuth = in.JwtAuth
in.DBSpec.DeepCopyInto(&out.DBSpec)
if in.S3 != nil {
in, out := &in.S3, &out.S3
*out = new(S3ProtocolSpec)
in.Api.DeepCopyInto(&out.Api)
if in.ImageProxy != nil {
in, out := &in.ImageProxy, &out.ImageProxy
*out = new(ImageProxySpec)
(*in).DeepCopyInto(*out)
}
}
@ -1125,6 +1221,26 @@ func (in *StudioSpec) DeepCopy() *StudioSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UploadTempSpec) DeepCopyInto(out *UploadTempSpec) {
*out = *in
if in.SizeLimit != nil {
in, out := &in.SizeLimit, &out.SizeLimit
x := (*in).DeepCopy()
*out = &x
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UploadTempSpec.
func (in *UploadTempSpec) DeepCopy() *UploadTempSpec {
if in == nil {
return nil
}
out := new(UploadTempSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
*out = *in
@ -1150,6 +1266,13 @@ func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
*out = new(ContainerTemplate)
(*in).DeepCopyInto(*out)
}
if in.AdditionalVolumes != nil {
in, out := &in.AdditionalVolumes, &out.AdditionalVolumes
*out = make([]v1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadTemplate.

View file

@ -149,6 +149,27 @@ func (m manager) Run(ctx context.Context) error {
return fmt.Errorf("unable to create controller APIGateway: %w", err)
}
if err = (&controller.StorageApiReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller APIGateway: %w", err)
}
if err = (&controller.StorageImgProxyReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller APIGateway: %w", err)
}
if err = (&controller.StorageS3CredentialsReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller APIGateway: %w", err)
}
// nolint:goconst
if os.Getenv("ENABLE_WEBHOOKS") != "false" {
if err = webhooksupabasev1alpha1.SetupCoreWebhookWithManager(mgr); err != nil {

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -2,9 +2,17 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml
- https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0/cnpg-1.25.0.yaml
- resources/minio.yaml
- ../default
patches:
- path: manager_dev_settings.yaml
target:
kind: Deployment
labelSelector: app.kubernetes.io/name=supabase-operator
- path: manager_dev_settings.yaml
target:
kind: Deployment
labelSelector: app.kubernetes.io/name=control-plane

View file

@ -0,0 +1,3 @@
- op: replace
path: /spec/replicas
value: 1

View file

@ -0,0 +1,52 @@
# Deploys a new Namespace for the MinIO Pod
apiVersion: v1
kind: Namespace
metadata:
name: minio-dev
labels:
name: minio-dev
---
apiVersion: v1
kind: Pod
metadata:
labels:
app.kubernetes.io/name: minio
app.kubernetes.io/managed-by: tilt
name: minio
namespace: minio-dev # Change this value to match the namespace metadata.name
spec:
containers:
- name: minio
image: quay.io/minio/minio:latest
command:
- /bin/bash
- -c
env:
- name: MINIO_ROOT_USER
value: minio
- name: MINIO_ROOT_PASSWORD
value: 1n1t-R00t!
args:
- minio server /data --console-address :9090 --json
volumeMounts:
- mountPath: /data
name: localvolume # Corresponds to the `spec.volumes` Persistent Volume
volumes:
- name: localvolume
hostPath: # MinIO generally recommends using locally-attached volumes
path: /mnt/disk1/data # Specify a path to a local drive or volume on the Kubernetes worker node
type: DirectoryOrCreate # The path to the last directory must exist
---
apiVersion: v1
kind: Service
metadata:
name: minio
namespace: minio-dev
spec:
selector:
app.kubernetes.io/name: minio
app.kubernetes.io/managed-by: tilt
ports:
- protocol: TCP
port: 9000
targetPort: 9000

View file

@ -0,0 +1,35 @@
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
kind: Storage
metadata:
labels:
app.kubernetes.io/name: supabase-operator
app.kubernetes.io/managed-by: kustomize
name: storage-sample
spec:
api:
fileBackend:
path: /tmp
db:
host: cluster-example-rw.supabase-demo.svc
dbName: app
dbCredentialsRef:
# will be created by Core resource operator if not present
# just make sure the secret name is either based on the name of the core resource or explicitly set
# format <core-resource-name>-db-creds-supabase-storage-admin
secretName: core-sample-db-creds-supabase-storage-admin
jwtAuth:
# will be created by Core resource operator if not present
# just make sure the secret name is either based on the name of the core resource or explicitly set
secretName: core-sample-jwt
workloadTemplate:
workload:
volumeMounts:
- name: storage-temp
mountPath: /tmp
additionalVolumes:
- name: storage-temp
emptyDir:
sizeLimit: 500Mi
imageProxy:
enable: true

View file

@ -1,3 +1,12 @@
---
apiVersion: v1
kind: Secret
metadata:
name: storage-s3-credentials
stringData:
accessKeyId: FPxTAFL7NaubjPgIGBo3
secretAccessKey: 7F437pPe84QcoocD3MWdAIVBU3oXonhVHxK645tm
---
apiVersion: supabase.k8s.icb4dc0.de/v1alpha1
kind: Storage
metadata:
@ -6,17 +15,27 @@ metadata:
app.kubernetes.io/managed-by: kustomize
name: storage-sample
spec:
backendType: file
db:
host: cluster-example-rw.supabase-demo.svc
dbName: app
dbCredentialsRef:
api:
s3Backend:
endpoint: http://minio.minio-dev.svc:9000
region: us-east-1
forcePathStyle: true
bucket: test
credentialsSecretRef:
secretName: storage-s3-credentials
s3Protocol: {}
db:
host: cluster-example-rw.supabase-demo.svc
dbName: app
dbCredentialsRef:
# will be created by Core resource operator if not present
# just make sure the secret name is either based on the name of the core resource or explicitly set
# format <core-resource-name>-db-creds-supabase-storage-admin
secretName: core-sample-db-creds-supabase-storage-admin
enableImageTransformation: true
jwtAuth:
# will be created by Core resource operator if not present
# just make sure the secret name is either based on the name of the core resource or explicitly set
# format <core-resource-name>-db-creds-supabase-storage-admin
secretName: core-sample-db-creds-supabase-storage-admin
enableImageTransformation: true
jwtAuth:
# will be created by Core resource operator if not present
# just make sure the secret name is either based on the name of the core resource or explicitly set
secretName: core-sample-jwt
secretName: core-sample-jwt
imageProxy:
enable: true

View file

@ -1,4 +0,0 @@
#!/usr/bin/env bash
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.2/cert-manager.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml

View file

@ -156,6 +156,8 @@ _Appears in:_
| `url` _string_ | | | |
#### ContainerTemplate
@ -172,7 +174,7 @@ _Appears in:_
| `image` _string_ | | | |
| `pullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#pullpolicy-v1-core)_ | | | |
| `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#localobjectreference-v1-core) array_ | | | |
| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#securitycontext-v1-core)_ | SecurityContext - | | |
| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#securitycontext-v1-core)_ | SecurityContext - override the container SecurityContext<br />use with caution, by default the operator already uses sane defaults | | |
| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core)_ | | | |
| `volumeMounts` _[VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#volumemount-v1-core) array_ | | | |
| `additionalEnv` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#envvar-v1-core) array_ | | | |
@ -522,6 +524,22 @@ _Appears in:_
| `resourceHash` _integer array_ | | | |
#### FileBackendSpec
_Appears in:_
- [StorageApiSpec](#storageapispec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `path` _string_ | Path - path to where files will be stored | | |
#### GithubAuthProvider
@ -541,6 +559,24 @@ _Appears in:_
| `url` _string_ | | | |
#### ImageProxySpec
_Appears in:_
- [StorageSpec](#storagespec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enable` _boolean_ | Enable - whether to deploy the image proxy or not | | |
| `enableWebPDetection` _boolean_ | | | |
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the image proxy workload | | |
#### ImageSpec
@ -568,7 +604,7 @@ _Appears in:_
_Appears in:_
- [CoreJwtSpec](#corejwtspec)
- [StorageSpec](#storagespec)
- [StorageApiSpec](#storageapispec)
- [StudioSpec](#studiospec)
| Field | Description | Default | Validation |
@ -664,6 +700,26 @@ _Appears in:_
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the PostgREST workload | | |
#### S3BackendSpec
_Appears in:_
- [StorageApiSpec](#storageapispec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `region` _string_ | Region - S3 region of the backend | | |
| `endpoint` _string_ | Endpoint - hostname and port **with** http/https | | |
| `forcePathStyle` _boolean_ | ForcePathStyle - whether to use path style (e.g. for MinIO) or domain style<br />for bucket addressing | | |
| `bucket` _string_ | Bucket - bucket to use; if the file backend is used, the default value is sufficient | stub | |
| `credentialsSecretRef` _[S3CredentialsRef](#s3credentialsref)_ | CredentialsSecretRef - reference to the Secret where access key id and access secret key are stored | | |
#### S3CredentialsRef
@ -673,6 +729,7 @@ _Appears in:_
_Appears in:_
- [S3BackendSpec](#s3backendspec)
- [S3ProtocolSpec](#s3protocolspec)
| Field | Description | Default | Validation |
@ -691,11 +748,10 @@ _Appears in:_
_Appears in:_
- [StorageSpec](#storagespec)
- [StorageApiSpec](#storageapispec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `region` _string_ | Region - S3 region to use in the API | us-east-1 | |
| `allowForwardedHeader` _boolean_ | AllowForwardedHeader | true | |
| `credentialsSecretRef` _[S3CredentialsRef](#s3credentialsref)_ | CredentialsSecretRef - reference to the Secret where access key id and access secret key are stored | | |
@ -746,7 +802,7 @@ _Appears in:_
_Appears in:_
- [StorageSpec](#storagespec)
- [StorageApiSpec](#storageapispec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
@ -756,9 +812,9 @@ _Appears in:_
| `dbCredentialsRef` _[DbCredentialsReference](#dbcredentialsreference)_ | DBCredentialsRef - reference to a Secret key where the DB credentials can be retrieved from<br />Credentials need to be stored in basic auth form | | |
#### StorageBackend
#### StorageApiSpec
_Underlying type:_ _string_
@ -767,10 +823,16 @@ _Underlying type:_ _string_
_Appears in:_
- [StorageSpec](#storagespec)
| Field | Description |
| --- | --- |
| `file` | |
| `s3` | |
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `s3Backend` _[S3BackendSpec](#s3backendspec)_ | | | |
| `fileBackend` _[FileBackendSpec](#filebackendspec)_ | FileBackend - configure the file backend<br />either S3 or file backend **MUST** be configured | | |
| `fileSizeLimit` _integer_ | FileSizeLimit - maximum file upload size in bytes | 52428800 | |
| `jwtAuth` _[JwtSpec](#jwtspec)_ | JwtAuth - Configure the JWT authentication parameters.<br />This includes where to retrieve anon and service key from as well as JWT secret and JWKS references<br />needed to validate JWTs sent to the API | | |
| `db` _[StorageApiDbSpec](#storageapidbspec)_ | DBSpec - Configure access to the Postgres database<br />In most cases this will reference the supabase-storage-admin credentials secret provided by the Core resource | | |
| `s3` _[S3ProtocolSpec](#s3protocolspec)_ | S3Protocol - Configure S3 access to the Storage API allowing clients to use any S3 client | | |
| `uploadTemp` _[UploadTempSpec](#uploadtempspec)_ | UploadTemp - configure the emptyDir for storing intermediate files during uploads | | |
| `workloadTemplate` _[WorkloadTemplate](#workloadtemplate)_ | WorkloadTemplate - customize the Storage API workload | | |
#### StorageList
@ -804,12 +866,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `backendType` _[StorageBackend](#storagebackend)_ | BackendType - backend storage type to use | | Enum: [s3 file] <br /> |
| `fileSizeLimit` _integer_ | FileSizeLimit - maximum file upload size in bytes | 52428800 | |
| `jwtAuth` _[JwtSpec](#jwtspec)_ | JwtAuth - Configure the JWT authentication parameters.<br />This includes where to retrieve anon and service key from as well as JWT secret and JWKS references<br />needed to validate JWTs send to the API | | |
| `db` _[StorageApiDbSpec](#storageapidbspec)_ | DBSpec - Configure access to the Postgres database<br />In most cases this will reference the supabase-storage-admin credentials secret provided by the Core resource | | |
| `s3` _[S3ProtocolSpec](#s3protocolspec)_ | S3 - Configure S3 protocol | | |
| `enableImageTransformation` _boolean_ | EnableImageTransformation - whether to deploy the image proxy<br />the image proxy scale images to lower resolutions on demand to reduce traffic for instance for mobile devices | | |
| `api` _[StorageApiSpec](#storageapispec)_ | Api - configure the Storage API | | |
| `imageProxy` _[ImageProxySpec](#imageproxyspec)_ | ImageProxy - optionally enable and configure the image proxy<br />the image proxy scales images to lower resolutions on demand to reduce traffic, for instance for mobile devices | | |
@ -833,6 +891,23 @@ _Appears in:_
| `externalUrl` _string_ | APIExternalURL is referring to the URL where Supabase API will be available<br />Typically this is the ingress of the API gateway | | |
#### UploadTempSpec
_Appears in:_
- [StorageApiSpec](#storageapispec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `medium` _[StorageMedium](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storagemedium-v1-core)_ | Medium of the empty dir to cache uploads | | |
| `sizeLimit` _[Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#quantity-resource-api)_ | | | |
#### WorkloadTemplate
@ -844,8 +919,10 @@ _Appears in:_
_Appears in:_
- [AuthSpec](#authspec)
- [EnvoySpec](#envoyspec)
- [ImageProxySpec](#imageproxyspec)
- [PGMetaSpec](#pgmetaspec)
- [PostgrestSpec](#postgrestspec)
- [StorageApiSpec](#storageapispec)
- [StudioSpec](#studiospec)
| Field | Description | Default | Validation |
@ -854,5 +931,6 @@ _Appears in:_
| `securityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#podsecuritycontext-v1-core)_ | | | |
| `additionalLabels` _object (keys:string, values:string)_ | | | |
| `workload` _[ContainerTemplate](#containertemplate)_ | Workload - customize the container template of the workload | | |
| `additionalVolumes` _[Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#volume-v1-core) array_ | | | |

View file

@ -35,8 +35,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
"code.icb4dc0.de/prskr/supabase-operator/internal/meta"
@ -129,12 +131,23 @@ func (r *APIGatewayReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma
return fmt.Errorf("constructor selector for watching secrets: %w", err)
}
apiGatewayTargetSelector, err := predicate.LabelSelectorPredicate(metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{
Key: meta.SupabaseLabel.ApiGatewayTarget,
Operator: metav1.LabelSelectorOpExists,
}},
})
if err != nil {
return fmt.Errorf("failed to build selector for watching API target services: %w", err)
}
return ctrl.NewControllerManagedBy(mgr).
For(&supabasev1alpha1.APIGateway{}).
Named("apigateway").
Owns(new(corev1.ConfigMap)).
Owns(new(appsv1.Deployment)).
Owns(new(corev1.Service)).
// watch JWKS secret
Watches(
new(corev1.Secret),
FieldSelectorEventHandler[*supabasev1alpha1.APIGateway, *supabasev1alpha1.APIGatewayList](r.Client,
@ -145,9 +158,49 @@ func (r *APIGatewayReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma
reloadSelector,
),
).
Watches(
new(corev1.Service),
r.apiTargetServiceEventHandler(),
builder.WithPredicates(apiGatewayTargetSelector),
).
Complete(r)
}
func (r *APIGatewayReconciler) apiTargetServiceEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] {
return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
var (
list supabasev1alpha1.APIGatewayList
logger = log.FromContext(ctx, "object", obj.GetName(), "namespace", obj.GetNamespace())
)
targetName, ok := obj.GetLabels()[meta.SupabaseLabel.ApiGatewayTarget]
if !ok {
logger.Info("Service is not APIGateway target")
return nil
}
if err := r.Client.List(ctx, &list, client.InNamespace(obj.GetNamespace())); err != nil {
logger.Error(err, "Failed to list Services to map updates to APIGateway reconciliation requests")
return nil
}
if targetName != "" {
for gw := range list.Iter() {
if gw.Name == targetName {
return []reconcile.Request{{NamespacedName: client.ObjectKeyFromObject(gw)}}
}
}
} else {
requests := make([]reconcile.Request, 0, len(list.Items))
for gw := range list.Iter() {
requests = append(requests, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(gw)})
}
return requests
}
return nil
})
}
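
For readers unfamiliar with controller-runtime watch mapping: EnqueueRequestsFromMapFunc turns each event on a watched Service into zero or more reconcile requests. A compilable sketch of the targeted case above, with an assumed label key standing in for meta.SupabaseLabel.ApiGatewayTarget:

package sketch

import (
    "context"

    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// assumed label key, standing in for meta.SupabaseLabel.ApiGatewayTarget
const gatewayTargetLabel = "supabase.k8s.icb4dc0.de/api-gateway-target"

// An empty label value means "any gateway in the namespace"; a non-empty
// value addresses exactly one gateway by name (broadcast case omitted here).
func targetServiceMapper() handler.EventHandler {
    return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
        name, ok := obj.GetLabels()[gatewayTargetLabel]
        if !ok || name == "" {
            return nil
        }
        return []reconcile.Request{{
            NamespacedName: client.ObjectKey{Namespace: obj.GetNamespace(), Name: name},
        }}
    })
}
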
func (r *APIGatewayReconciler) reconcileJwksSecret(
ctx context.Context,
gateway *supabasev1alpha1.APIGateway,

View file

@ -19,7 +19,7 @@ package controller
import (
"bytes"
"context"
"crypto/sha256"
"hash/fnv"
"maps"
"net/url"
"time"
@ -173,7 +173,7 @@ func (r *CoreDbReconciler) ensureDbRolesSecrets(
core.Status.Database.Roles = make(map[string][]byte)
}
hash := sha256.New()
hash := fnv.New64a()
for secretName, role := range roles {
secretLogger := logger.WithValues("secret_name", secretName, "role_name", role.String())

View file

@ -71,7 +71,7 @@ func (r *CoreAuthReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r
// SetupWithManager sets up the controller with the Manager.
func (r *CoreAuthReconciler) SetupWithManager(mgr ctrl.Manager) error {
// TODO watch changes in DB credentials secret
// TODO watch changes in DB credentials secret & JWT secret
return ctrl.NewControllerManagedBy(mgr).
For(new(supabasev1alpha1.Core)).
Owns(new(appsv1.Deployment)).

View file

@ -91,10 +91,7 @@ func (r *CorePostgrestReconiler) reconilePostgrestDeployment(
postgrestDeployment = &appsv1.Deployment{
ObjectMeta: serviceCfg.ObjectMeta(core),
}
postgrestSpec = core.Spec.Postgrest
)
var (
postgrestSpec = core.Spec.Postgrest
anonRole = ValueOrFallback(postgrestSpec.AnonRole, serviceCfg.Defaults.AnonRole)
postgrestSchemas = ValueOrFallback(postgrestSpec.Schemas, serviceCfg.Defaults.Schemas)
jwtSecretHash string
@ -178,12 +175,12 @@ func (r *CorePostgrestReconiler) reconilePostgrestDeployment(
Env: postgrestSpec.WorkloadTemplate.MergeEnv(postgrestEnv),
Ports: []corev1.ContainerPort{
{
Name: "rest",
Name: serviceCfg.Defaults.ServerPortName,
ContainerPort: serviceCfg.Defaults.ServerPort,
Protocol: corev1.ProtocolTCP,
},
{
Name: "admin",
Name: serviceCfg.Defaults.AdminPortName,
ContainerPort: serviceCfg.Defaults.AdminPort,
Protocol: corev1.ProtocolTCP,
},
@ -234,13 +231,16 @@ func (r *CorePostgrestReconiler) reconcilePostgrestService(
ctx context.Context,
core *supabasev1alpha1.Core,
) error {
postgrestService := &corev1.Service{
ObjectMeta: supabase.ServiceConfig.Postgrest.ObjectMeta(core),
}
var (
serviceCfg = supabase.ServiceConfig.Postgrest
postgrestService = &corev1.Service{
ObjectMeta: serviceCfg.ObjectMeta(core),
}
)
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, postgrestService, func() error {
postgrestService.Labels = core.Spec.Postgrest.WorkloadTemplate.MergeLabels(
objectLabels(core, supabase.ServiceConfig.Postgrest.Name, "core", supabase.Images.Postgrest.Tag),
objectLabels(core, serviceCfg.Name, "core", supabase.Images.Postgrest.Tag),
core.Labels,
)
@ -249,14 +249,14 @@ func (r *CorePostgrestReconiler) reconcilePostgrestService(
}
postgrestService.Spec = corev1.ServiceSpec{
Selector: selectorLabels(core, supabase.ServiceConfig.Postgrest.Name),
Selector: selectorLabels(core, serviceCfg.Name),
Ports: []corev1.ServicePort{
{
Name: "rest",
Name: serviceCfg.Defaults.ServerPortName,
Protocol: corev1.ProtocolTCP,
AppProtocol: ptrOf("http"),
Port: 3000,
TargetPort: intstr.IntOrString{IntVal: 3000},
Port: serviceCfg.Defaults.ServerPort,
TargetPort: intstr.IntOrString{IntVal: serviceCfg.Defaults.ServerPort},
},
},
}

View file

@ -0,0 +1,309 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"encoding/base64"
"fmt"
"slices"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
"code.icb4dc0.de/prskr/supabase-operator/internal/meta"
"code.icb4dc0.de/prskr/supabase-operator/internal/supabase"
)
// StorageApiReconciler reconciles a Storage object
type StorageApiReconciler struct {
client.Client
Scheme *runtime.Scheme
}
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.4/pkg/reconcile
func (r *StorageApiReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
var (
storage supabasev1alpha1.Storage
logger = log.FromContext(ctx)
)
if err := r.Get(ctx, req.NamespacedName, &storage); err != nil {
if client.IgnoreNotFound(err) == nil {
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
}
logger.Info("Reconciling Storage API")
if err := r.reconcileStorageApiDeployment(ctx, &storage); err != nil {
return ctrl.Result{}, err
}
if err := r.reconcileStorageApiService(ctx, &storage); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *StorageApiReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&supabasev1alpha1.Storage{}).
Named("storage-api").
Owns(new(corev1.Secret)).
Owns(new(appsv1.Deployment)).
Owns(new(corev1.Service)).
Complete(r)
}
func (r *StorageApiReconciler) reconcileStorageApiDeployment(
ctx context.Context,
storage *supabasev1alpha1.Storage,
) error {
var (
serviceCfg = supabase.ServiceConfig.Storage
apiSpec = storage.Spec.Api
storageApiDeployment = &appsv1.Deployment{
ObjectMeta: serviceCfg.ObjectMeta(storage),
}
jwtSecret = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: apiSpec.JwtAuth.SecretName,
Namespace: storage.Namespace,
},
}
s3ProtocolSecret = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: apiSpec.S3Protocol.CredentialsSecretRef.SecretName,
Namespace: storage.Namespace,
},
}
jwtStateHash, s3ProtoCredentialsStateHash string
)
if err := r.Get(ctx, client.ObjectKeyFromObject(jwtSecret), jwtSecret); err != nil {
return err
}
jwtStateHash = base64.StdEncoding.EncodeToString(HashBytes(
jwtSecret.Data[apiSpec.JwtAuth.SecretKey],
jwtSecret.Data[apiSpec.JwtAuth.JwksKey],
))
if err := r.Get(ctx, client.ObjectKeyFromObject(s3ProtocolSecret), s3ProtocolSecret); err != nil {
return err
}
s3ProtoCredentialsStateHash = base64.StdEncoding.EncodeToString(HashBytes(
s3ProtocolSecret.Data[apiSpec.S3Protocol.CredentialsSecretRef.AccessKeyIdKey],
s3ProtocolSecret.Data[apiSpec.S3Protocol.CredentialsSecretRef.AccessSecretKeyKey],
))
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, storageApiDeployment, func() error {
storageApiDeployment.Labels = apiSpec.WorkloadTemplate.MergeLabels(
objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.Storage.Tag),
storage.Labels,
)
storageApiEnv := []corev1.EnvVar{
{
Name: "DB_USERNAME",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: apiSpec.DBSpec.DBCredentialsRef.SecretName,
},
Key: apiSpec.DBSpec.DBCredentialsRef.UsernameKey,
},
},
},
{
Name: "DB_PASSWORD",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: apiSpec.DBSpec.DBCredentialsRef.SecretName,
},
Key: apiSpec.DBSpec.DBCredentialsRef.PasswordKey,
},
},
},
serviceCfg.EnvKeys.DatabaseDSN.Var(fmt.Sprintf("postgres://$(DB_USERNAME):$(DB_PASSWORD)@%s:%d/%s", apiSpec.DBSpec.Host, apiSpec.DBSpec.Port, apiSpec.DBSpec.DBName)),
serviceCfg.EnvKeys.ServiceKey.Var(apiSpec.JwtAuth.ServiceKeySelector()),
serviceCfg.EnvKeys.JwtSecret.Var(apiSpec.JwtAuth.SecretKeySelector()),
serviceCfg.EnvKeys.JwtJwks.Var(apiSpec.JwtAuth.JwksKeySelector()),
serviceCfg.EnvKeys.S3ProtocolPrefix.Var(),
serviceCfg.EnvKeys.S3ProtocolAllowForwardedHeader.Var(apiSpec.S3Protocol.AllowForwardedHeader),
serviceCfg.EnvKeys.S3ProtocolAccessKeyId.Var(apiSpec.S3Protocol.CredentialsSecretRef.AccessKeyIdSelector()),
serviceCfg.EnvKeys.S3ProtocolAccessKeySecret.Var(apiSpec.S3Protocol.CredentialsSecretRef.AccessSecretKeySelector()),
serviceCfg.EnvKeys.TusUrlPath.Var(),
serviceCfg.EnvKeys.FileSizeLimit.Var(apiSpec.FileSizeLimit),
serviceCfg.EnvKeys.UploadFileSizeLimit.Var(apiSpec.FileSizeLimit),
serviceCfg.EnvKeys.UploadFileSizeLimitStandard.Var(apiSpec.FileSizeLimit),
serviceCfg.EnvKeys.AnonKey.Var(apiSpec.JwtAuth.AnonKeySelector()),
// TODO: https://github.com/supabase/storage-api/issues/55
serviceCfg.EnvKeys.FileStorageRegion.Var(),
}
if storage.Spec.ImageProxy != nil && storage.Spec.ImageProxy.Enable {
storageApiEnv = append(storageApiEnv, serviceCfg.EnvKeys.ImgProxyURL.Var(fmt.Sprintf("http://%s.%s.svc:%d", supabase.ServiceConfig.ImgProxy.ObjectName(storage), storage.Namespace, supabase.ServiceConfig.ImgProxy.Defaults.ApiPort)))
}
if storageApiDeployment.CreationTimestamp.IsZero() {
storageApiDeployment.Spec.Selector = &metav1.LabelSelector{
MatchLabels: selectorLabels(storage, serviceCfg.Name),
}
}
storageApiDeployment.Spec.Replicas = apiSpec.WorkloadTemplate.ReplicaCount()
storageApiDeployment.Spec.Template = corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
fmt.Sprintf("%s/%s", supabasev1alpha1.GroupVersion.Group, "jwt-hash"): jwtStateHash,
fmt.Sprintf("%s/%s", supabasev1alpha1.GroupVersion.Group, "s3-credentials-hash"): s3ProtoCredentialsStateHash,
},
Labels: objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.Storage.Tag),
},
Spec: corev1.PodSpec{
ImagePullSecrets: apiSpec.WorkloadTemplate.PullSecrets(),
Containers: []corev1.Container{{
Name: "supabase-storage",
Image: apiSpec.WorkloadTemplate.Image(supabase.Images.Storage.String()),
ImagePullPolicy: apiSpec.WorkloadTemplate.ImagePullPolicy(),
Env: apiSpec.WorkloadTemplate.MergeEnv(append(storageApiEnv, slices.Concat(apiSpec.FileBackend.Env(), apiSpec.S3Backend.Env())...)),
Ports: []corev1.ContainerPort{{
Name: serviceCfg.Defaults.ApiPortName,
ContainerPort: serviceCfg.Defaults.ApiPort,
Protocol: corev1.ProtocolTCP,
}},
SecurityContext: apiSpec.WorkloadTemplate.ContainerSecurityContext(serviceCfg.Defaults.UID, serviceCfg.Defaults.GID),
Resources: apiSpec.WorkloadTemplate.Resources(),
VolumeMounts: apiSpec.WorkloadTemplate.AdditionalVolumeMounts(
corev1.VolumeMount{
Name: "tmp",
MountPath: "/tmp",
},
),
ReadinessProbe: &corev1.Probe{
InitialDelaySeconds: 5,
PeriodSeconds: 3,
TimeoutSeconds: 1,
SuccessThreshold: 2,
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/status",
Port: intstr.IntOrString{IntVal: serviceCfg.Defaults.ApiPort},
},
},
},
LivenessProbe: &corev1.Probe{
InitialDelaySeconds: 10,
PeriodSeconds: 5,
TimeoutSeconds: 3,
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/status",
Port: intstr.IntOrString{IntVal: serviceCfg.Defaults.ApiPort},
},
},
},
}},
SecurityContext: apiSpec.WorkloadTemplate.PodSecurityContext(),
Volumes: apiSpec.WorkloadTemplate.Volumes(
corev1.Volume{
Name: "tmp",
VolumeSource: corev1.VolumeSource{
EmptyDir: apiSpec.UploadTemp.VolumeSource(),
},
},
),
},
}
if err := controllerutil.SetControllerReference(storage, storageApiDeployment, r.Scheme); err != nil {
return err
}
return nil
})
return err
}
func (r *StorageApiReconciler) reconcileStorageApiService(
ctx context.Context,
storage *supabasev1alpha1.Storage,
) error {
var (
serviceCfg = supabase.ServiceConfig.Storage
storageApiService = &corev1.Service{
ObjectMeta: serviceCfg.ObjectMeta(storage),
}
)
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, storageApiService, func() error {
storageApiService.Labels = storage.Spec.Api.WorkloadTemplate.MergeLabels(
objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.Storage.Tag),
storage.Labels,
)
if _, ok := storageApiService.Labels[meta.SupabaseLabel.ApiGatewayTarget]; !ok {
storageApiService.Labels[meta.SupabaseLabel.ApiGatewayTarget] = ""
}
storageApiService.Spec = corev1.ServiceSpec{
Selector: selectorLabels(storage, serviceCfg.Name),
Ports: []corev1.ServicePort{
{
Name: serviceCfg.Defaults.ApiPortName,
Protocol: corev1.ProtocolTCP,
AppProtocol: ptrOf("http"),
Port: serviceCfg.Defaults.ApiPort,
TargetPort: intstr.IntOrString{IntVal: serviceCfg.Defaults.ApiPort},
},
},
}
if err := controllerutil.SetControllerReference(storage, storageApiService, r.Scheme); err != nil {
return err
}
return nil
})
return err
}
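
One detail in the deployment above is easy to miss: the DSN passed to serviceCfg.EnvKeys.DatabaseDSN uses $(DB_USERNAME) and $(DB_PASSWORD) placeholders, relying on Kubernetes' dependent environment variable expansion — the kubelet substitutes $(VAR) references to variables declared earlier in the same env list at container start. A minimal sketch with illustrative literal values (the real deployment pulls both from a Secret):

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

func main() {
    env := []corev1.EnvVar{
        // illustrative literals; the real deployment uses ValueFrom + SecretKeyRef
        {Name: "DB_USERNAME", Value: "supabase_storage_admin"},
        {Name: "DB_PASSWORD", Value: "example-only"},
        // $(...) is expanded by the kubelet at container start, not by the operator
        {Name: "DATABASE_URL", Value: "postgres://$(DB_USERNAME):$(DB_PASSWORD)@cluster-example-rw.supabase-demo.svc:5432/app"},
    }
    fmt.Println(env[2].Value)
}
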

View file

@ -1,55 +0,0 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
)
// StorageReconciler reconciles a Storage object
type StorageReconciler struct {
client.Client
Scheme *runtime.Scheme
}
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.4/pkg/reconcile
func (r *StorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
_ = log.FromContext(ctx)
// TODO(user): your logic here
return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *StorageReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&supabasev1alpha1.Storage{}).
Named("storage").
Complete(r)
}

View file

@ -0,0 +1,218 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
"code.icb4dc0.de/prskr/supabase-operator/internal/supabase"
)
type StorageImgProxyReconciler struct {
client.Client
Scheme *runtime.Scheme
}
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.4/pkg/reconcile
func (r *StorageImgProxyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
var (
storage supabasev1alpha1.Storage
logger = log.FromContext(ctx)
)
if err := r.Get(ctx, req.NamespacedName, &storage); err != nil {
if client.IgnoreNotFound(err) == nil {
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
}
logger.Info("Reconciling Storage API")
if storage.Spec.ImageProxy == nil || !storage.Spec.ImageProxy.Enable {
logger.Info("ImgProxy is not enabled - skipping")
return ctrl.Result{}, nil
}
if err := r.reconcileImgProxyDeployment(ctx, &storage); err != nil {
return ctrl.Result{}, err
}
if err := r.reconcileImgProxyService(ctx, &storage); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *StorageImgProxyReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&supabasev1alpha1.Storage{}).
Named("storage-imgproxy").
Owns(new(appsv1.Deployment)).
Owns(new(corev1.Service)).
Complete(r)
}
func (r *StorageImgProxyReconciler) reconcileImgProxyDeployment(
ctx context.Context,
storage *supabasev1alpha1.Storage,
) error {
var (
serviceCfg = supabase.ServiceConfig.ImgProxy
imgProxySpec = storage.Spec.ImageProxy
imgProxyDeployment = &appsv1.Deployment{
ObjectMeta: serviceCfg.ObjectMeta(storage),
}
)
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, imgProxyDeployment, func() error {
imgProxyDeployment.Labels = imgProxySpec.WorkloadTemplate.MergeLabels(
objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.ImgProxy.Tag),
storage.Labels,
)
imgProxyEnv := []corev1.EnvVar{
serviceCfg.EnvKeys.Bind.Var(),
serviceCfg.EnvKeys.UseETag.Var(),
serviceCfg.EnvKeys.EnableWebPDetection.Var(imgProxySpec.EnableWebPDetection),
}
if storage.Spec.Api.FileBackend != nil {
imgProxyEnv = append(imgProxyEnv, serviceCfg.EnvKeys.LocalFileSystemRoot.Var(storage.Spec.Api.FileBackend.Path))
}
if imgProxyDeployment.CreationTimestamp.IsZero() {
imgProxyDeployment.Spec.Selector = &metav1.LabelSelector{
MatchLabels: selectorLabels(storage, serviceCfg.Name),
}
}
imgProxyDeployment.Spec.Replicas = imgProxySpec.WorkloadTemplate.ReplicaCount()
imgProxyDeployment.Spec.Template = corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.ImgProxy.Tag),
},
Spec: corev1.PodSpec{
ImagePullSecrets: imgProxySpec.WorkloadTemplate.PullSecrets(),
Containers: []corev1.Container{{
Name: "supabase-imgproxy",
Image: imgProxySpec.WorkloadTemplate.Image(supabase.Images.ImgProxy.String()),
ImagePullPolicy: imgProxySpec.WorkloadTemplate.ImagePullPolicy(),
Env: imgProxySpec.WorkloadTemplate.MergeEnv(imgProxyEnv),
Ports: []corev1.ContainerPort{{
Name: serviceCfg.Defaults.ApiPortName,
ContainerPort: serviceCfg.Defaults.ApiPort,
Protocol: corev1.ProtocolTCP,
}},
SecurityContext: imgProxySpec.WorkloadTemplate.ContainerSecurityContext(serviceCfg.Defaults.UID, serviceCfg.Defaults.GID),
Resources: imgProxySpec.WorkloadTemplate.Resources(),
VolumeMounts: imgProxySpec.WorkloadTemplate.AdditionalVolumeMounts(),
ReadinessProbe: &corev1.Probe{
InitialDelaySeconds: 5,
PeriodSeconds: 3,
TimeoutSeconds: 1,
SuccessThreshold: 2,
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{
Command: []string{"imgproxy", "health"},
},
},
},
LivenessProbe: &corev1.Probe{
InitialDelaySeconds: 10,
PeriodSeconds: 5,
TimeoutSeconds: 3,
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{
Command: []string{"imgproxy", "health"},
},
},
},
}},
SecurityContext: imgProxySpec.WorkloadTemplate.PodSecurityContext(),
Volumes: imgProxySpec.WorkloadTemplate.Volumes(),
},
}
if err := controllerutil.SetControllerReference(storage, imgProxyDeployment, r.Scheme); err != nil {
return err
}
return nil
})
return err
}
func (r *StorageImgProxyReconciler) reconcileImgProxyService(
ctx context.Context,
storage *supabasev1alpha1.Storage,
) error {
var (
serviceCfg = supabase.ServiceConfig.ImgProxy
imgProxyService = &corev1.Service{
ObjectMeta: serviceCfg.ObjectMeta(storage),
}
)
_, err := controllerutil.CreateOrPatch(ctx, r.Client, imgProxyService, func() error {
imgProxyService.Labels = storage.Spec.ImageProxy.WorkloadTemplate.MergeLabels(
objectLabels(storage, serviceCfg.Name, "storage", supabase.Images.ImgProxy.Tag),
storage.Labels,
)
imgProxyService.Spec = corev1.ServiceSpec{
Selector: selectorLabels(storage, serviceCfg.Name),
Ports: []corev1.ServicePort{
{
Name: serviceCfg.Defaults.ApiPortName,
Protocol: corev1.ProtocolTCP,
AppProtocol: ptrOf("http"),
Port: serviceCfg.Defaults.ApiPort,
TargetPort: intstr.IntOrString{IntVal: serviceCfg.Defaults.ApiPort},
},
},
}
if err := controllerutil.SetControllerReference(storage, imgProxyService, r.Scheme); err != nil {
return err
}
return nil
})
return err
}

View file

@ -0,0 +1,147 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"errors"
"maps"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
"code.icb4dc0.de/prskr/supabase-operator/internal/meta"
"code.icb4dc0.de/prskr/supabase-operator/internal/pw"
)
type StorageS3CredentialsReconciler struct {
client.Client
Scheme *runtime.Scheme
}
func (r *StorageS3CredentialsReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {
var (
storage supabasev1alpha1.Storage
logger = log.FromContext(ctx)
)
if err := r.Get(ctx, req.NamespacedName, &storage); err != nil {
if client.IgnoreNotFound(err) == nil {
logger.Info("Storage instance does not exist")
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
}
if err := r.reconcileS3ProtoSecret(ctx, &storage); err != nil {
return ctrl.Result{}, err
}
if storage.Spec.Api.S3Backend != nil {
if err := r.reconcileS3StorageSecret(ctx, &storage); err != nil {
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *StorageS3CredentialsReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(new(supabasev1alpha1.Storage)).
Owns(new(corev1.Secret)).
Named("storage-s3-creds").
Complete(r)
}
func (r *StorageS3CredentialsReconciler) reconcileS3StorageSecret(
ctx context.Context,
storage *supabasev1alpha1.Storage,
) error {
if storage.Spec.Api.S3Backend.CredentialsSecretRef == nil {
return errors.New("S3 storage credentials secret is empty")
}
s3CredsSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: storage.Spec.Api.S3Backend.CredentialsSecretRef.SecretName,
Namespace: storage.Namespace,
},
}
if err := r.Get(ctx, client.ObjectKeyFromObject(s3CredsSecret), s3CredsSecret); err != nil {
return err
}
if err := controllerutil.SetControllerReference(storage, s3CredsSecret, r.Scheme); err != nil {
return err
}
return r.Update(ctx, s3CredsSecret)
}
func (r *StorageS3CredentialsReconciler) reconcileS3ProtoSecret(
ctx context.Context,
storage *supabasev1alpha1.Storage,
) error {
const (
accessKeyIdAndSecret = 2
)
s3ProtoSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: storage.Spec.Api.S3Protocol.CredentialsSecretRef.SecretName,
Namespace: storage.Namespace,
},
}
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, &s3ProtoSecret, func() error {
s3ProtoSecret.Labels = maps.Clone(storage.Labels)
if s3ProtoSecret.Labels == nil {
s3ProtoSecret.Labels = make(map[string]string)
}
s3ProtoSecret.Labels[meta.SupabaseLabel.Reload] = ""
if err := controllerutil.SetControllerReference(storage, &s3ProtoSecret, r.Scheme); err != nil {
return err
}
if s3ProtoSecret.Data == nil {
s3ProtoSecret.Data = make(map[string][]byte, accessKeyIdAndSecret)
}
if _, ok := s3ProtoSecret.Data[storage.Spec.Api.S3Protocol.CredentialsSecretRef.AccessKeyIdKey]; !ok {
s3ProtoSecret.Data[storage.Spec.Api.S3Protocol.CredentialsSecretRef.AccessKeyIdKey] = pw.GeneratePW(32, nil)
}
if _, ok := s3ProtoSecret.Data[storage.Spec.Api.S3Protocol.CredentialsSecretRef.AccessSecretKeyKey]; !ok {
s3ProtoSecret.Data[storage.Spec.Api.S3Protocol.CredentialsSecretRef.AccessSecretKeyKey] = pw.GeneratePW(64, nil)
}
return nil
})
return err
}
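Not part of this change, but for context: a minimal sketch of how this reconciler would be wired into a controller-runtime manager. The import path for the controller package and the generated AddToScheme registration are assumptions based on the repository layout, not taken from this commit.

package main

import (
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"

	supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
	"code.icb4dc0.de/prskr/supabase-operator/internal/controller"
)

func main() {
	// assemble the scheme; AddToScheme is the kubebuilder-generated
	// registration function for the supabase API group (assumed here)
	scheme := runtime.NewScheme()
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(supabasev1alpha1.AddToScheme(scheme))

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		os.Exit(1)
	}

	// register the reconciler exactly as SetupWithManager above expects
	if err := (&controller.StorageS3CredentialsReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}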

View file

@ -18,7 +18,7 @@ package controller
import (
"context"
"crypto/sha256"
"hash/fnv"
"maps"
"reflect"
@ -93,7 +93,7 @@ func ValueOrFallback[T any](value, fallback T) T {
}
func HashStrings(vals ...string) []byte {
h := sha256.New()
h := fnv.New64a()
for _, v := range vals {
h.Write([]byte(v))
@ -103,7 +103,7 @@ func HashStrings(vals ...string) []byte {
}
func HashBytes(vals ...[]byte) []byte {
h := sha256.New()
h := fnv.New64a()
for _, v := range vals {
h.Write(v)

View file

@ -19,9 +19,9 @@ package controlplane
import (
"bytes"
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"hash/fnv"
"strconv"
"strings"
"time"
@ -87,7 +87,7 @@ func (r *APIGatewayReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, fmt.Errorf("failed to prepare config hash: %w", err)
}
serviceHash := sha256.New().Sum(rawServices)
serviceHash := fnv.New64a().Sum(rawServices)
if bytes.Equal(serviceHash, gateway.Status.Envoy.ResourceHash) {
logger.Info("Resource hash did not change - skipping reconciliation")
return ctrl.Result{}, nil
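One caveat worth noting about the pattern above: hash.Hash.Sum(b) appends the digest of everything written so far to b; it does not hash b itself. So fnv.New64a().Sum(rawServices) yields rawServices plus the 8-byte FNV-64a digest of empty input, which is deterministic and therefore still usable for change detection, but it is not a hash of rawServices. A standalone sketch of both variants:

package main

import (
	"fmt"
	"hash/fnv"
)

func main() {
	data := []byte("rendered service config")

	// Sum appends the digest of prior Writes (none here) to data.
	appended := fnv.New64a().Sum(data)
	fmt.Println(len(appended)) // len(data) + 8

	// to actually hash data: write it first, then append the digest to nil
	h := fnv.New64a()
	h.Write(data)
	fmt.Printf("%x\n", h.Sum(nil))
}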

View file

@ -36,7 +36,9 @@ func (c *GoTrueCluster) Cluster(instance string) []*clusterv3.Cluster {
return nil
}
return []*clusterv3.Cluster{c.ServiceCluster.Cluster(fmt.Sprintf("auth@%s", instance), 9999)}
serviceCfg := supabase.ServiceConfig.Auth
return []*clusterv3.Cluster{c.ServiceCluster.Cluster(fmt.Sprintf("%s@%s", serviceCfg.Name, instance), uint32(serviceCfg.Defaults.APIPort))}
}
func (c *GoTrueCluster) Routes(instance string) []*routev3.Route {

View file

@ -34,8 +34,11 @@ func (c *PostgrestCluster) Cluster(instance string) []*clusterv3.Cluster {
if c == nil {
return nil
}
serviceCfg := supabase.ServiceConfig.Postgrest
return []*clusterv3.Cluster{
c.ServiceCluster.Cluster(fmt.Sprintf("%s@%s", supabase.ServiceConfig.Postgrest.Name, instance), 3000),
c.ServiceCluster.Cluster(fmt.Sprintf("%s@%s", serviceCfg.Name, instance), uint32(serviceCfg.Defaults.ServerPort)),
}
}
@ -44,6 +47,8 @@ func (c *PostgrestCluster) Routes(instance string) []*routev3.Route {
return nil
}
serviceCfg := supabase.ServiceConfig.Postgrest
return []*routev3.Route{
{
Name: "PostgREST: /rest/v1/* -> http://rest:3000/*",
@ -55,7 +60,7 @@ func (c *PostgrestCluster) Routes(instance string) []*routev3.Route {
Action: &routev3.Route_Route{
Route: &routev3.RouteAction{
ClusterSpecifier: &routev3.RouteAction_Cluster{
Cluster: fmt.Sprintf("%s@%s", supabase.ServiceConfig.Postgrest.Name, instance),
Cluster: fmt.Sprintf("%s@%s", serviceCfg.Name, instance),
},
PrefixRewrite: "/",
},
@ -71,7 +76,7 @@ func (c *PostgrestCluster) Routes(instance string) []*routev3.Route {
Action: &routev3.Route_Route{
Route: &routev3.RouteAction{
ClusterSpecifier: &routev3.RouteAction_Cluster{
Cluster: fmt.Sprintf("%s@%s", supabase.ServiceConfig.Postgrest.Name, instance),
Cluster: fmt.Sprintf("%s@%s", serviceCfg.Name, instance),
},
PrefixRewrite: "/rpc/graphql",
},

View file

@ -36,11 +36,12 @@ import (
)
type EnvoyServices struct {
ServiceLabelKey string `json:"-"`
Postgrest *PostgrestCluster `json:"postgrest,omitempty"`
GoTrue *GoTrueCluster `json:"auth,omitempty"`
PGMeta *PGMetaCluster `json:"pgmeta,omitempty"`
Studio *StudioCluster `json:"studio,omitempty"`
ServiceLabelKey string `json:"-"`
Postgrest *PostgrestCluster `json:"postgrest,omitempty"`
GoTrue *GoTrueCluster `json:"auth,omitempty"`
StorageApi *StorageApiCluster `json:"storageApi,omitempty"`
PGMeta *PGMetaCluster `json:"pgmeta,omitempty"`
Studio *StudioCluster `json:"studio,omitempty"`
}
func (s *EnvoyServices) UpsertEndpointSlices(endpointSlices ...discoveryv1.EndpointSlice) {
@ -56,6 +57,11 @@ func (s *EnvoyServices) UpsertEndpointSlices(endpointSlices ...discoveryv1.Endpo
s.GoTrue = new(GoTrueCluster)
}
s.GoTrue.AddOrUpdateEndpoints(eps)
case supabase.ServiceConfig.Storage.Name:
if s.StorageApi == nil {
s.StorageApi = new(StorageApiCluster)
}
s.StorageApi.AddOrUpdateEndpoints(eps)
case supabase.ServiceConfig.PGMeta.Name:
if s.PGMeta == nil {
s.PGMeta = new(PGMetaCluster)
@ -76,6 +82,10 @@ func (s EnvoyServices) Targets() map[string][]string {
targets[supabase.ServiceConfig.Auth.Name] = s.GoTrue.Targets()
}
if s.StorageApi != nil {
targets[supabase.ServiceConfig.Storage.Name] = s.StorageApi.Targets()
}
if s.PGMeta != nil {
targets[supabase.ServiceConfig.PGMeta.Name] = s.PGMeta.Targets()
}
@ -179,6 +189,7 @@ func (s *EnvoyServices) snapshot(ctx context.Context, instance, version string)
Routes: slices.Concat(
s.Postgrest.Routes(instance),
s.GoTrue.Routes(instance),
s.StorageApi.Routes(instance),
s.PGMeta.Routes(instance),
),
}},
@ -252,6 +263,7 @@ func (s *EnvoyServices) snapshot(ctx context.Context, instance, version string)
slices.Concat(
s.Postgrest.Cluster(instance),
s.GoTrue.Cluster(instance),
s.StorageApi.Cluster(instance),
s.PGMeta.Cluster(instance),
)...),
resource.RouteType: {apiRouteCfg},

View file

@ -0,0 +1,72 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controlplane
import (
"fmt"
clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
"google.golang.org/protobuf/types/known/anypb"
"code.icb4dc0.de/prskr/supabase-operator/internal/supabase"
)
type StorageApiCluster struct {
ServiceCluster
}
func (c *StorageApiCluster) Cluster(instance string) []*clusterv3.Cluster {
if c == nil {
return nil
}
serviceCfg := supabase.ServiceConfig.Storage
return []*clusterv3.Cluster{
c.ServiceCluster.Cluster(fmt.Sprintf("%s@%s", serviceCfg.Name, instance), uint32(serviceCfg.Defaults.ApiPort)),
}
}
func (c *StorageApiCluster) Routes(instance string) []*routev3.Route {
if c == nil {
return nil
}
serviceCfg := supabase.ServiceConfig.Storage
return []*routev3.Route{{
Name: "Storage: /storage/v1/* -> http://storage:5000/*",
Match: &routev3.RouteMatch{
PathSpecifier: &routev3.RouteMatch_Prefix{
Prefix: "/storage/v1/",
},
},
Action: &routev3.Route_Route{
Route: &routev3.RouteAction{
ClusterSpecifier: &routev3.RouteAction_Cluster{
Cluster: fmt.Sprintf("%s@%s", serviceCfg.Name, instance),
},
PrefixRewrite: "/",
},
},
TypedPerFilterConfig: map[string]*anypb.Any{
FilterNameRBAC: MustAny(RBACPerRoute(RBACAllowAllConfig())),
FilterNameJwtAuthn: MustAny(JWTAllowAll()),
},
}}
}
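For illustration only: the prefix rewrite above means a request for /storage/v1/object/some-bucket/some-key reaches the storage-api cluster as /object/some-bucket/some-key. A sketch of how such per-service routes could be collected into a catch-all virtual host; the helper and its names are illustrative, not part of this change:

package controlplane

import (
	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
)

// buildAPIVirtualHost is an illustrative helper: it mounts the routes
// produced by the per-service clusters into a single wildcard virtual host.
func buildAPIVirtualHost(routes []*routev3.Route) *routev3.VirtualHost {
	return &routev3.VirtualHost{
		Name:    "supabase-api", // illustrative name
		Domains: []string{"*"},
		Routes:  routes,
	}
}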

internal/supabase/auth.go Normal file
View file

@ -0,0 +1,86 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
type authEnvKeys struct {
ApiHost fixedEnv
ApiPort fixedEnv
ApiExternalUrl stringEnv
DBDriver fixedEnv
DatabaseUrl string
SiteUrl stringEnv
AdditionalRedirectURLs stringSliceEnv
DisableSignup boolEnv
JWTIssuer fixedEnv
JWTAdminRoles fixedEnv
JWTAudience fixedEnv
JwtDefaultGroup fixedEnv
JwtExpiry intEnv[int]
JwtSecret secretEnv
EmailSignupDisabled boolEnv
MailerUrlPathsInvite stringEnv
MailerUrlPathsConfirmation stringEnv
MailerUrlPathsRecovery stringEnv
MailerUrlPathsEmailChange stringEnv
AnonymousUsersEnabled boolEnv
}
type authConfigDefaults struct {
MailerUrlPathsInvite string
MailerUrlPathsConfirmation string
MailerUrlPathsRecovery string
MailerUrlPathsEmailChange string
APIPort int32
UID, GID int64
}
func authServiceConfig() serviceConfig[authEnvKeys, authConfigDefaults] {
return serviceConfig[authEnvKeys, authConfigDefaults]{
Name: "auth",
EnvKeys: authEnvKeys{
ApiHost: fixedEnvOf("GOTRUE_API_HOST", "0.0.0.0"),
ApiPort: fixedEnvOf("GOTRUE_API_PORT", "9999"),
ApiExternalUrl: "API_EXTERNAL_URL",
DBDriver: fixedEnvOf("GOTRUE_DB_DRIVER", "postgres"),
DatabaseUrl: "GOTRUE_DB_DATABASE_URL",
SiteUrl: "GOTRUE_SITE_URL",
AdditionalRedirectURLs: stringSliceEnv{key: "GOTRUE_URI_ALLOW_LIST", separator: ","},
DisableSignup: "GOTRUE_DISABLE_SIGNUP",
JWTIssuer: fixedEnvOf("GOTRUE_JWT_ISSUER", "supabase"),
JWTAdminRoles: fixedEnvOf("GOTRUE_JWT_ADMIN_ROLES", "service_role"),
JWTAudience: fixedEnvOf("GOTRUE_JWT_AUD", "authenticated"),
JwtDefaultGroup: fixedEnvOf("GOTRUE_JWT_DEFAULT_GROUP_NAME", "authenticated"),
JwtExpiry: "GOTRUE_JWT_EXP",
JwtSecret: "GOTRUE_JWT_SECRET",
EmailSignupDisabled: "GOTRUE_EXTERNAL_EMAIL_ENABLED",
MailerUrlPathsInvite: "MAILER_URLPATHS_INVITE",
MailerUrlPathsConfirmation: "MAILER_URLPATHS_CONFIRMATION",
MailerUrlPathsRecovery: "MAILER_URLPATHS_RECOVERY",
MailerUrlPathsEmailChange: "MAILER_URLPATHS_EMAIL_CHANGE",
AnonymousUsersEnabled: "GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED",
},
Defaults: authConfigDefaults{
MailerUrlPathsInvite: "/auth/v1/verify",
MailerUrlPathsConfirmation: "/auth/v1/verify",
MailerUrlPathsRecovery: "/auth/v1/verify",
MailerUrlPathsEmailChange: "/auth/v1/verify",
APIPort: 9999,
UID: 1000,
GID: 1000,
},
}
}

View file

@ -36,294 +36,22 @@ func (cfg serviceConfig[TEnvKeys, TDefaults]) ObjectMeta(obj metav1.Object) meta
return metav1.ObjectMeta{Name: cfg.ObjectName(obj), Namespace: obj.GetNamespace()}
}
type postgrestEnvKeys struct {
Host fixedEnv
DBUri string
Schemas stringSliceEnv
AnonRole stringEnv
JWTSecret secretEnv
UseLegacyGucs boolEnv
ExtraSearchPath stringSliceEnv
AppSettingsJWTSecret secretEnv
AppSettingsJWTExpiry intEnv[int]
AdminServerPort intEnv[int32]
MaxRows intEnv[int]
OpenAPIProxyURI stringEnv
}
type postgrestConfigDefaults struct {
AnonRole string
Schemas []string
ExtraSearchPath []string
UID, GID int64
ServerPort, AdminPort int32
}
type authEnvKeys struct {
ApiHost fixedEnv
ApiPort fixedEnv
ApiExternalUrl stringEnv
DBDriver fixedEnv
DatabaseUrl string
SiteUrl stringEnv
AdditionalRedirectURLs stringSliceEnv
DisableSignup boolEnv
JWTIssuer fixedEnv
JWTAdminRoles fixedEnv
JWTAudience fixedEnv
JwtDefaultGroup fixedEnv
JwtExpiry intEnv[int]
JwtSecret secretEnv
EmailSignupDisabled boolEnv
MailerUrlPathsInvite stringEnv
MailerUrlPathsConfirmation stringEnv
MailerUrlPathsRecovery stringEnv
MailerUrlPathsEmailChange stringEnv
AnonymousUsersEnabled boolEnv
}
type authConfigDefaults struct {
MailerUrlPathsInvite string
MailerUrlPathsConfirmation string
MailerUrlPathsRecovery string
MailerUrlPathsEmailChange string
APIPort int32
UID, GID int64
}
type pgMetaEnvKeys struct {
APIPort intEnv[int32]
DBHost stringEnv
DBPort intEnv[int]
DBName stringEnv
DBUser secretEnv
DBPassword secretEnv
}
type pgMetaDefaults struct {
APIPort int32
DBPort string
NodeUID int64
NodeGID int64
}
type studioEnvKeys struct {
PGMetaURL stringEnv
DBPassword secretEnv
ApiUrl stringEnv
APIExternalURL stringEnv
JwtSecret secretEnv
AnonKey secretEnv
ServiceKey secretEnv
Host fixedEnv
LogsEnabled fixedEnv
}
type studioDefaults struct {
NodeUID int64
NodeGID int64
APIPort int32
}
type storageEnvApiKeys struct {
AnonKey secretEnv
ServiceKey secretEnv
JwtSecret secretEnv
JwtJwks secretEnv
DatabaseDSN stringEnv
FileSizeLimit intEnv[uint64]
UploadFileSizeLimit intEnv[uint64]
UploadFileSizeLimitStandard intEnv[uint64]
StorageBackend stringEnv
TenantID fixedEnv
StorageS3Region stringEnv
GlobalS3Bucket fixedEnv
EnableImageTransformation boolEnv
ImgProxyURL stringEnv
TusUrlPath fixedEnv
S3AccessKeyId secretEnv
S3AccessKeySecret secretEnv
S3ProtocolPrefix fixedEnv
S3AllowForwardedHeader boolEnv
}
type storageApiDefaults struct{}
type envoyDefaults struct {
ConfigKey string
UID, GID int64
}
type envoyServiceConfig struct {
Defaults envoyDefaults
}
func (envoyServiceConfig) ObjectName(obj metav1.Object) string {
return fmt.Sprintf("%s-envoy", obj.GetName())
}
type jwtDefaults struct {
SecretKey string
JwksKey string
AnonKey string
ServiceKey string
SecretLength int
Expiry int
}
type jwtConfig struct {
Defaults jwtDefaults
}
func (jwtConfig) ObjectName(obj metav1.Object) string {
return fmt.Sprintf("%s-jwt", obj.GetName())
}
var ServiceConfig = struct {
Postgrest serviceConfig[postgrestEnvKeys, postgrestConfigDefaults]
Auth serviceConfig[authEnvKeys, authConfigDefaults]
PGMeta serviceConfig[pgMetaEnvKeys, pgMetaDefaults]
Studio serviceConfig[studioEnvKeys, studioDefaults]
Storage serviceConfig[storageEnvApiKeys, storageApiDefaults]
ImgProxy serviceConfig[imgProxyEnvKeys, imgProxyDefaults]
Envoy envoyServiceConfig
JWT jwtConfig
}{
Postgrest: serviceConfig[postgrestEnvKeys, postgrestConfigDefaults]{
Name: "postgrest",
EnvKeys: postgrestEnvKeys{
Host: fixedEnvOf("PGRST_SERVER_HOST", "*"),
DBUri: "PGRST_DB_URI",
Schemas: stringSliceEnv{key: "PGRST_DB_SCHEMAS", separator: ","},
AnonRole: "PGRST_DB_ANON_ROLE",
JWTSecret: "PGRST_JWT_SECRET",
UseLegacyGucs: "PGRST_DB_USE_LEGACY_GUCS",
AppSettingsJWTSecret: "PGRST_APP_SETTINGS_JWT_SECRET",
AppSettingsJWTExpiry: "PGRST_APP_SETTINGS_JWT_EXP",
AdminServerPort: "PGRST_ADMIN_SERVER_PORT",
ExtraSearchPath: stringSliceEnv{key: "PGRST_DB_EXTRA_SEARCH_PATH", separator: ","},
MaxRows: "PGRST_DB_MAX_ROWS",
OpenAPIProxyURI: "PGRST_OPENAPI_SERVER_PROXY_URI",
},
Defaults: postgrestConfigDefaults{
AnonRole: "anon",
Schemas: []string{"public", "graphql_public"},
ExtraSearchPath: []string{"public", "extensions"},
UID: 1000,
GID: 1000,
ServerPort: 3000,
AdminPort: 3001,
},
},
Auth: serviceConfig[authEnvKeys, authConfigDefaults]{
Name: "auth",
EnvKeys: authEnvKeys{
ApiHost: fixedEnvOf("GOTRUE_API_HOST", "0.0.0.0"),
ApiPort: fixedEnvOf("GOTRUE_API_PORT", "9999"),
ApiExternalUrl: "API_EXTERNAL_URL",
DBDriver: fixedEnvOf("GOTRUE_DB_DRIVER", "postgres"),
DatabaseUrl: "GOTRUE_DB_DATABASE_URL",
SiteUrl: "GOTRUE_SITE_URL",
AdditionalRedirectURLs: stringSliceEnv{key: "GOTRUE_URI_ALLOW_LIST", separator: ","},
DisableSignup: "GOTRUE_DISABLE_SIGNUP",
JWTIssuer: fixedEnvOf("GOTRUE_JWT_ISSUER", "supabase"),
JWTAdminRoles: fixedEnvOf("GOTRUE_JWT_ADMIN_ROLES", "service_role"),
JWTAudience: fixedEnvOf("GOTRUE_JWT_AUD", "authenticated"),
JwtDefaultGroup: fixedEnvOf("GOTRUE_JWT_DEFAULT_GROUP_NAME", "authenticated"),
JwtExpiry: "GOTRUE_JWT_EXP",
JwtSecret: "GOTRUE_JWT_SECRET",
EmailSignupDisabled: "GOTRUE_EXTERNAL_EMAIL_ENABLED",
MailerUrlPathsInvite: "MAILER_URLPATHS_INVITE",
MailerUrlPathsConfirmation: "MAILER_URLPATHS_CONFIRMATION",
MailerUrlPathsRecovery: "MAILER_URLPATHS_RECOVERY",
MailerUrlPathsEmailChange: "MAILER_URLPATHS_EMAIL_CHANGE",
AnonymousUsersEnabled: "GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED",
},
Defaults: authConfigDefaults{
MailerUrlPathsInvite: "/auth/v1/verify",
MailerUrlPathsConfirmation: "/auth/v1/verify",
MailerUrlPathsRecovery: "/auth/v1/verify",
MailerUrlPathsEmailChange: "/auth/v1/verify",
APIPort: 9999,
UID: 1000,
GID: 1000,
},
},
PGMeta: serviceConfig[pgMetaEnvKeys, pgMetaDefaults]{
Name: "pg-meta",
EnvKeys: pgMetaEnvKeys{
APIPort: "PG_META_PORT",
DBHost: "PG_META_DB_HOST",
DBPort: "PG_META_DB_PORT",
DBName: "PG_META_DB_NAME",
DBUser: "PG_META_DB_USER",
DBPassword: "PG_META_DB_PASSWORD",
},
Defaults: pgMetaDefaults{
APIPort: 8080,
DBPort: "5432",
NodeUID: 1000,
NodeGID: 1000,
},
},
Studio: serviceConfig[studioEnvKeys, studioDefaults]{
Name: "studio",
EnvKeys: studioEnvKeys{
PGMetaURL: "STUDIO_PG_META_URL",
DBPassword: "POSTGRES_PASSWORD",
ApiUrl: "SUPABASE_URL",
APIExternalURL: "SUPABASE_PUBLIC_URL",
JwtSecret: "AUTH_JWT_SECRET",
AnonKey: "SUPABASE_ANON_KEY",
ServiceKey: "SUPABASE_SERVICE_KEY",
Host: fixedEnvOf("HOSTNAME", "0.0.0.0"),
LogsEnabled: fixedEnvOf("NEXT_PUBLIC_ENABLE_LOGS", "true"),
},
Defaults: studioDefaults{
NodeUID: 1000,
NodeGID: 1000,
APIPort: 3000,
},
},
Storage: serviceConfig[storageEnvApiKeys, storageApiDefaults]{
Name: "storage-api",
EnvKeys: storageEnvApiKeys{
AnonKey: "ANON_KEY",
ServiceKey: "SERVICE_KEY",
JwtSecret: "AUTH_JWT_SECRET",
JwtJwks: "AUTH_JWT_JWKS",
StorageBackend: "STORAGE_BACKEND",
DatabaseDSN: "DATABASE_URL",
FileSizeLimit: "FILE_SIZE_LIMIT",
UploadFileSizeLimit: "UPLOAD_FILE_SIZE_LIMIT",
UploadFileSizeLimitStandard: "UPLOAD_FILE_SIZE_LIMIT_STANDARD",
TenantID: fixedEnvOf("TENANT_ID", "stub"),
StorageS3Region: "STORAGE_S3_REGION",
GlobalS3Bucket: fixedEnvOf("GLOBAL_S3_BUCKET", "stub"),
EnableImageTransformation: "ENABLE_IMAGE_TRANSFORMATION",
ImgProxyURL: "IMGPROXY_URL",
TusUrlPath: fixedEnvOf("TUS_URL_PATH", "/storage/v1/upload/resumable"),
S3AccessKeyId: "S3_PROTOCOL_ACCESS_KEY_ID",
S3AccessKeySecret: "S3_PROTOCOL_ACCESS_KEY_SECRET",
S3ProtocolPrefix: fixedEnvOf("S3_PROTOCOL_PREFIX", "/storage/v1"),
S3AllowForwardedHeader: "S3_ALLOW_FORWARDED_HEADER",
},
Defaults: storageApiDefaults{},
},
Envoy: envoyServiceConfig{
Defaults: envoyDefaults{
ConfigKey: "config.yaml",
UID: 65532,
GID: 65532,
},
},
JWT: jwtConfig{
Defaults: jwtDefaults{
SecretKey: "secret",
JwksKey: "jwks.json",
AnonKey: "anon_key",
ServiceKey: "service_key",
SecretLength: 40,
Expiry: 3600,
},
},
Postgrest: postgrestServiceConfig(),
Auth: authServiceConfig(),
PGMeta: pgMetaServiceConfig(),
Studio: studioServiceConfig(),
Storage: storageServiceConfig(),
ImgProxy: imgProxyServiceConfig(),
Envoy: newEnvoyServiceConfig(),
JWT: newJwtConfig(),
}
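With the defaults moved into per-service constructors, consumers read them through the shared ServiceConfig value, as the Envoy cluster code above already does. A small sketch (the import path is assumed from the repository layout):

package main

import (
	"fmt"

	"code.icb4dc0.de/prskr/supabase-operator/internal/supabase"
)

func main() {
	storage := supabase.ServiceConfig.Storage
	fmt.Println(storage.Name)             // "storage-api"
	fmt.Println(storage.Defaults.ApiPort) // 5000

	fmt.Println(supabase.ServiceConfig.JWT.Defaults.JwksKey) // "jwks.json"
}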

View file

@ -0,0 +1,46 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func newEnvoyServiceConfig() envoyServiceConfig {
return envoyServiceConfig{
Defaults: envoyDefaults{
ConfigKey: "config.yaml",
UID: 65532,
GID: 65532,
},
}
}
type envoyDefaults struct {
ConfigKey string
UID, GID int64
}
type envoyServiceConfig struct {
Defaults envoyDefaults
}
func (envoyServiceConfig) ObjectName(obj metav1.Object) string {
return fmt.Sprintf("%s-envoy", obj.GetName())
}

View file

@ -0,0 +1,48 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
type imgProxyEnvKeys struct {
Bind fixedEnv
LocalFileSystemRoot stringEnv
UseETag fixedEnv
EnableWebPDetection boolEnv
}
type imgProxyDefaults struct {
ApiPort int32
ApiPortName string
UID, GID int64
}
func imgProxyServiceConfig() serviceConfig[imgProxyEnvKeys, imgProxyDefaults] {
return serviceConfig[imgProxyEnvKeys, imgProxyDefaults]{
Name: "imgproxy",
EnvKeys: imgProxyEnvKeys{
Bind: fixedEnvOf("IMGPROXY_BIND", ":5001"),
LocalFileSystemRoot: "IMGPROXY_LOCAL_FILESYSTEM_ROOT",
UseETag: fixedEnvOf("IMGPROXY_USE_ETAG", "true"),
EnableWebPDetection: "IMGPROXY_ENABLE_WEBP_DETECTION",
},
Defaults: imgProxyDefaults{
ApiPort: 5001,
ApiPortName: "api",
UID: 999,
GID: 999,
},
}
}

View file

@ -19,8 +19,41 @@ package supabase
import (
"crypto/rand"
"encoding/hex"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type jwtDefaults struct {
SecretKey string
JwksKey string
AnonKey string
ServiceKey string
SecretLength int
Expiry int
}
type jwtConfig struct {
Defaults jwtDefaults
}
func newJwtConfig() jwtConfig {
return jwtConfig{
Defaults: jwtDefaults{
SecretKey: "secret",
JwksKey: "jwks.json",
AnonKey: "anon_key",
ServiceKey: "service_key",
SecretLength: 40,
Expiry: 3600,
},
}
}
func (jwtConfig) ObjectName(obj metav1.Object) string {
return fmt.Sprintf("%s-jwt", obj.GetName())
}
func RandomJWTSecret() ([]byte, error) {
jwtSecretBytes := make([]byte, ServiceConfig.JWT.Defaults.SecretLength)

View file

@ -0,0 +1,53 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
type pgMetaEnvKeys struct {
APIPort intEnv[int32]
DBHost stringEnv
DBPort intEnv[int]
DBName stringEnv
DBUser secretEnv
DBPassword secretEnv
}
type pgMetaDefaults struct {
APIPort int32
DBPort string
NodeUID int64
NodeGID int64
}
func pgMetaServiceConfig() serviceConfig[pgMetaEnvKeys, pgMetaDefaults] {
return serviceConfig[pgMetaEnvKeys, pgMetaDefaults]{
Name: "pg-meta",
EnvKeys: pgMetaEnvKeys{
APIPort: "PG_META_PORT",
DBHost: "PG_META_DB_HOST",
DBPort: "PG_META_DB_PORT",
DBName: "PG_META_DB_NAME",
DBUser: "PG_META_DB_USER",
DBPassword: "PG_META_DB_PASSWORD",
},
Defaults: pgMetaDefaults{
APIPort: 8080,
DBPort: "5432",
NodeUID: 1000,
NodeGID: 1000,
},
}
}

View file

@ -0,0 +1,72 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
type postgrestEnvKeys struct {
Host fixedEnv
DBUri string
Schemas stringSliceEnv
AnonRole stringEnv
JWTSecret secretEnv
UseLegacyGucs boolEnv
ExtraSearchPath stringSliceEnv
AppSettingsJWTSecret secretEnv
AppSettingsJWTExpiry intEnv[int]
AdminServerPort intEnv[int32]
MaxRows intEnv[int]
OpenAPIProxyURI stringEnv
}
type postgrestConfigDefaults struct {
AnonRole string
Schemas []string
ExtraSearchPath []string
UID, GID int64
ServerPort, AdminPort int32
ServerPortName, AdminPortName string
}
func postgrestServiceConfig() serviceConfig[postgrestEnvKeys, postgrestConfigDefaults] {
return serviceConfig[postgrestEnvKeys, postgrestConfigDefaults]{
Name: "postgrest",
EnvKeys: postgrestEnvKeys{
Host: fixedEnvOf("PGRST_SERVER_HOST", "*"),
DBUri: "PGRST_DB_URI",
Schemas: stringSliceEnv{key: "PGRST_DB_SCHEMAS", separator: ","},
AnonRole: "PGRST_DB_ANON_ROLE",
JWTSecret: "PGRST_JWT_SECRET",
UseLegacyGucs: "PGRST_DB_USE_LEGACY_GUCS",
AppSettingsJWTSecret: "PGRST_APP_SETTINGS_JWT_SECRET",
AppSettingsJWTExpiry: "PGRST_APP_SETTINGS_JWT_EXP",
AdminServerPort: "PGRST_ADMIN_SERVER_PORT",
ExtraSearchPath: stringSliceEnv{key: "PGRST_DB_EXTRA_SEARCH_PATH", separator: ","},
MaxRows: "PGRST_DB_MAX_ROWS",
OpenAPIProxyURI: "PGRST_OPENAPI_SERVER_PROXY_URI",
},
Defaults: postgrestConfigDefaults{
AnonRole: "anon",
Schemas: []string{"public", "graphql_public"},
ExtraSearchPath: []string{"public", "extensions"},
UID: 1000,
GID: 1000,
ServerPort: 3000,
AdminPort: 3001,
ServerPortName: "rest",
AdminPortName: "admin",
},
}
}

View file

@ -0,0 +1,92 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
type storageEnvApiKeys struct {
AnonKey secretEnv
ServiceKey secretEnv
JwtSecret secretEnv
JwtJwks secretEnv
DatabaseDSN stringEnv
FileSizeLimit intEnv[uint64]
UploadFileSizeLimit intEnv[uint64]
UploadFileSizeLimitStandard intEnv[uint64]
StorageBackend stringEnv
FileStorageBackendPath stringEnv
FileStorageRegion fixedEnv
TenantID fixedEnv
StorageS3Bucket stringEnv
StorageS3MaxSockets intEnv[uint8]
StorageS3Endpoint stringEnv
StorageS3ForcePathStyle boolEnv
StorageS3Region stringEnv
StorageS3AccessKeyId secretEnv
StorageS3AccessSecretKey secretEnv
EnableImageTransformation boolEnv
ImgProxyURL stringEnv
TusUrlPath fixedEnv
S3ProtocolAccessKeyId secretEnv
S3ProtocolAccessKeySecret secretEnv
S3ProtocolAllowForwardedHeader boolEnv
S3ProtocolPrefix fixedEnv
}
type storageApiDefaults struct {
ApiPort int32
ApiPortName string
UID, GID int64
}
func storageServiceConfig() serviceConfig[storageEnvApiKeys, storageApiDefaults] {
return serviceConfig[storageEnvApiKeys, storageApiDefaults]{
Name: "storage-api",
EnvKeys: storageEnvApiKeys{
AnonKey: "ANON_KEY",
ServiceKey: "SERVICE_KEY",
JwtSecret: "AUTH_JWT_SECRET",
JwtJwks: "AUTH_JWT_JWKS",
StorageBackend: "STORAGE_BACKEND",
FileStorageBackendPath: "FILE_STORAGE_BACKEND_PATH",
FileStorageRegion: fixedEnvOf("REGION", "stub"),
DatabaseDSN: "DATABASE_URL",
FileSizeLimit: "FILE_SIZE_LIMIT",
UploadFileSizeLimit: "UPLOAD_FILE_SIZE_LIMIT",
UploadFileSizeLimitStandard: "UPLOAD_FILE_SIZE_LIMIT_STANDARD",
TenantID: fixedEnvOf("TENANT_ID", "stub"),
StorageS3Bucket: "STORAGE_S3_BUCKET",
StorageS3MaxSockets: "STORAGE_S3_MAX_SOCKETS",
StorageS3Endpoint: "STORAGE_S3_ENDPOINT",
StorageS3ForcePathStyle: "STORAGE_S3_FORCE_PATH_STYLE",
StorageS3Region: "STORAGE_S3_REGION",
StorageS3AccessKeyId: "AWS_ACCESS_KEY_ID",
StorageS3AccessSecretKey: "AWS_SECRET_ACCESS_KEY",
EnableImageTransformation: "ENABLE_IMAGE_TRANSFORMATION",
ImgProxyURL: "IMGPROXY_URL",
TusUrlPath: fixedEnvOf("TUS_URL_PATH", "/storage/v1/upload/resumable"),
S3ProtocolAccessKeyId: "S3_PROTOCOL_ACCESS_KEY_ID",
S3ProtocolAccessKeySecret: "S3_PROTOCOL_ACCESS_KEY_SECRET",
S3ProtocolPrefix: fixedEnvOf("S3_PROTOCOL_PREFIX", "/storage/v1"),
S3ProtocolAllowForwardedHeader: "S3_ALLOW_FORWARDED_HEADER",
},
Defaults: storageApiDefaults{
ApiPort: 5000,
ApiPortName: "api",
UID: 1000,
GID: 1000,
},
}
}

View file

@ -0,0 +1,57 @@
/*
Copyright 2025 Peter Kurfer.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package supabase
type studioEnvKeys struct {
PGMetaURL stringEnv
DBPassword secretEnv
ApiUrl stringEnv
APIExternalURL stringEnv
JwtSecret secretEnv
AnonKey secretEnv
ServiceKey secretEnv
Host fixedEnv
LogsEnabled fixedEnv
}
type studioDefaults struct {
NodeUID int64
NodeGID int64
APIPort int32
}
func studioServiceConfig() serviceConfig[studioEnvKeys, studioDefaults] {
return serviceConfig[studioEnvKeys, studioDefaults]{
Name: "studio",
EnvKeys: studioEnvKeys{
PGMetaURL: "STUDIO_PG_META_URL",
DBPassword: "POSTGRES_PASSWORD",
ApiUrl: "SUPABASE_URL",
APIExternalURL: "SUPABASE_PUBLIC_URL",
JwtSecret: "AUTH_JWT_SECRET",
AnonKey: "SUPABASE_ANON_KEY",
ServiceKey: "SUPABASE_SERVICE_KEY",
Host: fixedEnvOf("HOSTNAME", "0.0.0.0"),
LogsEnabled: fixedEnvOf("NEXT_PUBLIC_ENABLE_LOGS", "true"),
},
Defaults: studioDefaults{
NodeUID: 1000,
NodeGID: 1000,
APIPort: 3000,
},
}
}

View file

@ -57,7 +57,7 @@ func SetupDashboardWebhookWithManager(mgr ctrl.Manager) error {
// SetupStorageWebhookWithManager registers the webhook for Storage in the manager.
func SetupStorageWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).For(&supabasev1alpha1.Storage{}).
WithValidator(&StorageCustomValidator{}).
WithValidator(&StorageCustomValidator{Client: mgr.GetClient()}).
WithDefaulter(&StorageCustomDefaulter{Client: mgr.GetClient()}).
Complete()
}

View file

@ -19,18 +19,12 @@ package v1alpha1
import (
"context"
"fmt"
"maps"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/webhook"
supabasev1alpha1 "code.icb4dc0.de/prskr/supabase-operator/api/v1alpha1"
"code.icb4dc0.de/prskr/supabase-operator/internal/meta"
"code.icb4dc0.de/prskr/supabase-operator/internal/pw"
)
// +kubebuilder:webhook:path=/mutate-supabase-k8s-icb4dc0-de-v1alpha1-storage,mutating=true,failurePolicy=fail,sideEffects=None,groups=supabase.k8s.icb4dc0.de,resources=storages,verbs=create;update,versions=v1alpha1,name=mstorage-v1alpha1.kb.io,admissionReviewVersions=v1
@ -55,55 +49,19 @@ func (d *StorageCustomDefaulter) Default(ctx context.Context, obj runtime.Object
}
storagelog.Info("Defaulting for Storage", "name", storage.GetName())
if err := d.defaultS3Protocol(ctx, storage); err != nil {
return err
}
d.defaultS3Protocol(storage)
return nil
}
func (d *StorageCustomDefaulter) defaultS3Protocol(ctx context.Context, storage *supabasev1alpha1.Storage) error {
if storage.Spec.S3 == nil {
storage.Spec.S3 = new(supabasev1alpha1.S3ProtocolSpec)
func (d *StorageCustomDefaulter) defaultS3Protocol(storage *supabasev1alpha1.Storage) {
if storage.Spec.Api.S3Protocol == nil {
storage.Spec.Api.S3Protocol = new(supabasev1alpha1.S3ProtocolSpec)
}
if storage.Spec.S3.CredentialsSecretRef == nil {
storage.Spec.S3.CredentialsSecretRef = &supabasev1alpha1.S3CredentialsRef{
AccessKeyIdKey: "accessKeyId",
AccessSecretKeyKey: "secretAccessKey",
SecretName: fmt.Sprintf("%s-storage-protocol-s3-credentials", storage.Name),
if storage.Spec.Api.S3Protocol.CredentialsSecretRef == nil {
storage.Spec.Api.S3Protocol.CredentialsSecretRef = &supabasev1alpha1.S3CredentialsRef{
SecretName: fmt.Sprintf("%s-storage-protocol-s3-credentials", storage.Name),
}
}
credentialsSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: storage.Spec.S3.CredentialsSecretRef.SecretName,
Namespace: storage.Namespace,
},
}
_, err := controllerutil.CreateOrUpdate(ctx, d.Client, &credentialsSecret, func() error {
credentialsSecret.Labels = maps.Clone(storage.Labels)
if credentialsSecret.Labels == nil {
credentialsSecret.Labels = make(map[string]string)
}
credentialsSecret.Labels[meta.SupabaseLabel.Reload] = ""
if credentialsSecret.Data == nil {
credentialsSecret.Data = make(map[string][]byte, 2)
}
if _, ok := credentialsSecret.Data[storage.Spec.S3.CredentialsSecretRef.AccessKeyIdKey]; !ok {
credentialsSecret.Data[storage.Spec.S3.CredentialsSecretRef.AccessKeyIdKey] = pw.GeneratePW(32, nil)
}
if _, ok := credentialsSecret.Data[storage.Spec.S3.CredentialsSecretRef.AccessSecretKeyKey]; !ok {
credentialsSecret.Data[storage.Spec.S3.CredentialsSecretRef.AccessSecretKeyKey] = pw.GeneratePW(64, nil)
}
return nil
})
return err
}

View file

@ -18,9 +18,13 @@ package v1alpha1
import (
"context"
"errors"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@ -42,35 +46,43 @@ var storagelog = logf.Log.WithName("storage-resource")
// NOTE: The +kubebuilder:object:generate=false marker prevents controller-gen from generating DeepCopy methods,
// as this struct is used only for temporary operations and does not need to be deeply copied.
type StorageCustomValidator struct {
// TODO(user): Add more fields as needed for validation
client.Client
}
var _ webhook.CustomValidator = &StorageCustomValidator{}
// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Storage.
func (v *StorageCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
func (v *StorageCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
storage, ok := obj.(*supabasev1alpha1.Storage)
if !ok {
return nil, fmt.Errorf("expected a Storage object but got %T", obj)
}
storagelog.Info("Validation for Storage upon creation", "name", storage.GetName())
// TODO(user): fill in your validation logic upon object creation.
if ws, err := v.validateStorageApi(ctx, storage); err != nil {
return ws, err
} else {
warnings = append(warnings, ws...)
}
return nil, nil
return warnings, nil
}
// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Storage.
func (v *StorageCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
func (v *StorageCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings admission.Warnings, err error) {
storage, ok := newObj.(*supabasev1alpha1.Storage)
if !ok {
return nil, fmt.Errorf("expected a Storage object for the newObj but got %T", newObj)
}
storagelog.Info("Validation for Storage upon update", "name", storage.GetName())
// TODO(user): fill in your validation logic upon object update.
if ws, err := v.validateStorageApi(ctx, storage); err != nil {
return ws, err
} else {
warnings = append(warnings, ws...)
}
return nil, nil
return warnings, nil
}
// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Storage.
@ -81,7 +93,49 @@ func (v *StorageCustomValidator) ValidateDelete(ctx context.Context, obj runtime
}
storagelog.Info("Validation for Storage upon deletion", "name", storage.GetName())
// TODO(user): fill in your validation logic upon object deletion.
return nil, nil
}
func (v *StorageCustomValidator) validateStorageApi(ctx context.Context, storage *supabasev1alpha1.Storage) (admission.Warnings, error) {
var warnings admission.Warnings
apiSpec := storage.Spec.Api
if (apiSpec.FileBackend == nil) == (apiSpec.S3Backend == nil) {
return nil, errors.New("it is not possible to configure both or non backend at all - please configure either file or S3 backend")
}
if apiSpec.S3Backend != nil {
if apiSpec.S3Backend.CredentialsSecretRef == nil {
return nil, errors.New(".spec.api.s3Backend.credentialsSecretRef cannot be empty")
}
s3CredentialsSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: apiSpec.S3Backend.CredentialsSecretRef.SecretName,
},
}
if err := v.Get(ctx, client.ObjectKeyFromObject(s3CredentialsSecret), s3CredentialsSecret); err != nil {
if client.IgnoreNotFound(err) == nil {
warnings = append(warnings, fmt.Sprintf("Secret %q could not be found", apiSpec.S3Backend.CredentialsSecretRef.SecretName))
} else {
return nil, err
}
} else {
if accessKeyId, ok := s3CredentialsSecret.Data[apiSpec.S3Backend.CredentialsSecretRef.AccessKeyIdKey]; !ok {
return warnings, fmt.Errorf("secret %q does not contain an access key id at specified key %q", apiSpec.S3Backend.CredentialsSecretRef.SecretName, apiSpec.S3Backend.CredentialsSecretRef.AccessKeyIdKey)
} else if len(accessKeyId) == 0 {
return warnings, fmt.Errorf("access key id in Secret %q with key %q is empty", apiSpec.S3Backend.CredentialsSecretRef.SecretName, apiSpec.S3Backend.CredentialsSecretRef.AccessKeyIdKey)
}
if accessSecretKey, ok := s3CredentialsSecret.Data[apiSpec.S3Backend.CredentialsSecretRef.AccessSecretKeyKey]; !ok {
return warnings, fmt.Errorf("secret %q does not contain an access secret key at specified key %q", apiSpec.S3Backend.CredentialsSecretRef.SecretName, apiSpec.S3Backend.CredentialsSecretRef.AccessSecretKeyKey)
} else if len(accessSecretKey) == 0 {
return warnings, fmt.Errorf("access secret key in Secret %q with key %q is empty", apiSpec.S3Backend.CredentialsSecretRef.SecretName, apiSpec.S3Backend.CredentialsSecretRef.AccessSecretKeyKey)
}
}
}
return warnings, nil
}
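To pass this validation with an S3 backend, the referenced Secret has to exist in the Storage object's namespace and carry non-empty values under the configured keys. A minimal sketch of creating such a Secret with the controller-runtime client; the Secret name and the key names ("accessKeyId"/"secretAccessKey") are illustrative examples, not mandated by the validator:

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createS3BackendCredentials is an illustrative helper: it creates the Secret
// referenced by .spec.api.s3Backend.credentialsSecretRef so that validation
// succeeds; the name, keys, and values are examples only.
func createS3BackendCredentials(ctx context.Context, c client.Client, namespace string) error {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "storage-s3-backend-credentials",
			Namespace: namespace,
		},
		Data: map[string][]byte{
			"accessKeyId":     []byte("minio-access-key"),
			"secretAccessKey": []byte("minio-secret-key"),
		},
	}
	return c.Create(ctx, secret)
}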

View file

@ -33,5 +33,9 @@ func Test() error {
"PATH": strings.Join([]string{os.Getenv("PATH"), out}, string(os.PathListSeparator)),
}
return sh.RunWithV(testEnv, "go", "run", "-modfile=tools/go.mod", tools[Gotestsum], "-f", "pkgname-and-test-fails", "--", "-race", "-shuffle=on", "./...")
return sh.RunWithV(
testEnv,
"go", "run", "-modfile=tools/go.mod",
tools[Gotestsum], "-f", "pkgname-and-test-fails", "--", "-race", "-shuffle=on", "./...",
)
}

View file

@ -1,5 +1,13 @@
site_name: Supabase Operator
site_author: Peter Kurfer
site_url: https://docs.supabase-operator.icb4dc0.de/
site_description: |
Documentation for the Supabase Operator, a Kubernetes operator for managing Supabase instances.
repo_name: prskr/supabase-operator
repo_url: https://code.icb4dc0.de/prskr/supabase-operator
dev_addr: "127.0.0.1:8001"
theme:
name: material
custom_dir: docs/overrides