feat(storage): finish initial basic implementation

- support both s3 & file storage backends
- support imgproxy to scale images
- manually tested with MinIO & local storage
- fixed service discovery issue in APIGateway reconciler not detecting
  service changes
- refactored defaults and env variable code to make it manageable again
- add repo link to docs
This commit is contained in:
Peter 2025-01-23 18:00:05 +01:00
parent 604525de38
commit 0014927ca9
Signed by: prskr
GPG key ID: F56BED6903BC5E37
46 changed files with 16170 additions and 606 deletions

View file

@ -48,6 +48,15 @@ func (s JwtSpec) SecretKeySelector() *corev1.SecretKeySelector {
}
}
// JwksKeySelector builds a SecretKeySelector addressing the JWKS entry
// (s.JwksKey) within the JWT secret named by s.SecretName.
func (s JwtSpec) JwksKeySelector() *corev1.SecretKeySelector {
	sel := new(corev1.SecretKeySelector)
	sel.Name = s.SecretName
	sel.Key = s.JwksKey
	return sel
}
func (s JwtSpec) AnonKeySelector() *corev1.SecretKeySelector {
return &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@ -74,7 +83,8 @@ type ImageSpec struct {
type ContainerTemplate struct {
ImageSpec `json:",inline"`
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// SecurityContext -
// SecurityContext - override the container SecurityContext
// use with caution, by default the operator already uses sane defaults
SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
@ -83,10 +93,11 @@ type ContainerTemplate struct {
// WorkloadTemplate captures the customization knobs shared by operator-managed
// workloads: replica count, pod security context, extra labels, the container
// template and additional volumes.
type WorkloadTemplate struct {
	Replicas *int32 `json:"replicas,omitempty"`
	// NOTE(review): the next two SecurityContext lines look like the
	// removed/added pair of a rendered diff (the tag gained ",omitempty");
	// only one of them can exist in the real file — confirm against the repo.
	SecurityContext *corev1.PodSecurityContext `json:"securityContext"`
	SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
	AdditionalLabels map[string]string `json:"additionalLabels,omitempty"`
	// Workload - customize the container template of the workload
	// NOTE(review): the duplicated Workload line below is likewise diff
	// residue (whitespace-only change) — confirm against the repo.
	Workload *ContainerTemplate `json:"workload,omitempty"`
	Workload *ContainerTemplate `json:"workload,omitempty"`
	AdditionalVolumes []corev1.Volume `json:"additionalVolumes,omitempty"`
}
func (t *WorkloadTemplate) ReplicaCount() *int32 {
@ -185,6 +196,14 @@ func (t *WorkloadTemplate) AdditionalVolumeMounts(defaultMounts ...corev1.Volume
return defaultMounts
}
// Volumes combines the given default volumes with any additional volumes
// configured on the template. A nil receiver yields the defaults unchanged.
func (t *WorkloadTemplate) Volumes(defaultVolumes ...corev1.Volume) []corev1.Volume {
	if t != nil {
		return append(defaultVolumes, t.AdditionalVolumes...)
	}
	return defaultVolumes
}
func (t *WorkloadTemplate) PodSecurityContext() *corev1.PodSecurityContext {
if t != nil && t.SecurityContext != nil {
return t.SecurityContext

View file

@ -18,14 +18,17 @@ package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"code.icb4dc0.de/prskr/supabase-operator/internal/supabase"
)
// NOTE(review): both the old (StorageBackend) and the new (BackendStorageType)
// declarations appear here; in a rendered diff the StorageBackend side should
// be the removed one. Confirm only one type + constant pair survives.
type StorageBackend string
type BackendStorageType string

// Supported storage backends: plain file storage or an S3-compatible store.
const (
	StorageBackendFile StorageBackend = "file"
	StorageBackendS3 StorageBackend = "s3"
	BackendStorageTypeFile BackendStorageType = "file"
	BackendStorageTypeS3 BackendStorageType = "s3"
)
type StorageApiDbSpec struct {
@ -67,11 +70,25 @@ type S3CredentialsRef struct {
AccessSecretKeyKey string `json:"accessSecretKeyKey,omitempty"`
}
type S3ProtocolSpec struct {
// Region - S3 region to use in the API
// +kubebuilder:default="us-east-1"
Region string `json:"region,omitempty"`
// AccessKeyIdSelector resolves the secret entry that holds the S3 access key id.
func (r S3CredentialsRef) AccessKeyIdSelector() *corev1.SecretKeySelector {
	sel := new(corev1.SecretKeySelector)
	sel.Name = r.SecretName
	sel.Key = r.AccessKeyIdKey
	return sel
}
// AccessSecretKeySelector resolves the secret entry that holds the S3 secret access key.
func (r S3CredentialsRef) AccessSecretKeySelector() *corev1.SecretKeySelector {
	ref := corev1.LocalObjectReference{Name: r.SecretName}
	return &corev1.SecretKeySelector{
		LocalObjectReference: ref,
		Key:                  r.AccessSecretKeyKey,
	}
}
type S3ProtocolSpec struct {
// AllowForwardedHeader
// +kubebuilder:default=true
AllowForwardedHeader bool `json:"allowForwardedHeader,omitempty"`
@ -80,11 +97,85 @@ type S3ProtocolSpec struct {
CredentialsSecretRef *S3CredentialsRef `json:"credentialsSecretRef,omitempty"`
}
// StorageSpec defines the desired state of Storage.
type StorageSpec struct {
// BackendType - backend storage type to use
// +kubebuilder:validation:Enum={s3,file}
BackendType StorageBackend `json:"backendType"`
// FileBackendSpec configures the plain-file storage backend of the Storage API.
type FileBackendSpec struct {
	// Path - path to where files will be stored
	Path string `json:"path"`
}
// Env renders the environment variables selecting the "file" storage backend.
// A nil receiver yields no variables.
// NOTE(review): the S3 region/bucket keys are set to "local"/"stub" even for
// the file backend — presumably the Storage API requires them regardless of
// the active backend; confirm against the upstream service.
func (s *FileBackendSpec) Env() []corev1.EnvVar {
	if s == nil {
		return nil
	}
	keys := supabase.ServiceConfig.Storage.EnvKeys
	return []corev1.EnvVar{
		keys.StorageBackend.Var("file"),
		keys.TenantID.Var(),
		keys.FileStorageBackendPath.Var(s.Path),
		keys.StorageS3Region.Var("local"),
		keys.StorageS3Bucket.Var("stub"),
	}
}
// S3BackendSpec configures an S3-compatible object store as the storage
// backend of the Storage API.
type S3BackendSpec struct {
	// Region - S3 region of the backend
	Region string `json:"region"`
	// Endpoint - hostname and port **with** http/https
	Endpoint string `json:"endpoint"`
	// ForcePathStyle - whether to use path style (e.g. for MinIO) or domain style
	// for bucket addressing
	ForcePathStyle bool `json:"forcePathStyle,omitempty"`
	// Bucket - bucket to use, if file backend is used, default value is sufficient
	// +kubebuilder:default="stub"
	Bucket string `json:"bucket"`
	// CredentialsSecretRef - reference to the Secret where access key id and access secret key are stored
	CredentialsSecretRef *S3CredentialsRef `json:"credentialsSecretRef"`
}
// Env renders the environment variables configuring the "s3" storage backend.
// A nil receiver yields no variables.
func (s *S3BackendSpec) Env() []corev1.EnvVar {
	if s == nil {
		return nil
	}
	keys := supabase.ServiceConfig.Storage.EnvKeys
	// NOTE(review): CredentialsSecretRef is dereferenced unconditionally; a
	// nil ref (possible if CRD validation is bypassed) panics here — confirm
	// whether a guard is wanted.
	creds := s.CredentialsSecretRef
	return []corev1.EnvVar{
		keys.StorageBackend.Var("s3"),
		keys.StorageS3Endpoint.Var(s.Endpoint),
		keys.StorageS3ForcePathStyle.Var(s.ForcePathStyle),
		keys.StorageS3Bucket.Var(s.Bucket),
		keys.StorageS3Region.Var(s.Region),
		keys.StorageS3AccessKeyId.Var(creds.AccessKeyIdSelector()),
		keys.StorageS3AccessSecretKey.Var(creds.AccessSecretKeySelector()),
	}
}
// UploadTempSpec configures the emptyDir volume used as scratch space for
// in-flight uploads.
type UploadTempSpec struct {
	// Medium of the empty dir to cache uploads
	Medium corev1.StorageMedium `json:"medium,omitempty"`
	// SizeLimit - optional upper bound on the emptyDir size
	SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"`
}
// VolumeSource materializes the spec as an EmptyDirVolumeSource. A nil
// receiver falls back to the default storage medium with no size limit.
func (s *UploadTempSpec) VolumeSource() *corev1.EmptyDirVolumeSource {
	src := corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumDefault}
	if s != nil {
		src.Medium = s.Medium
		src.SizeLimit = s.SizeLimit
	}
	return &src
}
type StorageApiSpec struct {
S3Backend *S3BackendSpec `json:"s3Backend,omitempty"`
// FileBackend - configure the file backend
// either S3 or file backend **MUST** be configured
FileBackend *FileBackendSpec `json:"fileBackend,omitempty"`
// FileSizeLimit - maximum file upload size in bytes
// +kubebuilder:default=52428800
FileSizeLimit uint64 `json:"fileSizeLimit,omitempty"`
@ -95,11 +186,30 @@ type StorageSpec struct {
// DBSpec - Configure access to the Postgres database
// In most cases this will reference the supabase-storage-admin credentials secret provided by the Core resource
DBSpec StorageApiDbSpec `json:"db"`
// S3 - Configure S3 protocol
S3 *S3ProtocolSpec `json:"s3,omitempty"`
// EnableImageTransformation - whether to deploy the image proxy
// S3Protocol - Configure S3 access to the Storage API allowing clients to use any S3 client
S3Protocol *S3ProtocolSpec `json:"s3,omitempty"`
// UploadTemp - configure the emptyDir for storing intermediate files during uploads
UploadTemp *UploadTempSpec `json:"uploadTemp,omitempty"`
// WorkloadTemplate - customize the Storage API workload
WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
}
// ImageProxySpec configures the optional imgproxy deployment used to scale
// images on demand.
type ImageProxySpec struct {
	// Enable - whether to deploy the image proxy or not
	Enable bool `json:"enable,omitempty"`
	// EnabledWebPDetection - presumably toggles WebP support detection in the
	// proxy — TODO confirm against the imgproxy configuration.
	// NOTE(review): Go field name ("Enabled…") and JSON tag ("enable…")
	// disagree; renaming would be API-breaking for Go callers but worth
	// considering before a stable release.
	EnabledWebPDetection bool `json:"enableWebPDetection,omitempty"`
	// WorkloadTemplate - customize the image proxy workload
	WorkloadTemplate *WorkloadTemplate `json:"workloadTemplate,omitempty"`
}
// StorageSpec defines the desired state of Storage.
type StorageSpec struct {
	// Api - configure the Storage API
	Api StorageApiSpec `json:"api,omitempty"`
	// ImageProxy - optionally enable and configure the image proxy
	// the image proxy scales images to lower resolutions on demand to reduce traffic for instance for mobile devices
	// NOTE(review): the EnableImageTransformation field below appears to be
	// the removed side of a rendered diff (superseded by ImageProxy.Enable) —
	// confirm against the repo that it no longer exists.
	EnableImageTransformation bool `json:"enableImageTransformation,omitempty"`
	ImageProxy *ImageProxySpec `json:"imageProxy,omitempty"`
}
// StorageStatus defines the observed state of Storage.

View file

@ -21,7 +21,7 @@ limitations under the License.
package v1alpha1
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@ -773,6 +773,21 @@ func (in *EnvoyStatus) DeepCopy() *EnvoyStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// FileBackendSpec contains no pointer/reference fields, so a plain value copy suffices.
func (in *FileBackendSpec) DeepCopyInto(out *FileBackendSpec) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileBackendSpec.
func (in *FileBackendSpec) DeepCopy() *FileBackendSpec {
	if in == nil {
		return nil
	}
	out := new(FileBackendSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GithubAuthProvider) DeepCopyInto(out *GithubAuthProvider) {
*out = *in
@ -790,6 +805,26 @@ func (in *GithubAuthProvider) DeepCopy() *GithubAuthProvider {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// The WorkloadTemplate pointer is recursively deep-copied so the clone shares no state.
func (in *ImageProxySpec) DeepCopyInto(out *ImageProxySpec) {
	*out = *in
	if in.WorkloadTemplate != nil {
		in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
		*out = new(WorkloadTemplate)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageProxySpec.
func (in *ImageProxySpec) DeepCopy() *ImageProxySpec {
	if in == nil {
		return nil
	}
	out := new(ImageProxySpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
*out = *in
@ -927,6 +962,26 @@ func (in *PostgrestSpec) DeepCopy() *PostgrestSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// CredentialsSecretRef is cloned with a shallow struct copy (**out = **in),
// which is sufficient as long as S3CredentialsRef holds only value fields.
func (in *S3BackendSpec) DeepCopyInto(out *S3BackendSpec) {
	*out = *in
	if in.CredentialsSecretRef != nil {
		in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef
		*out = new(S3CredentialsRef)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BackendSpec.
func (in *S3BackendSpec) DeepCopy() *S3BackendSpec {
	if in == nil {
		return nil
	}
	out := new(S3BackendSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *S3CredentialsRef) DeepCopyInto(out *S3CredentialsRef) {
*out = *in
@ -1024,6 +1079,48 @@ func (in *StorageApiDbSpec) DeepCopy() *StorageApiDbSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Pointer-typed fields (S3Backend, S3Protocol, UploadTemp, WorkloadTemplate)
// are recursively deep-copied; FileBackend needs only a shallow struct copy.
func (in *StorageApiSpec) DeepCopyInto(out *StorageApiSpec) {
	*out = *in
	if in.S3Backend != nil {
		in, out := &in.S3Backend, &out.S3Backend
		*out = new(S3BackendSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.FileBackend != nil {
		in, out := &in.FileBackend, &out.FileBackend
		*out = new(FileBackendSpec)
		**out = **in
	}
	out.JwtAuth = in.JwtAuth
	in.DBSpec.DeepCopyInto(&out.DBSpec)
	if in.S3Protocol != nil {
		in, out := &in.S3Protocol, &out.S3Protocol
		*out = new(S3ProtocolSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.UploadTemp != nil {
		in, out := &in.UploadTemp, &out.UploadTemp
		*out = new(UploadTempSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.WorkloadTemplate != nil {
		in, out := &in.WorkloadTemplate, &out.WorkloadTemplate
		*out = new(WorkloadTemplate)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageApiSpec.
func (in *StorageApiSpec) DeepCopy() *StorageApiSpec {
	if in == nil {
		return nil
	}
	out := new(StorageApiSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageList) DeepCopyInto(out *StorageList) {
*out = *in
@ -1059,11 +1156,10 @@ func (in *StorageList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageSpec) DeepCopyInto(out *StorageSpec) {
*out = *in
out.JwtAuth = in.JwtAuth
in.DBSpec.DeepCopyInto(&out.DBSpec)
if in.S3 != nil {
in, out := &in.S3, &out.S3
*out = new(S3ProtocolSpec)
in.Api.DeepCopyInto(&out.Api)
if in.ImageProxy != nil {
in, out := &in.ImageProxy, &out.ImageProxy
*out = new(ImageProxySpec)
(*in).DeepCopyInto(*out)
}
}
@ -1125,6 +1221,26 @@ func (in *StudioSpec) DeepCopy() *StudioSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// SizeLimit is cloned via resource.Quantity's own DeepCopy so the clone does
// not alias the original quantity.
func (in *UploadTempSpec) DeepCopyInto(out *UploadTempSpec) {
	*out = *in
	if in.SizeLimit != nil {
		in, out := &in.SizeLimit, &out.SizeLimit
		x := (*in).DeepCopy()
		*out = &x
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UploadTempSpec.
func (in *UploadTempSpec) DeepCopy() *UploadTempSpec {
	if in == nil {
		return nil
	}
	out := new(UploadTempSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
*out = *in
@ -1150,6 +1266,13 @@ func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
*out = new(ContainerTemplate)
(*in).DeepCopyInto(*out)
}
if in.AdditionalVolumes != nil {
in, out := &in.AdditionalVolumes, &out.AdditionalVolumes
*out = make([]v1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadTemplate.