Initial commit

Peter 2022-09-11 18:49:31 +02:00
commit 222dade21c
Signed by: prskr
GPG key ID: C1DB5D2E8DB512F9
49 changed files with 1259 additions and 0 deletions

.editorconfig Normal file (+16)
@@ -0,0 +1,16 @@
root = true
[*]
indent_style = space
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
max_line_length = 120
[*.tf]
trim_trailing_whitespace = false
[*.{yml,yaml,yml.j2,yaml.j2}]
indent_size = 2
insert_final_newline = true

.gitignore vendored Normal file (+8)
@@ -0,0 +1,8 @@
*.tfvars
*.tfstate
*.tfstate.backup
*.lock.hcl
.terraform/
.vaultpw
.vscode/

@@ -0,0 +1,74 @@
# resource "hcloud_primary_ip" "cp1_ip6" {
# name = "cp1_ip6"
# datacenter = "hel1-dc2"
# type = "ipv6"
# assignee_type = "server"
# auto_delete = false
# }
# resource "hcloud_primary_ip" "worker1_ip6" {
# name = "worker1_ip6"
# datacenter = "hel1-dc2"
# type = "ipv6"
# assignee_type = "server"
# auto_delete = false
# }
# resource "hcloud_primary_ip" "worker2_ip6" {
# name = "worker2_ip6"
# datacenter = "hel1-dc2"
# type = "ipv6"
# assignee_type = "server"
# auto_delete = false
# }
resource "hcloud_network" "k8s_net" {
name = "k8s-net"
ip_range = "172.16.0.0/12"
}
resource "hcloud_network_subnet" "k8s_internal" {
network_id = hcloud_network.k8s_net.id
type = "cloud"
network_zone = "eu-central"
ip_range = "172.23.2.0/23"
}
resource "hcloud_ssh_key" "default" {
name = "Default Management"
public_key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKfHZaI0F5GjAcrM8hjWqwMfULDkAZ2TOIBTQtRocg1F id_ed25519"
}
resource "hcloud_server" "nodes" {
for_each = var.vms
name = each.key
server_type = each.value.server_type
datacenter = "hel1-dc2"
image = "ubuntu-22.04"
backups = each.value.backups
ssh_keys = [
hcloud_ssh_key.default.id
]
labels = {
"node_type" = each.value.node_type
"cluster" = "icb4dc0.de"
}
public_net {
ipv4_enabled = true
ipv6_enabled = true
}
}
resource "hcloud_server_network" "k8s_internal" {
for_each = var.vms
server_id = hcloud_server.nodes[each.key].id
network_id = hcloud_network.k8s_net.id
ip = each.value.private_ip
}
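
A hedged sketch (not part of this commit): a Terraform output that surfaces each node's public IPv6 address, which the static Ansible inventory later in this commit records by hand:

output "node_ipv6" {
  # map of node name => public IPv6 address, handy for filling in hosts.yaml
  value = { for name, node in hcloud_server.nodes : name => node.ipv6_address }
}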

infrastructure/lb.tf Normal file (+71)
@@ -0,0 +1,71 @@
resource "hcloud_load_balancer" "k8s_lb" {
name = "k8s-lb"
load_balancer_type = "lb11"
location = "hel1"
}
resource "hcloud_load_balancer_network" "k8s_lb_net" {
load_balancer_id = hcloud_load_balancer.k8s_lb.id
network_id = hcloud_network.k8s_net.id
ip = "172.23.2.5"
}
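# targets every node carrying the node_type=worker label assigned to the
# server resources above, reached via their addresses in the private k8s_net subnet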
resource "hcloud_load_balancer_target" "k8s_lb_target" {
type = "label_selector"
label_selector = "node_type=worker"
load_balancer_id = hcloud_load_balancer.k8s_lb.id
use_private_ip = true
}
resource "hcloud_managed_certificate" "icb4dc0de_wildcard" {
name = "icb4dc0.de-wildcard"
domain_names = ["*.icb4dc0.de", "icb4dc0.de"]
labels = {
}
}
resource "hcloud_load_balancer_service" "k8s_lb_svc_https" {
load_balancer_id = hcloud_load_balancer.k8s_lb.id
protocol = "https"
destination_port = 32080
health_check {
protocol = "tcp"
port = 32080
interval = 5
timeout = 3
retries = 3
http {
domain = "code.icb4dc0.de"
path = "/"
tls = false
status_codes = [
"2??",
"3??"
]
}
}
http {
redirect_http = true
certificates = [
hcloud_managed_certificate.icb4dc0de_wildcard.id
]
}
}
resource "hcloud_load_balancer_service" "k8s_lb_svc_ssh" {
load_balancer_id = hcloud_load_balancer.k8s_lb.id
protocol = "tcp"
destination_port = 32022
listen_port = 22
health_check {
protocol = "tcp"
port = 32022
interval = 5
timeout = 3
retries = 3
}
}

infrastructure/main.tf Normal file (+12)
@@ -0,0 +1,12 @@
terraform {
required_providers {
hcloud = {
source = "hetznercloud/hcloud"
version = "1.35.1"
}
}
}
provider "hcloud" {
token = var.hcloud_token
}

infrastructure/vars.tf Normal file (+12)
@@ -0,0 +1,12 @@
variable "hcloud_token" {
sensitive = true
}
variable "vms" {
type = map(object({
node_type = string
server_type = string
backups = bool
private_ip = string
}))
}
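
For illustration, a hypothetical terraform.tfvars matching these declarations (not part of this commit, and gitignored via *.tfvars; the server types are assumptions, while node names, roles, and private IPs mirror the Ansible inventory below):

hcloud_token = "<hcloud API token>"

vms = {
  cp01 = {
    node_type   = "master"
    server_type = "cx21" # assumed
    backups     = true
    private_ip  = "172.23.2.10"
  }
  worker01 = {
    node_type   = "worker"
    server_type = "cx21" # assumed
    backups     = false
    private_ip  = "172.23.2.20"
  }
}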

k8s/ansible.cfg Normal file (+6)
@@ -0,0 +1,6 @@
[defaults]
nocows = True
inventory = ./inventory/
interpreter_python = /usr/bin/python3

[privilege_escalation]
become = True

@@ -0,0 +1,8 @@
- name: Configure cluster
hosts: localhost
roles:
- role: postgres
- role: csi
- role: minio
- role: gitea
- role: agola

@@ -0,0 +1,28 @@
all:
vars:
ansible_user: root
k3s_version: v1.24.3+k3s1
clusterctl_version: v1.2.1
clusterapi_provider_hetzner_version: v1.0.0-beta.0
extra_server_args: "--node-taint=node-type=master:NoSchedule --tls-san='2a01:4f9:c012:7d4b::1' --tls-san='k8s.icb4dc0.de' --tls-san='127.0.0.1'"
extra_agent_args: ""
ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
systemd_dir: /etc/systemd/system
master_ip: "172.23.2.10"
domain: icb4dc0.de
agola_image: docker.io/sorintlab/agola:v0.7.0
children:
control_plane:
hosts:
cp01:
ansible_host: "2a01:4f9:c012:7d4b::1"
k8s_ip: "172.23.2.10"
worker_nodes:
hosts:
worker01:
ansible_host: "2a01:4f9:c012:7521::1"
k8s_ip: "172.23.2.20"
worker02:
ansible_host: "2a01:4f9:c011:b313::1"
k8s_ip: "172.23.2.21"

@@ -0,0 +1,28 @@
$ANSIBLE_VAULT;1.1;AES256
62336464623834373037373035346466366536643133656330613533613739323833313936366130
3862346331393636353034343363323164366434303063360a363164346638326163373332333438
61326566386530643830353934366232386636663133346632633866336665383331636266356434
6466656238326364310a316136613737663761653563616437396661623662616538633937323231
62616665613335333632633133663536626237386539646635613031643830633235623661373735
33646532613234373535356532386465303733356261303362316237303633363231313666613464
62366666626231623439333432353832313463346431653763383935333534323234356530613736
62306462616232393164333137663961303532316234336339353830646333666334663936356530
38363136663865333232643330326163623266353766613438626433303331323733303664366631
65653263666333373532393764376632613230373035643131303939623461363435643932336136
33663439643866333765623733396334313666383237653839303037643134313066336337396537
34623933336165333063373062626237656132316337646237346637346331303239353136623431
63383837623531306566376665643363626435653537376131303735343736323338333438353937
39633138306564376463366463383738343438393033343337353932306132383064633236336166
37663537653130366634636537626462666231363465623834643032396534323238383365653638
35323537303437356663333838316633353964333230633038643230613833633333363661326132
32663136333132393436383338313837623139316238666136626338616135383131353430306662
38366266373864623437666263663665653061616266386539623435346435383735313830306432
32363936343830306432656263613339356363373961623635666337376166306666353232626531
33376237633263356538356133613532663536643731393562616161643864383166636364326539
66373266326530306565383031383835623966646331653937333231643961656461323063313137
31386634326265343832393365306138623937616332626233656266306562323636616637363238
30366238633265663465663436613030656231343139333066656231646462346230306134626462
33373339366331343761373238643137636430643830323638396465636536386363663132333461
66383466373839616165306131383331396362333865383635313433343962646532313536393430
34373263313830616663613435343838356366313161313938633032396338656331616135336633
326132356566613361383664663334363535
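
The vault content is opaque, but the roles in this commit reference a specific set of variables; a hypothetical sketch of the plaintext structure this file needs to provide (names taken from the tasks and templates below, values elided):

HcloudToken: "<hcloud API token>"       # csi role
PostgresPassword: "<postgres password>" # postgres role
minio:
  rootUser: "<minio root user>"         # minio role, agola and gitea storage
  rootPassword: "<minio root password>"
gitea:
  adminUser: "<gitea admin user>"
  adminPassword: "<gitea admin password>"
  dbPassword: "<gitea database password>"
agola:
  adminToken: "<agola admin token>"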

@@ -0,0 +1,4 @@
plugin: hcloud
label_selector: cluster=icb4dc0.de
keyed_groups:
- key: labels
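
Keying on the full labels dict produces one group per label key/value pair. A hedged variation (assuming the plugin's standard constructed-inventory options) that groups hosts only by the node_type label assigned in Terraform:

keyed_groups:
  # hypothetical: yields groups like node_type_master / node_type_worker
  - key: labels.node_type
    prefix: node_type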

@@ -0,0 +1,34 @@
---
- name: Create Agola namespace
kubernetes.core.k8s:
name: agola
api_version: v1
kind: Namespace
state: present
- name: Create Agola manifests
kubernetes.core.k8s:
state: present
namespace: agola
definition: "{{ lookup('template', item) | from_yaml }}"
with_items:
- config.yml.j2
- pvc.yml.j2
- all-deployment.yml.j2
- all-internal-svc.yml.j2
- all-svc.yml.j2
- role.yml.j2
- rolebinding.yml.j2
- serviceaccount.yml.j2
- clusterrole.yml.j2
- clusterrolebinding.yml.j2
- ingress.yml.j2
# - gateway-deployment.yml.j2
# - gateway-svc.yml.j2
# - gitserver-deployment.yml.j2
# - gitserver-svc.yml.j2
# - runservice-deployment.yml.j2
# - runservice-svc.yml.j2
# - configstore-deployment.yml.j2
# - configstore-svc.yml.j2
# - executor-deployment.yml.j2

@@ -0,0 +1,56 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: agola
spec:
# Do not increase replica count or everything will break since every pod will
# have its own database
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: agola
template:
metadata:
labels:
app: agola
spec:
serviceAccountName: agola
containers:
- name: agola
image: {{ agola_image }}
command:
- /bin/agola
- serve
- "--config"
- /mnt/agola/config/config.yml
- "--components"
- all-base,executor
- "--detailed-errors"
- "--embedded-etcd"
- "--embedded-etcd-data-dir"
- "/mnt/agola/data/etcd"
env:
ports:
- containerPort: 8000
- containerPort: 4000
- containerPort: 4002
- containerPort: 4003
volumeMounts:
- name: config-volume
mountPath: /mnt/agola/config
- name: agola-localdata
mountPath: /mnt/agola/local
- name: agola-objectstorage
mountPath: /mnt/agola/data
volumes:
- name: config-volume
secret:
secretName: agola
- name: agola-localdata
emptyDir: {}
- name: agola-objectstorage
persistentVolumeClaim:
claimName: agola-data

@@ -0,0 +1,21 @@
---
# The service for internal component communication.
# We use a headless service since some k8s deployments don't have
# hairpin mode enabled, so pods cannot communicate with themselves via a
# service.
apiVersion: v1
kind: Service
metadata:
name: agola-internal
spec:
clusterIP: None # headless, matching the comment above
ports:
- port: 8000
name: api
- port: 4000
name: runservice
- port: 4002
name: configstore
- port: 4003
name: gitserver
selector:
app: agola

@@ -0,0 +1,11 @@
---
apiVersion: v1
kind: Service
metadata:
name: agola
spec:
ports:
- port: 8000
name: api
selector:
app: agola

@@ -0,0 +1,12 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: agola
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- "*"

@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: agola
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: agola
subjects:
- kind: ServiceAccount
name: agola
namespace: agola

@@ -0,0 +1,86 @@
---
apiVersion: v1
kind: Secret
metadata:
name: agola
stringData:
config.yml: |
gateway:
# The api url that clients will call
# Change this to the exposed "agola" service IP
apiExposedURL: "https://ci.{{ domain }}"
# The web interface url that clients will use
# Change this to the exposed "agola" service IP
webExposedURL: "https://ci.{{ domain }}"
runserviceURL: "http://agola-internal:4000"
configstoreURL: "http://agola-internal:4002"
gitserverURL: "http://agola-internal:4003"
web:
listenAddress: ":8000"
tokenSigning:
# hmac or rsa (it is also possible to use rsa)
method: hmac
# key to use when signing with hmac
key: DeerahXi8iChoh6VohG9to9vo
# paths to the private and public keys in pem encoding when using rsa signing
#privateKeyPath: /path/to/privatekey.pem
#publicKeyPath: /path/to/public.pem
adminToken: "{{ agola.adminToken }}"
scheduler:
runserviceURL: "http://agola-internal:4000"
notification:
webExposedURL: "https://ci.{{ domain }}"
runserviceURL: "http://agola-internal:4000"
configstoreURL: "http://agola-internal:4002"
db:
# example with a postgres db
type: sqlite3
# connString: "postgres://{{ agola.dbUser }}:{{ agola.dbPassword }}@default-postgres-postgresql.postgres.svc.cluster.local:5432/agola_notification?sslmode=disable"
connString: "/opt/agola/notification/db/db.db"
configstore:
dataDir: /mnt/agola/local/configstore
db:
# example with a postgres db
type: sqlite
# connString: "postgres://{{ agola.dbUser }}:{{ agola.dbPassword }}@default-postgres-postgresql.postgres.svc.cluster.local:5432/agola_configstore?sslmode=disable"
connString: "/opt/agola/configstore/db/db.db"
objectStorage:
type: s3
endpoint: "http://minio.minio.svc.cluster.local:9000"
bucket: agola-configstore
accessKey: "{{ minio.rootUser }}"
secretAccessKey: "{{ minio.rootPassword }}"
web:
listenAddress: ":4002"
runservice:
# debug: true
dataDir: /mnt/agola/local/runservice
db:
type: sqlite
# connString: "postgres://{{ agola.dbUser }}:{{ agola.dbPassword }}@default-postgres-postgresql.postgres.svc.cluster.local:5432/agola_runservice?sslmode=disable"
connString: "/opt/agola/runservice/db/db.db"
objectStorage:
type: s3
# example with minio
endpoint: "http://minio.minio.svc.cluster.local:9000"
bucket: agola-runservice
accessKey: "{{ minio.rootUser }}"
secretAccessKey: "{{ minio.rootPassword }}"
web:
listenAddress: ":4000"
executor:
dataDir: /mnt/agola/local/executor
# The directory containing the toolbox compiled for the various supported architectures
toolboxPath: ./bin
runserviceURL: "http://agola-internal:4000"
web:
listenAddress: ":4001"
activeTasksLimit: 2
driver:
type: kubernetes
allowPrivilegedContainers: true
gitserver:
dataDir: /mnt/agola/local/gitserver
gatewayURL: "http://agola-internal:8000"
web:
listenAddress: ":4003"

@@ -0,0 +1,43 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: agola-configstore
spec:
replicas: 1
selector:
matchLabels:
app: agola
component: configstore
template:
metadata:
labels:
app: agola
component: configstore
spec:
serviceAccount: agola
containers:
- name: agola
image: {{ agola_image }}
command:
- /bin/agola
- serve
- "--config"
- /mnt/agola/config/config.yml
- "--components"
- configstore
- --embedded-etcd
env:
ports:
- containerPort: 4002
volumeMounts:
- name: config-volume
mountPath: /mnt/agola/config
- name: agola-localdata
mountPath: /mnt/agola/local
volumes:
- name: config-volume
secret:
secretName: agola
- name: agola-localdata
emptyDir: {}

@@ -0,0 +1,12 @@
---
# The service for internal component communication with the configstore.
apiVersion: v1
kind: Service
metadata:
name: agola-configstore
spec:
ports:
- port: 4002
selector:
app: agola
component: configstore

@@ -0,0 +1,42 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: agola-executor
spec:
replicas: 2
selector:
matchLabels:
app: agola
component: executor
template:
metadata:
labels:
app: agola
component: executor
spec:
serviceAccount: agola
containers:
- name: agola
image: {{ agola_image }}
command:
- /bin/agola
- serve
- "--config"
- /mnt/agola/config/config.yml
- "--components"
- executor
env:
ports:
- containerPort: 4001
volumeMounts:
- name: config-volume
mountPath: /mnt/agola/config
- name: agola-localdata
mountPath: /mnt/agola/local
volumes:
- name: config-volume
secret:
secretName: agola
- name: agola-localdata
emptyDir: {}

@@ -0,0 +1,42 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: agola-gateway
spec:
replicas: 2
selector:
matchLabels:
app: agola
component: gateway
template:
metadata:
labels:
app: agola
component: gateway
spec:
serviceAccount: agola
containers:
- name: agola
image: {{ agola_image }}
command:
- /bin/agola
- serve
- "--config"
- /mnt/agola/config/config.yml
- "--components"
- gateway,scheduler,notification
env:
ports:
- containerPort: 8000
volumeMounts:
- name: config-volume
mountPath: /mnt/agola/config
- name: agola-localdata
mountPath: /mnt/agola/local
volumes:
- name: config-volume
secret:
secretName: agola
- name: agola-localdata
emptyDir: {}

@@ -0,0 +1,13 @@
---
# The client service. It's a NodePort for easier testing on minikube; change
# it to a LoadBalancer if needed.
apiVersion: v1
kind: Service
metadata:
name: agola-gateway
spec:
ports:
- port: 8000
selector:
app: agola
component: gateway

@@ -0,0 +1,47 @@
---
# The gitserver. Since it primarily stores temporary git build data, the
# simplest way to deploy it is a Deployment with 1 replica and an emptyDir
# volume. A StatefulSet with 1 replica and a persistent volume would be a
# better alternative.
apiVersion: apps/v1
kind: Deployment
metadata:
name: agola-gitserver
spec:
# Don't increase the replicas
replicas: 1
selector:
matchLabels:
app: agola
component: gitserver
template:
metadata:
labels:
app: agola
component: gitserver
spec:
serviceAccount: agola
containers:
- name: agola
image: {{ agola_image }}
command:
- /bin/agola
- serve
- "--config"
- /mnt/agola/config/config.yml
- "--components"
- gitserver
env:
ports:
- containerPort: 4003
volumeMounts:
- name: config-volume
mountPath: /mnt/agola/config
- name: agola-localdata
mountPath: /mnt/agola/local
volumes:
- name: config-volume
secret:
secretName: agola
- name: agola-localdata
emptyDir: {}

@@ -0,0 +1,12 @@
---
# The service for internal component communication with the gitserver.
apiVersion: v1
kind: Service
metadata:
name: agola-gitserver
spec:
ports:
- port: 4003
selector:
app: agola
component: gitserver

@@ -0,0 +1,21 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: agola
labels:
app: agola
app.kubernetes.io/instance: agola
app.kubernetes.io/name: agola
spec:
rules:
- host: "ci.{{ domain }}"
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: agola
port:
number: 8000

@@ -0,0 +1,12 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: agola-data
spec:
storageClassName: hcloud-volumes
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

@@ -0,0 +1,24 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: agola
namespace: agola
rules:
- apiGroups:
- ""
resources:
- nodes
- pods
- pods/exec
- configmaps
- secrets
verbs:
- "*"
- apiGroups:
- "coordination.k8s.io"
resources:
- leases
verbs:
- "*"

@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: agola
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: agola
subjects:
- kind: ServiceAccount
name: agola
namespace: agola

@@ -0,0 +1,43 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: agola-runservice
spec:
replicas: 1
selector:
matchLabels:
app: agola
component: runservice
template:
metadata:
labels:
app: agola
component: runservice
spec:
serviceAccount: agola
containers:
- name: agola
image: {{ agola_image }}
command:
- /bin/agola
- serve
- "--config"
- /mnt/agola/config/config.yml
- "--components"
- runservice
- --embedded-etcd
env:
ports:
- containerPort: 4000
volumeMounts:
- name: config-volume
mountPath: /mnt/agola/config
- name: agola-localdata
mountPath: /mnt/agola/local
volumes:
- name: config-volume
secret:
secretName: agola
- name: agola-localdata
emptyDir: {}

@@ -0,0 +1,12 @@
---
# The service for internal component communication with the runservice.
apiVersion: v1
kind: Service
metadata:
name: agola-runservice
spec:
ports:
- port: 4000
selector:
app: agola
component: runservice

@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: agola

@@ -0,0 +1,29 @@
---
- name: Create Hcloud token secret
kubernetes.core.k8s:
state: present
definition:
apiVersion: v1
kind: Secret
metadata:
name: hcloud-csi
namespace: kube-system
data:
token: "{{ HcloudToken | b64encode }}"
- name: Create temporary file
ansible.builtin.tempfile:
state: file
suffix: temp
register: csi_manifest_tmp
- name: Download CSI manifest
ansible.builtin.get_url:
url: https://raw.githubusercontent.com/hetznercloud/csi-driver/v1.6.0/deploy/kubernetes/hcloud-csi.yml
dest: "{{ csi_manifest_tmp.path }}"
mode: '0664'
- name: Deploy CSI driver
kubernetes.core.k8s:
state: present
src: "{{ csi_manifest_tmp.path }}"

@@ -0,0 +1,24 @@
---
- name: Create temporary file
ansible.builtin.tempfile:
state: file
suffix: temp
register: k3s_binary_tmp
delegate_to: localhost
run_once: true
- name: Download k3s binary
get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt
dest: "{{ k3s_binary_tmp.path }}"
delegate_to: localhost
run_once: true
- name: Copy k3s binary
ansible.builtin.copy:
src: "{{ k3s_binary_tmp.path }}"
dest: /usr/local/bin/k3s
owner: root
group: root
mode: 0755

@@ -0,0 +1,32 @@
---
- name: Create Gitea namespace
kubernetes.core.k8s:
name: gitea
api_version: v1
kind: Namespace
state: present
- name: Create Gitea admin credentials
kubernetes.core.k8s:
state: present
definition:
apiVersion: v1
kind: Secret
metadata:
name: gitea-admin-credentials
namespace: gitea
data:
username: "{{ gitea.adminUser | b64encode }}"
password: "{{ gitea.adminPassword | b64encode }}"
- name: Add Gitea chart repo
kubernetes.core.helm_repository:
name: gitea
repo_url: https://dl.gitea.io/charts/
- name: Deploy Gitea chart
kubernetes.core.helm:
name: gitea
chart_ref: gitea/gitea
release_namespace: gitea
release_values: "{{ lookup('template', 'values.gitea.yml.j2') | from_yaml }}"

@@ -0,0 +1,50 @@
service:
ssh:
type: NodePort
nodePort: 32022
ingress:
enabled: true
hosts:
- host: code.icb4dc0.de
paths:
- path: /
pathType: Prefix
resources:
limits:
cpu: 100m
memory: 256Mi
requests:
cpu: 50m
memory: 128Mi
persistence:
enabled: false
gitea:
admin:
existingSecret: gitea-admin-credentials
config:
server:
PROTOCOL: http
ROOT_URL: https://code.icb4dc0.de/
LFS_START_SERVER: 'true'
storage:
STORAGE_TYPE: minio
MINIO_ENDPOINT: minio.minio.svc.cluster.local:9000
MINIO_ACCESS_KEY_ID: "{{ minio.rootUser }}"
MINIO_SECRET_ACCESS_KEY: "{{ minio.rootPassword }}"
MINIO_BUCKET: gitea
MINIO_LOCATION: us-east-1
MINIO_USE_SSL: 'false'
database:
DB_TYPE: postgres
HOST: default-postgres-postgresql.postgres.svc.cluster.local:5432
NAME: gitea
USER: gitea
PASSWD: "{{ gitea.dbPassword }}"
postgresql:
enabled: false

@@ -0,0 +1,2 @@
---
k3s_server_location: /var/lib/rancher/k3s

@@ -0,0 +1,14 @@
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: traefik
namespace: kube-system
spec:
valuesContent: |-
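# web stays on nodePort 32080, the destination_port targeted by the
# Hetzner LB's HTTPS service in infrastructure/lb.tf; websecure is not
# exposed because TLS already terminates at the load balancer's managed
# certificate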
ports:
web:
nodePort: 32080
websecure:
expose: false
service:
type: NodePort

@@ -0,0 +1,86 @@
---
- name: Copy K3s service file
register: k3s_service
template:
src: "k3s.service.j2"
dest: "{{ systemd_dir }}/k3s.service"
owner: root
group: root
mode: 0644
- name: Copy Traefik customization
ansible.builtin.copy:
src: traefik.yaml
dest: /var/lib/rancher/k3s/server/manifests/traefik-config.yaml
owner: root
group: root
mode: 0644
- name: Enable and check K3s service
systemd:
name: k3s
daemon_reload: true
state: restarted
enabled: true
- name: Wait for node-token
wait_for:
path: "{{ k3s_server_location }}/server/node-token"
- name: Register node-token file access mode
stat:
path: "{{ k3s_server_location }}/server/node-token"
register: p
- name: Change file access node-token
file:
path: "{{ k3s_server_location }}/server/node-token"
mode: "g+rx,o+rx"
- name: Read node-token from master
slurp:
path: "{{ k3s_server_location }}/server/node-token"
register: node_token
- name: Store Master node-token
set_fact:
token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"
- name: Restore node-token file access
file:
path: "{{ k3s_server_location }}/server/node-token"
mode: "{{ p.stat.mode }}"
- name: Create directory .kube
file:
path: ~{{ ansible_user }}/.kube
state: directory
owner: "{{ ansible_user }}"
mode: "u=rwx,g=rx,o="
- name: Copy config file to user home directory
copy:
src: /etc/rancher/k3s/k3s.yaml
dest: ~{{ ansible_user }}/.kube/config
remote_src: yes
owner: "{{ ansible_user }}"
mode: "u=rw,g=,o="
- name: Replace https://localhost:6443 with https://master-ip:6443
command: >-
k3s kubectl config set-cluster default
--server=https://{{ master_ip }}:6443
--kubeconfig ~{{ ansible_user }}/.kube/config
changed_when: true
- name: Create kubectl symlink
file:
src: /usr/local/bin/k3s
dest: /usr/local/bin/kubectl
state: link
- name: Create crictl symlink
file:
src: /usr/local/bin/k3s
dest: /usr/local/bin/crictl
state: link

@@ -0,0 +1,24 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target
[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s server --data-dir {{ k3s_server_location }} --advertise-address {{ k8s_ip }} --node-ip {{ k8s_ip }} {{ extra_server_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,15 @@
---
- name: Copy K3s service file
template:
src: "k3s.service.j2"
dest: "{{ systemd_dir }}/k3s-node.service"
owner: root
group: root
mode: 0644
- name: Enable and check K3s service
systemd:
name: k3s-node
daemon_reload: yes
state: restarted
enabled: yes

@@ -0,0 +1,24 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target
[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s agent --server https://{{ master_ip }}:6443 --node-ip {{ k8s_ip }} --token {{ hostvars[groups['control_plane'][0]]['token'] }} {{ extra_agent_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,10 @@
mode: standalone
existingSecret: minio-credentials
persistence:
enabled: true
storageClass: hcloud-volumes
size: 50Gi
resources:
requests:
memory: 150Mi

@@ -0,0 +1,33 @@
---
- name: Create MinIO namespace
kubernetes.core.k8s:
name: minio
api_version: v1
kind: Namespace
state: present
- name: Create MinIO secret
kubernetes.core.k8s:
state: present
definition:
apiVersion: v1
kind: Secret
metadata:
name: minio-credentials
namespace: minio
data:
rootUser: "{{ minio.rootUser | b64encode }}"
rootPassword: "{{ minio.rootPassword | b64encode }}"
- name: Add MinIO chart repo
kubernetes.core.helm_repository:
name: minio
repo_url: https://charts.min.io/
- name: Deploy MinIO chart
kubernetes.core.helm:
name: minio
chart_ref: minio/minio
release_namespace: minio
release_values: "{{ lookup('ansible.builtin.file', 'values.minio.yaml') | from_yaml }}"

@@ -0,0 +1,7 @@
auth:
existingSecret: postgres-credentials
primary:
persistence:
storageClass: hcloud-volumes
size: 8Gi

@@ -0,0 +1,31 @@
---
- name: Create postgres namespace
kubernetes.core.k8s:
name: postgres
api_version: v1
kind: Namespace
state: present
- name: Create Postgres secret
kubernetes.core.k8s:
state: present
definition:
apiVersion: v1
kind: Secret
metadata:
name: postgres-credentials
namespace: postgres
data:
postgres-password: "{{ PostgresPassword | b64encode }}"
- name: Add Bitnami chart repo
kubernetes.core.helm_repository:
name: bitnami
repo_url: https://charts.bitnami.com/bitnami
- name: Deploy Postgres chart
kubernetes.core.helm:
name: default-postgres
chart_ref: bitnami/postgresql
release_namespace: postgres
release_values: "{{ lookup('ansible.builtin.file', 'values.postgres.yaml') | from_yaml }}"

@@ -0,0 +1,16 @@
---
- name: Enable IPv4 forwarding
sysctl:
name: net.ipv4.ip_forward
value: "1"
state: present
reload: yes
- name: Enable IPv6 forwarding
sysctl:
name: net.ipv6.conf.all.forwarding
value: "1"
state: present
reload: yes
when: ansible_all_ipv6_addresses

@@ -0,0 +1,4 @@
---
collections:
- kubernetes.core
- hetzner.hcloud

k8s/setup_cluster.yaml Normal file (+17)
@@ -0,0 +1,17 @@
---
- name: Prepare nodes
hosts: all
roles:
- role: prereq
- role: download
- name: Set up control plane
hosts: control_plane
roles:
- role: k3s/master
- name: Set up worker nodes
hosts: worker_nodes
roles:
- role: k3s/node