feat(ci): replace Agola with Concourse

Peter 2022-12-23 14:04:44 +01:00
parent 1212236492
commit 27caef4eda
Signed by: prskr
GPG key ID: C1DB5D2E8DB512F9
36 changed files with 903 additions and 1243 deletions

View file

@@ -0,0 +1,31 @@
resource "hcloud_server" "concourse_nodes" {
  for_each = var.ci_workers

  name        = each.key
  server_type = each.value.server_type
  datacenter  = "hel1-dc2"
  image       = "ubuntu-22.04"
  backups     = false

  ssh_keys = [
    hcloud_ssh_key.default.id
  ]

  labels = {
    "node_type" = each.value.node_type
  }

  public_net {
    ipv4_enabled = true
    ipv6_enabled = true
  }
}

resource "hcloud_server_network" "concourse_internal" {
  for_each = var.ci_workers

  server_id  = hcloud_server.concourse_nodes[each.key].id
  network_id = hcloud_network.k8s_net.id
  ip         = each.value.private_ip
}
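Assuming the repository's usual Terraform workflow (none of these commands are part of the commit itself), the new worker VMs would be rolled out with something like:

    # review the new hcloud_server / hcloud_server_network resources first
    terraform plan -out=ci-workers.tfplan
    # then apply exactly the reviewed plan
    terraform apply ci-workers.tfplan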

View file

@@ -10,3 +10,11 @@ variable "vms" {
    private_ip  = string
  }))
}

variable "ci_workers" {
  type = map(object({
    node_type   = string
    server_type = string
    private_ip  = string
  }))
}

View file

@@ -18,3 +18,11 @@ vms = {
    private_ip = "172.23.2.21"
  }
}

ci_workers = {
  "concourse-worker-vm-1" = {
    node_type   = "concourse_worker"
    server_type = "cpx21"
    private_ip  = "172.23.2.31"
  }
}

View file

@@ -1,6 +1,6 @@
 [defaults]
 nocows = True
-inventory = ./inventory/
+inventory = ./inventory/clusters.yaml
 ansible_python_interpreter = /usr/bin/python3
 become = True

View file

@@ -0,0 +1,9 @@
all:
  vars:
    ansible_user: root
    concourse_version: "7.8.3"
  children:
    concourse_workers:
      hosts:
        concourse-worker-1:
          ansible_host: "95.217.220.68"

View file

@@ -13,14 +13,14 @@ all:
     control_plane:
       hosts:
         cp01:
-          ansible_host: "2a01:4f9:c012:7d4b::1"
+          ansible_host: "95.216.168.169"
           k8s_ip: "172.23.2.10"
     worker_nodes:
       hosts:
         worker01:
-          ansible_host: "2a01:4f9:c012:7521::1"
+          ansible_host: "65.108.148.54"
           k8s_ip: "172.23.2.20"
         worker02:
-          ansible_host: "2a01:4f9:c011:b313::1"
+          ansible_host: "95.217.184.201"
           k8s_ip: "172.23.2.21"

File diff suppressed because it is too large

View file

@@ -1,37 +0,0 @@
---
- name: Create Agola namespace
  kubernetes.core.k8s:
    name: agola
    api_version: v1
    kind: Namespace
    state: present

- name: Create Agola manifests
  kubernetes.core.k8s:
    state: present
    namespace: "{{ item.namespace | default('agola') }}"
    definition: "{{ lookup('template', item.file) | from_yaml }}"
  with_items:
    - file: config.yml.j2
    - file: pvc.yml.j2
    - file: all-deployment.yml.j2
    - file: all-internal-svc.yml.j2
    - file: all-svc.yml.j2
    - file: ingress.yml.j2
    - file: rbac/role.yml.j2
    - file: rbac/rolebinding.yml.j2
    - file: rbac/serviceaccount.yml.j2
    - file: rbac/clusterrole.yml.j2
    - file: rbac/clusterrolebinding.yml.j2
    - file: rbac/agola-deploy-role.yml.j2
    - file: rbac/agola-deploy-rolebinding.yml.j2
      namespace: blog
    # - gateway-deployment.yml.j2
    # - gateway-svc.yml.j2
    # - gitserver-deployment.yml.j2
    # - gitserver-svc.yml.j2
    # - runservice-deployment.yml.j2
    # - runservice-svc.yml.j2
    # - configstore-deployment.yml.j2
    # - configstore-svc.yml.j2
    # - executor-deployment.yml.j2

View file

@@ -1,55 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: agola
spec:
  # Do not increase replica count or everything will break since every pod will
  # have its own database
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: agola
  template:
    metadata:
      labels:
        app: agola
    spec:
      serviceAccountName: agola
      imagePullSecrets:
        - name: gitea-pull-secret
      containers:
        - name: agola
          image: {{ agola_image }}
          command:
            - /bin/agola
            - serve
            - "--config"
            - /mnt/agola/config/config.yml
            - "--components"
            - all-base,executor
            - "--detailed-errors"
          env:
          ports:
            - containerPort: 8000
            - containerPort: 4000
            - containerPort: 4002
            - containerPort: 4003
          volumeMounts:
            - name: config-volume
              mountPath: /mnt/agola/config
            - name: agola-localdata
              mountPath: /mnt/agola/local
            - name: agola-objectstorage
              mountPath: /mnt/agola/data
      volumes:
        - name: config-volume
          secret:
            secretName: agola
        - name: agola-localdata
          emptyDir: {}
        - name: agola-objectstorage
          persistentVolumeClaim:
            claimName: agola-data

View file

@@ -1,21 +0,0 @@
---
# The service for internal component communication.
# We use a headless service since some k8s deployments don't have
# hairpin mode enabled, so pods cannot communicate with themselves via a
# service.
apiVersion: v1
kind: Service
metadata:
  name: agola-internal
spec:
  ports:
    - port: 8000
      name: api
    - port: 4000
      name: runservice
    - port: 4002
      name: configstore
    - port: 4003
      name: gitserver
  selector:
    app: agola

View file

@@ -1,11 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: agola
spec:
  ports:
    - port: 8000
      name: api
  selector:
    app: agola

View file

@@ -1,86 +0,0 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: agola
stringData:
  config.yml: |
    gateway:
      # The api url that clients will call
      # Change this to the exposed "agola" service IP
      apiExposedURL: "https://ci.{{ domain }}"
      # The web interface url that clients will use
      # Change this to the exposed "agola" service IP
      webExposedURL: "https://ci.{{ domain }}"
      runserviceURL: "http://agola-internal:4000"
      configstoreURL: "http://agola-internal:4002"
      gitserverURL: "http://agola-internal:4003"
      web:
        listenAddress: ":8000"
      tokenSigning:
        # signing method: hmac or rsa
        method: hmac
        # key to use when signing with hmac
        key: DeerahXi8iChoh6VohG9to9vo
        # paths to the private and public keys in pem encoding when using rsa signing
        #privateKeyPath: /path/to/privatekey.pem
        #publicKeyPath: /path/to/public.pem
      adminToken: "{{ agola.adminToken }}"
    scheduler:
      runserviceURL: "http://agola-internal:4000"
    notification:
      webExposedURL: "https://ci.{{ domain }}"
      runserviceURL: "http://agola-internal:4000"
      configstoreURL: "http://agola-internal:4002"
      db:
        # example with a postgres db
        type: postgres
        connString: "postgres://{{ agola.dbUser }}:{{ agola.dbPassword }}@default-postgres-postgresql.postgres.svc.cluster.local:5432/agola_notification?sslmode=disable"
        # connString: "/opt/agola/notification/db/db.db"
    configstore:
      dataDir: /mnt/agola/local/configstore
      db:
        # example with a postgres db
        type: postgres
        connString: "postgres://{{ agola.dbUser }}:{{ agola.dbPassword }}@default-postgres-postgresql.postgres.svc.cluster.local:5432/agola_configstore?sslmode=disable"
        # connString: "/opt/agola/configstore/db/db.db"
      objectStorage:
        type: s3
        endpoint: "http://minio.minio.svc.cluster.local:9000"
        bucket: agola-configstore
        accessKey: "{{ minio.rootUser }}"
        secretAccessKey: "{{ minio.rootPassword }}"
      web:
        listenAddress: ":4002"
    runservice:
      # debug: true
      dataDir: /mnt/agola/local/runservice
      db:
        type: postgres
        connString: "postgres://{{ agola.dbUser }}:{{ agola.dbPassword }}@default-postgres-postgresql.postgres.svc.cluster.local:5432/agola_runservice?sslmode=disable"
        # connString: "/opt/agola/runservice/db/db.db"
      objectStorage:
        type: s3
        # example with minio
        endpoint: "http://minio.minio.svc.cluster.local:9000"
        bucket: agola-runservice
        accessKey: "{{ minio.rootUser }}"
        secretAccessKey: "{{ minio.rootPassword }}"
      web:
        listenAddress: ":4000"
    executor:
      dataDir: /mnt/agola/local/executor
      # The directory containing the toolbox compiled for the various supported architectures
      toolboxPath: ./bin
      runserviceURL: "http://agola-internal:4000"
      web:
        listenAddress: ":4001"
      activeTasksLimit: 2
      driver:
        type: kubernetes
      allowPrivilegedContainers: true
    gitserver:
      dataDir: /mnt/agola/local/gitserver
      gatewayURL: "http://agola-internal:8000"
      web:
        listenAddress: ":4003"

View file

@@ -1,43 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: agola-configstore
spec:
  replicas: 1
  selector:
    matchLabels:
      app: agola
      component: configstore
  template:
    metadata:
      labels:
        app: agola
        component: configstore
    spec:
      serviceAccount: agola
      containers:
        - name: agola
          image: {{ agola_image }}
          command:
            - /bin/agola
            - serve
            - "--config"
            - /mnt/agola/config/config.yml
            - "--components"
            - configstore
            - --embedded-etcd
          env:
          ports:
            - containerPort: 4002
          volumeMounts:
            - name: config-volume
              mountPath: /mnt/agola/config
            - name: agola-localdata
              mountPath: /mnt/agola/local
      volumes:
        - name: config-volume
          secret:
            secretName: agola
        - name: agola-localdata
          emptyDir: {}

View file

@@ -1,12 +0,0 @@
---
# The service for internal component communication with the configstore.
apiVersion: v1
kind: Service
metadata:
  name: agola-configstore
spec:
  ports:
    - port: 4002
  selector:
    app: agola
    component: configstore

View file

@@ -1,42 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: agola-executor
spec:
  replicas: 2
  selector:
    matchLabels:
      app: agola
      component: executor
  template:
    metadata:
      labels:
        app: agola
        component: executor
    spec:
      serviceAccount: agola
      containers:
        - name: agola
          image: {{ agola_image }}
          command:
            - /bin/agola
            - serve
            - "--config"
            - /mnt/agola/config/config.yml
            - "--components"
            - executor
          env:
          ports:
            - containerPort: 4001
          volumeMounts:
            - name: config-volume
              mountPath: /mnt/agola/config
            - name: agola-localdata
              mountPath: /mnt/agola/local
      volumes:
        - name: config-volume
          secret:
            secretName: agola
        - name: agola-localdata
          emptyDir: {}

View file

@@ -1,42 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: agola-gateway
spec:
  replicas: 2
  selector:
    matchLabels:
      app: agola
      component: gateway
  template:
    metadata:
      labels:
        app: agola
        component: gateway
    spec:
      serviceAccount: agola
      containers:
        - name: agola
          image: {{ agola_image }}
          command:
            - /bin/agola
            - serve
            - "--config"
            - /mnt/agola/config/config.yml
            - "--components"
            - gateway,scheduler,notification
          env:
          ports:
            - containerPort: 8000
          volumeMounts:
            - name: config-volume
              mountPath: /mnt/agola/config
            - name: agola-localdata
              mountPath: /mnt/agola/local
      volumes:
        - name: config-volume
          secret:
            secretName: agola
        - name: agola-localdata
          emptyDir: {}

View file

@@ -1,13 +0,0 @@
---
# The client service. It's a node port for easier testing on minikube. Change
# it to become a LoadBalancer if needed.
apiVersion: v1
kind: Service
metadata:
  name: agola-gateway
spec:
  ports:
    - port: 8000
  selector:
    app: agola
    component: gateway

View file

@@ -1,47 +0,0 @@
---
# The gitserver. Since it primarily stores temporary git build data, the
# simplest way to deploy it is a deployment with 1 replica and an emptyDir
# volume. A statefulset with 1 replica and a persistent volume would be a
# better alternative.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: agola-gitserver
spec:
  # Don't increase the replicas
  replicas: 1
  selector:
    matchLabels:
      app: agola
      component: gitserver
  template:
    metadata:
      labels:
        app: agola
        component: gitserver
    spec:
      serviceAccount: agola
      containers:
        - name: agola
          image: {{ agola_image }}
          command:
            - /bin/agola
            - serve
            - "--config"
            - /mnt/agola/config/config.yml
            - "--components"
            - gitserver
          env:
          ports:
            - containerPort: 4003
          volumeMounts:
            - name: config-volume
              mountPath: /mnt/agola/config
            - name: agola-localdata
              mountPath: /mnt/agola/local
      volumes:
        - name: config-volume
          secret:
            secretName: agola
        - name: agola-localdata
          emptyDir: {}

View file

@@ -1,12 +0,0 @@
---
# The service for internal component communication with the gitserver.
apiVersion: v1
kind: Service
metadata:
  name: agola-gitserver
spec:
  ports:
    - port: 4003
  selector:
    app: agola
    component: gitserver

View file

@@ -1,21 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: agola
  labels:
    app: agola
    app.kubernetes.io/instance: agola
    app.kubernetes.io/name: agola
spec:
  rules:
    - host: "ci.{{ domain }}"
      http:
        paths:
          - pathType: Prefix
            path: /
            backend:
              service:
                name: agola
                port:
                  number: 8000

View file

@@ -1,12 +0,0 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: agola-data
spec:
  storageClassName: hcloud-volumes
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi

View file

@@ -1,12 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: agola
rules:
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - "*"

View file

@@ -1,13 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: agola
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: agola
subjects:
  - kind: ServiceAccount
    name: agola
    namespace: agola

View file

@@ -1,23 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: agola
rules:
  - apiGroups:
      - ""
    resources:
      - nodes
      - pods
      - pods/exec
      - configmaps
      - secrets
    verbs:
      - "*"
  - apiGroups:
      - "coordination.k8s.io"
    resources:
      - leases
    verbs:
      - "*"

View file

@@ -1,13 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: agola
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: agola
subjects:
  - kind: ServiceAccount
    name: agola
    namespace: agola

View file

@@ -1,5 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: agola

View file

@@ -1,43 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: agola-runservice
spec:
  replicas: 1
  selector:
    matchLabels:
      app: agola
      component: runservice
  template:
    metadata:
      labels:
        app: agola
        component: runservice
    spec:
      serviceAccount: agola
      containers:
        - name: agola
          image: {{ agola_image }}
          command:
            - /bin/agola
            - serve
            - "--config"
            - /mnt/agola/config/config.yml
            - "--components"
            - runservice
            - --embedded-etcd
          env:
          ports:
            - containerPort: 4000
          volumeMounts:
            - name: config-volume
              mountPath: /mnt/agola/config
            - name: agola-localdata
              mountPath: /mnt/agola/local
      volumes:
        - name: config-volume
          secret:
            secretName: agola
        - name: agola-localdata
          emptyDir: {}

View file

@@ -1,12 +0,0 @@
---
# The service for internal component communication with the runservice.
apiVersion: v1
kind: Service
metadata:
  name: agola-runservice
spec:
  ports:
    - port: 4000
  selector:
    app: agola
    component: runservice

View file

@@ -0,0 +1,104 @@
---
- name: Install Docker dependencies
  ansible.builtin.package:
    name:
      - ca-certificates
      - curl
      - gnupg
      - lsb-release
    state: latest

- name: Add Docker GPG key
  ansible.builtin.apt_key:
    url: https://download.docker.com/linux/ubuntu/gpg
    state: present
    keyring: /etc/apt/keyrings/docker.gpg

- name: Add Docker repository
  ansible.builtin.apt_repository:
    repo: deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu jammy stable
    state: present

- name: Install Docker
  ansible.builtin.package:
    name:
      - docker-ce
      - docker-ce-cli
      - containerd.io
      - docker-compose-plugin
    state: latest
  register: install_docker

- name: Restart Docker service
  ansible.builtin.service:
    name: docker
    state: restarted
  when: install_docker.changed

- name: Download concourse
  ansible.builtin.get_url:
    url: https://github.com/concourse/concourse/releases/download/v{{ concourse_version }}/concourse-{{ concourse_version }}-linux-amd64.tgz
    dest: /tmp/concourse.tgz
    mode: '0640'
    checksum: sha1:https://github.com/concourse/concourse/releases/download/v{{ concourse_version }}/concourse-{{ concourse_version }}-linux-amd64.tgz.sha1
  register: download_concourse

- name: Extract concourse
  ansible.builtin.unarchive:
    src: /tmp/concourse.tgz
    dest: /opt/
    remote_src: true
  when: download_concourse.changed

- name: Create concourse user
  ansible.builtin.user:
    name: concourse
    home: /var/lib/concourse
    shell: /bin/false
    groups: users,docker

- name: Create /etc/concourse
  ansible.builtin.file:
    path: /etc/concourse
    state: directory

- name: Create /var/lib/concourse/.ssh
  ansible.builtin.file:
    path: /var/lib/concourse/.ssh
    state: directory
    owner: concourse

- name: Deploy concourse keys
  ansible.builtin.copy:
    content: "{{ item.content }}"
    dest: "{{ item.dest }}"
    mode: '0440'
  loop:
    - content: "{{ concourse.worker.workerKey }}"
      dest: /var/lib/concourse/.ssh/id_rsa
    - content: "{{ concourse.worker.workerKeyPub }}"
      dest: /var/lib/concourse/.ssh/id_rsa.pub
    - content: "{{ concourse.worker.hostKeyPub }}"
      dest: /var/lib/concourse/.ssh/web_key.pub

- name: Create concourse config
  ansible.builtin.template:
    src: concourse-cfg.j2
    dest: /etc/concourse/worker
    mode: '0640'
  register: create_concourse_config

- name: Create concourse service file
  ansible.builtin.template:
    src: concourse-worker.service.j2
    dest: /lib/systemd/system/concourse-worker.service
    mode: '0640'
  register: create_concourse_service

- name: Make sure the concourse-worker service unit is running
  ansible.builtin.systemd:
    name: concourse-worker
    state: restarted
    daemon_reload: true
    enabled: true
  when: create_concourse_service.changed or create_concourse_config.changed
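Once these tasks have run, the worker should register itself with the TSA on its own. A minimal sanity check, partly on the worker host and partly from any machine with a configured fly target (the target name ci is hypothetical):

    systemctl status concourse-worker
    journalctl -u concourse-worker -n 50
    fly -t ci workers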

View file

@@ -0,0 +1,8 @@
CONCOURSE_WORK_DIR=/var/lib/concourse
CONCOURSE_TSA_HOST=172.23.2.10:32222
CONCOURSE_CONTAINERD_DNS_SERVER="1.1.1.1"
CONCOURSE_CONTAINERD_ALLOW_HOST_ACCESS="true"
CONCOURSE_TSA_PUBLIC_KEY=/var/lib/concourse/.ssh/web_key.pub
CONCOURSE_TSA_WORKER_PRIVATE_KEY=/var/lib/concourse/.ssh/id_rsa
CONCOURSE_RUNTIME=containerd
CONCOURSE_TAG="linux,vm,ubuntu"
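Note how this file ties the pieces of the commit together: CONCOURSE_TSA_HOST points at the control plane's private IP (k8s_ip 172.23.2.10 in the cluster inventory) and the 32222 NodePort that the Helm values below open for the worker gateway. A quick reachability check from the worker host, assuming netcat is available there:

    nc -vz 172.23.2.10 32222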

View file

@@ -0,0 +1,11 @@
[Unit]
Description=Concourse worker

[Service]
EnvironmentFile=/etc/concourse/worker
ExecStart=/opt/concourse/bin/concourse worker
KillSignal=SIGUSR1
TimeoutStopSec=300

[Install]
WantedBy=multi-user.target
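The playbook reloads and enables this unit through ansible.builtin.systemd; done by hand, the equivalent would be roughly:

    systemctl daemon-reload
    systemctl enable --now concourse-worker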

View file

@@ -53,3 +53,34 @@
    chart_version: 17.0.37
    update_repo_cache: true
    release_values: "{{ lookup('template', 'values.concourse.yml.j2') | from_yaml }}"

- name: Create concourse RBAC resources
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('template', 'rbac/deploy-role.yml.j2') | from_yaml }}"

- name: Bind service account for deployment
  kubernetes.core.k8s:
    name: "concourse-{{ item }}"
    namespace: "concourse-{{ item }}"
    definition: "{{ lookup('template', 'rbac/deploy-rolebinding.yml.j2') | from_yaml }}"
    state: present
  loop:
    - main
    - inetmock

- name: Create Gitea team credentials
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Secret
      metadata:
        name: gitea-credentials
        namespace: "concourse-{{ item }}"
      data:
        user: "{{ concourse.gitea.user | b64encode }}"
        token: "{{ concourse.gitea.token | b64encode }}"
  loop:
    - main
    - inetmock
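With the team namespaces bound and the Gitea credentials in place, logging in to one of the teams with fly would look roughly like this (ci.example.com stands in for the actual ci.{{ domain }} host; the target name ci is arbitrary):

    fly -t ci login -c https://ci.example.com -n inetmock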

View file

@@ -2,7 +2,11 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
-  name: agola-deploy
+  name: concourse-deploy
+  labels:
+    app.kubernetes.io/name: concourse
+    app.kubernetes.io/part-of: concourse
+    app.kubernetes.io/component: worker
 rules:
   - apiGroups:
       - ""

View file

@@ -2,12 +2,12 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
-  name: agola-deploy
+  name: {{ item }}
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
-  name: agola-deploy
+  name: concourse-deploy
 subjects:
   - kind: ServiceAccount
-    name: agola
-    namespace: agola
+    name: concourse-worker
+    namespace: concourse

View file

@@ -1,5 +1,14 @@
 web:
   enabled: true
+  env:
+    - name: CONCOURSE_ENABLE_ACROSS_STEP
+      value: "true"
+    - name: CONCOURSE_ENABLE_PIPELINE_INSTANCES
+      value: "true"
+  service:
+    workerGateway:
+      type: NodePort
+      NodePort: 32222
   ingress:
     enabled: true
     hosts:
@@ -27,7 +36,10 @@ concourse:
       host: default-postgres-postgresql.postgres.svc.cluster.local
       port: "5432"
       database: concourse
+    kubernetes:
+      teams:
+        - main
+        - inetmock
   worker:
     runtime: containerd
     persistence:
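To confirm the worker gateway is actually exposed on the advertised NodePort after the chart upgrade — a sketch assuming the release is installed into a namespace called concourse (not visible in this hunk):

    kubectl -n concourse get svc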

View file

@@ -0,0 +1,5 @@
---
- name: Prepare nodes
  hosts: concourse_workers
  roles:
    - role: concourse-worker
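Assuming the playbook is saved as concourse-workers.yaml next to the inventory (both file names are placeholders; the diff does not show them), the workers are provisioned with:

    ansible-playbook -i inventory/concourse.yaml concourse-workers.yaml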