Compare commits

..

52 Commits

Author SHA1 Message Date
Khwezi
0fb27042e6 Second drone ci port fix 2026-04-12 14:48:44 +02:00
Khwezi
5a98b0280e Fixed drone-ci port definitions 2026-04-12 14:43:52 +02:00
Khwezi
7a925af1c0 refactored drone ci template 2026-04-12 14:40:18 +02:00
Khwezi
db32982c30 Reconfigured semaphore secrets 2026-04-12 14:19:28 +02:00
Khwezi
6df82ede04 Light refactor 2026-04-12 14:08:52 +02:00
Khwezi
b473b0577b Readded init command and temporary mount point 2026-04-12 14:04:21 +02:00
Khwezi
c90c375348 Fixed semaphore mount paths 2026-04-12 14:00:26 +02:00
Khwezi
c0c94d320f Fixed semaphore role binding syntax error 2026-04-12 13:56:41 +02:00
Khwezi
2d4ccc7910 Added semaphore role binding 2026-04-12 13:54:24 +02:00
Khwezi
e1196d437f Added a semaphore service account 2026-04-12 13:50:09 +02:00
Khwezi
c6a3b4c473 Changed semaphore group policy 2026-04-12 13:44:44 +02:00
Khwezi
66f476e746 Refactored semaphore init 2026-04-12 13:41:51 +02:00
Khwezi
4e31981737 Ensured the volumes mount to the right target directory 2026-04-12 13:28:42 +02:00
Khwezi
a53578cfc9 Fixed private key secret 2026-04-12 13:23:12 +02:00
Khwezi
1992ce7d73 Corrected semaphore host affiny patching 2026-04-12 12:32:14 +02:00
Khwezi
5af2d33472 Added host alias to semaphore template for gitea 2026-04-12 12:29:44 +02:00
Khwezi
3be7ac45d6 Manually created public ssh key fix 2026-04-12 12:19:55 +02:00
Khwezi
db0fa845df Fixed semaphoe ssh secret 2026-04-12 12:18:01 +02:00
Khwezi
ffb5d9066f Added the semaphoreui template 2026-04-12 12:15:48 +02:00
Khwezi
188a89dcc5 changed node port for pgadmin 2026-04-12 11:54:44 +02:00
Khwezi
83e191252b Updated pgadmin to use a new port and use the correct nodeAffinity statement 2026-04-12 11:45:46 +02:00
Khwezi
2fd9e9d0c2 Refactored pgadmin k3s template 2026-04-12 11:40:43 +02:00
Khwezi
62835a8c1f Increased max CPU from 2 to 4 for nexus 2026-04-12 10:49:06 +02:00
Khwezi
a00771c190 Added nexus kubernetes template 2026-04-12 10:36:33 +02:00
Khwezi
678bd60383 Refactored searxing template 2026-04-11 17:20:44 +02:00
Khwezi
f8da3f6da9 Added two more k3s templates 2026-04-11 17:13:19 +02:00
Khwezi
df6eaef96c Turned async on 2026-04-11 16:46:13 +02:00
Khwezi
82a72307e4 Removed async from reboot playbook and set it to delay reboot for 5 seconds 2026-04-11 16:45:47 +02:00
Khwezi
9ab8b1e508 Added wait task to vpn playbook 2026-04-11 16:39:35 +02:00
Khwezi
421128b534 Refactored reboot playbook to create a 15 second delay and to run in async mode 2026-04-11 16:33:37 +02:00
Khwezi
160a93d93f Test refactor 2026-04-11 16:27:54 +02:00
Khwezi
d23a23860c Light refactor 2026-04-11 16:22:08 +02:00
Khwezi
353deb039d Lite refactor 2026-04-11 16:21:20 +02:00
Khwezi
22f55ce649 Updated vpn playbook to output diagnostics lines 2026-04-11 16:05:36 +02:00
Khwezi
d40c4a7036 Fixed semaphore named volume syntax error 2026-04-11 16:01:00 +02:00
Khwezi
82bba906f8 using named volumes for semaphore 2026-04-11 15:59:07 +02:00
Khwezi
0a884d52f0 Refactored host name groups 2026-04-11 15:42:08 +02:00
Khwezi
d691ea9396 Light refactor 2026-04-11 15:40:18 +02:00
Khwezi
fedcd29215 Reverted pub ssh key specification omission 2026-04-11 15:32:22 +02:00
Khwezi
c126bc1169 Removed ssh pub key line specification from ansible inventory 2026-04-11 15:29:14 +02:00
Khwezi
91913b64a1 Added VPN updater playbook 2026-04-11 15:14:23 +02:00
Khwezi
b3d7f2ab12 Added vpn host to manifest 2026-04-11 15:08:24 +02:00
Khwezi
992c6c3858 Specified timezone on semaphore 2026-04-11 15:03:08 +02:00
Khwezi
88c5e5bf3e Added playbook that reboots hosts 2026-04-11 14:57:42 +02:00
Khwezi
e154bd4b62 Changed database password variable name 2026-04-11 14:25:44 +02:00
Khwezi
cd283072fd Fixed variable formatting 2026-04-11 14:19:51 +02:00
Khwezi
1444a097b7 Hardcoded exposed port 2026-04-11 14:14:20 +02:00
Khwezi
cfeabcb338 Usings double quotes 2026-04-11 14:12:00 +02:00
Khwezi
7a4cf2232d Refactored semaphore stack to use inverted commas for values 2026-04-11 14:08:56 +02:00
Khwezi
d1076fe39a Added semaphore docker stack 2026-04-11 13:56:16 +02:00
Khwezi
0614dda247 Fixed ansible script 2026-04-11 11:20:31 +02:00
Khwezi
99efa12f43 Copied all stacks 2026-04-11 09:51:19 +02:00
43 changed files with 1832 additions and 392 deletions

View File

@@ -0,0 +1,22 @@
# Static Ansible inventory: connection defaults plus host groups.
[all:vars]
# Interpreter, login user and private key applied to every host below.
ansible_python_interpreter=/usr/bin/python3
ansible_user=ansible
ansible_ssh_private_key_file=~/.ssh/id_ed25519
# LXC containers (bootstrapped by common/create-lxc-ansible-user.yml).
[lxc_hosts]
postgres ansible_host=192.168.1.170
gitea ansible_host=192.168.1.172
appserver ansible_host=192.168.1.173
# General-purpose workload VMs (bootstrapped by common/create-ansible-user.yml).
[workload_hosts]
vpn ansible_host=192.168.1.138
authentik ansible_host=192.168.1.171
pangolingw ansible_host=192.168.1.175
# Hosts running Docker (targets for install-docker / update-docker playbooks).
# NOTE(review): authentik and appserver deliberately appear in two groups.
[docker_hosts]
authentik ansible_host=192.168.1.171
appserver ansible_host=192.168.1.173
# k3s cluster nodes.
[k3s_hosts]
k3smainnode ansible_host=192.168.1.177
k3sworkernode ansible_host=192.168.1.178

View File

@@ -1,8 +1,8 @@
# command: ansible-playbook -i config/<target manifest>.ini common/create-ansible-user.yml --ask-become-pass # command: ansible-playbook -i common/config.ini common/create-ansible-user.yml --ask-become-pass
# Note: this playbook requires an interactive mode or passed secret for privilege escalation # Note: this playbook requires an interactive mode or passed secret for privilege escalation
--- ---
- name: Create ansible user and configure passwordless sudo - name: Create ansible user and configure passwordless sudo
hosts: all hosts: workload_hosts
become: true become: true
become_method: sudo become_method: sudo
vars: vars:

View File

@@ -0,0 +1,81 @@
# command: ansible-playbook -i common/config.ini common/create-lxc-ansible-user.yml --ask-become-pass
# Note: this playbook requires an interactive mode or passed secret for privilege escalation
---
# Bootstraps the 'ansible' service user on LXC hosts: account + passwordless
# sudo + SSH key material, so later playbooks can connect as 'ansible'.
- name: Create ansible user and configure passwordless sudo
  hosts: lxc_hosts
  become: true
  become_method: sudo
  vars:
    # LXC containers are reached as root for the initial bootstrap.
    ansible_user: root
  tasks:
    - name: Ensure 'ansible' user exists
      ansible.builtin.user:
        name: ansible
        groups: sudo
        append: true  # add to 'sudo' without dropping existing groups
        shell: /bin/bash
        state: present

    - name: Check if passwordless sudo is already configured for 'ansible'
      # grep -Fxq = fixed string, whole line, quiet; rc != 0 means the line
      # (or the file) is absent.
      ansible.builtin.shell: |
        grep -Fxq "ansible ALL=(ALL) NOPASSWD: ALL" /etc/sudoers.d/ansible
      register: sudoers_check
      ignore_errors: true
      changed_when: false

    - name: Allow 'ansible' user passwordless sudo
      ansible.builtin.copy:
        dest: /etc/sudoers.d/ansible
        content: "ansible ALL=(ALL) NOPASSWD: ALL\n"
        owner: root
        group: root
        mode: '0440'
        # Refuse to install a syntactically broken sudoers fragment.
        validate: /usr/sbin/visudo -cf %s
      when: sudoers_check.rc != 0

    - name: Ensure /home/ansible/.ssh directory exists
      ansible.builtin.file:
        path: /home/ansible/.ssh
        state: directory
        owner: ansible
        group: ansible
        mode: '0700'

    # NOTE(review): this distributes the controller's own key pair to every
    # LXC host; consider a dedicated managed-side key pair instead.
    - name: Copy id_ed25519 private key to ansible user
      ansible.builtin.copy:
        src: ~/.ssh/id_ed25519
        dest: /home/ansible/.ssh/id_ed25519
        owner: ansible
        group: ansible
        mode: '0600'

    - name: Copy id_ed25519 public key to ansible user
      ansible.builtin.copy:
        src: ~/.ssh/id_ed25519.pub
        dest: /home/ansible/.ssh/id_ed25519.pub
        owner: ansible
        group: ansible
        mode: '0644'

    - name: Ensure authorized_keys exists
      ansible.builtin.file:
        path: /home/ansible/.ssh/authorized_keys
        state: touch
        owner: ansible
        group: ansible
        mode: '0600'

    - name: Read public key content
      ansible.builtin.slurp:
        src: /home/ansible/.ssh/id_ed25519.pub
      register: pubkey_content

    - name: Ensure public key is present in authorized_keys
      ansible.builtin.lineinfile:
        path: /home/ansible/.ssh/authorized_keys
        line: "{{ pubkey_content['content'] | b64decode | trim }}"
        owner: ansible
        group: ansible
        mode: '0600'
        create: true
        state: present

    - name: Allow 'ansible' user to write to /etc/systemd/resolved.conf
      # NOTE(review): handing an /etc config file to a non-root user is a
      # privilege-escalation surface; prefer editing it via become instead.
      ansible.builtin.file:
        path: /etc/systemd/resolved.conf
        owner: ansible
        group: ansible
        mode: '0664'
        state: file
      become: true

View File

@@ -1,7 +1,7 @@
# command: ansible-playbook -i config/<target manifest>.ini common/install-docker.yml # command: ansible-playbook -i common/config.ini common/install-docker.yml
--- ---
- name: Install Docker and Test - name: Install Docker and Test
hosts: all hosts: docker_hosts
become: true become: true
become_method: sudo become_method: sudo

View File

@@ -0,0 +1,15 @@
---
# Fire-and-forget reboot of every inventory host.
- name: Reboot Managed Hosts
  hosts: all
  become: true
  become_method: sudo
  tasks:
    - name: Trigger reboot with a slight delay
      # async: 1 with poll: 0 detaches the task immediately so the play does
      # not hang when SSH drops; the 5-second sleep gives Ansible time to
      # collect the task result before the host actually goes down.
      ansible.builtin.shell: "sleep 5 && reboot"
      async: 1
      poll: 0
      ignore_errors: true
    - name: Reboot command issues, exiting play
      # end_host stops the play for each host without waiting for it to come
      # back; callers needing post-reboot work use wait_for_connection
      # (see the VPN maintenance playbook elsewhere in this repo).
      ansible.builtin.meta: end_host

View File

@@ -1,7 +1,7 @@
# command: ansible-playbook -i config/<target manifest>.ini common/update-docker.yml # command: ansible-playbook -i common/config.ini common/update-docker.yml
--- ---
- name: Update Docker only on hosts where it is installed - name: Update Docker only on hosts where it is installed
hosts: all hosts: docker_hosts
become: true become: true
become_method: sudo become_method: sudo

View File

@@ -1,4 +1,4 @@
# command: ansible-playbook -i config/<target manifest>.ini common/update-hosts.yml # command: ansible-playbook -i common/config.ini common/update-hosts.yml
--- ---
- name: Update and upgrade all apt packages - name: Update and upgrade all apt packages
hosts: all hosts: all

View File

@@ -0,0 +1,34 @@
# command: ansible-playbook -i common/config.ini common/update-release.yml
---
# Unattended Ubuntu release upgrade: patch current release, reboot if the
# kernel/libs require it, run do-release-upgrade, then reboot again.
- name: Upgrade Ubuntu to next release
  hosts: workload_hosts, k3s_hosts
  become: true
  tasks:
    - name: Ensure update-manager-core is installed
      # Provides the do-release-upgrade tool used below.
      ansible.builtin.apt:
        name: update-manager-core
        state: present

    - name: Update all current packages to latest version
      ansible.builtin.apt:
        update_cache: true
        upgrade: dist

    - name: Check if a reboot is required before upgrading
      ansible.builtin.stat:
        path: /var/run/reboot-required
      register: reboot_required_pre

    - name: Reboot if required before major upgrade
      ansible.builtin.reboot:
      when: reboot_required_pre.stat.exists

    - name: Run do-release-upgrade non-interactively
      # -f DistUpgradeViewNonInteractive answers all prompts automatically.
      ansible.builtin.shell: do-release-upgrade -f DistUpgradeViewNonInteractive
      async: 3600  # Sets timeout to 1 hour
      poll: 60     # Checks status every 60 seconds
      register: upgrade_output

    - name: Reboot the server after successful upgrade
      ansible.builtin.reboot:
      when: upgrade_output is succeeded

View File

@@ -1,7 +0,0 @@
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=ansible
ansible_ssh_private_key_file=~/.ssh/id_ed25519
[gameservers]
minecraft ansible_host=minecraft.mngoma.lab

View File

@@ -0,0 +1,31 @@
# command: ansible-playbook -i common/config.ini update-vpn.yml
---
# Routine maintenance for the VPN host: wait for it to come back after a
# reboot, update Pi-hole, then run PiVPN diagnostics and print the output.
- name: Maintain VPN and Pi-hole
  hosts: vpn
  become: true
  become_method: sudo
  tasks:
    - name: Wait for VPN host to be ready after reboot
      ansible.builtin.wait_for_connection:
        delay: 5
        timeout: 300

    - name: Update Pi-hole
      ansible.builtin.shell: pihole -up
      args:
        executable: /bin/bash
      register: pihole_out
      # pihole -up reports this phrase when nothing was upgraded.
      changed_when: "'everything is up to date' not in pihole_out.stdout"

    - name: Run PiVPN diagnostics with automated 'Y' responses
      # `yes Y` feeds an endless stream of "Y" answers to pivpn's prompts.
      ansible.builtin.shell: yes Y | pivpn -d
      args:
        executable: /bin/bash
      register: pivpn_diag
      changed_when: false

    - name: Debug Output (Optional)
      ansible.builtin.debug:
        msg: "{{ pivpn_diag.stdout_lines }}"

View File

@@ -1,18 +1,20 @@
# 1. Install using script # Webmin Installation commands
## 1. Install using script
```bash ```bash
curl -o webmin-setup-repo.sh https://raw.githubusercontent.com/webmin/webmin/master/webmin-setup-repo.sh curl -o webmin-setup-repo.sh https://raw.githubusercontent.com/webmin/webmin/master/webmin-setup-repo.sh
sudo sh webmin-setup-repo.sh sudo sh webmin-setup-repo.sh
``` ```
# 2. Install Webmin ## 2. Install Webmin
```shell ```shell
sudo apt update sudo apt update
sudo apt install webmin sudo apt install webmin
``` ```
# 3. Open port ## 3. Open port
```bash ```bash
ufw allow 10000 ufw allow 10000

View File

@@ -0,0 +1,12 @@
services:
  dockhand:
    # Dockhand server: web UI on 3000; hawser agents dial in over websocket
    # (see the hawser stack's DOCKHAND_SERVER_URL pointing at this port).
    image: fnsys/dockhand:latest
    container_name: dockhand
    restart: unless-stopped
    ports:
      - "3000:3000"
    volumes:
      # Docker socket access grants root-equivalent control of the host.
      - /var/run/docker.sock:/var/run/docker.sock
      - dockhand_data:/app/data
volumes:
  dockhand_data:

View File

@@ -0,0 +1,15 @@
# NOTE(review): the top-level `version` key is obsolete in the Compose
# Specification and ignored by modern docker compose — safe to drop.
version: '3.8'
services:
  nexus:
    image: sonatype/nexus3:latest
    container_name: nexus-mirror
    restart: always
    ports:
      - "4009:8081"  # Nexus web UI / repository API
      - "4010:8082"  # extra connector (presumably docker registry) — confirm
    environment:
      # Memory limits: Adjust based on your VM capacity
      - INSTALL4J_ADD_VM_PARAMS=-Xms2g -Xmx2g -XX:MaxDirectMemorySize=2g -Dnexus.secrets.file=/nexus-data/nexus.secrets.json
    volumes:
      - ./nexus-data:/nexus-data

View File

@@ -0,0 +1,15 @@
version: '3.8'
services:
pgadmin:
image: dpage/pgadmin4
container_name: pgadmin
environment:
PGADMIN_DEFAULT_EMAIL: khwezi@litecharms.co.za
PGADMIN_DEFAULT_PASSWORD: Blackstar2@home
ports:
- "8080:80"
volumes:
- pgadmin-data:/var/lib/pgadmin
volumes:
pgadmin-data:

View File

@@ -0,0 +1,17 @@
services:
  portainer:
    container_name: portainer
    image: portainer/portainer-ce:lts
    restart: always
    extra_hosts:
      # Resolve the SSO hostname to its LAN address from inside the container.
      - "id.khongisa.co.za:192.168.1.171"
    volumes:
      # Docker socket access grants root-equivalent control of the host.
      - /var/run/docker.sock:/var/run/docker.sock
      - portainer_data:/data
    ports:
      # Port mappings quoted: the compose-recommended string form avoids the
      # YAML colon-separated-number (sexagesimal) parsing trap.
      - "9443:9443"
      - "8000:8000"
volumes:
  portainer_data:
    name: portainer_data

View File

@@ -0,0 +1,14 @@
# NOTE(review): the top-level `version` key is obsolete in the Compose
# Specification and ignored by modern docker compose.
version: '3'
services:
  registry:
    # Docker registry delegating authentication to Gitea's token endpoint.
    image: registry:2
    ports:
      - "3003:5000"
    environment:
      REGISTRY_AUTH: token
      REGISTRY_AUTH_TOKEN_REALM: https://gitea.khongisa.co.za/v2/token
      REGISTRY_AUTH_TOKEN_SERVICE: gitea
      REGISTRY_AUTH_TOKEN_ISSUER: gitea
    volumes:
      - ./data:/var/lib/registry
      # NOTE(review): token auth also needs rootcertbundle configured in
      # this mounted config.yml — verify it is set there.
      - ./config.yml:/etc/docker/registry/config.yml

View File

@@ -0,0 +1,27 @@
services:
  semaphore:
    image: semaphoreui/semaphore:latest
    container_name: semaphore
    restart: unless-stopped
    ports:
      - "4011:3000"  # host 4011 -> Semaphore web UI on container port 3000
    environment:
      # All credentials are injected from the host environment / .env file;
      # nothing sensitive is hard-coded here.
      SEMAPHORE_SCHEDULE_TIMEZONE: "Africa/Johannesburg"
      SEMAPHORE_DB_USER: "${SEMAPHORE_DB_USER}"
      SEMAPHORE_DB_PASS: "${SEMAPHORE_DB_PASSWORD}"
      SEMAPHORE_DB_HOST: "${SEMAPHORE_DB_HOST}"
      SEMAPHORE_DB_PORT: "5432"
      SEMAPHORE_DB_DIALECT: "postgres"
      SEMAPHORE_DB: "${SEMAPHORE_DB_NAME}"
      SEMAPHORE_ADMIN: "${SEMAPHORE_ADMIN_USERNAME}"
      SEMAPHORE_ADMIN_PASSWORD: "${SEMAPHORE_ADMIN_PASSWORD}"
      SEMAPHORE_ADMIN_NAME: "${SEMAPHORE_ADMIN_USERNAME}"
      SEMAPHORE_ADMIN_EMAIL: "${SEMAPHORE_ADMIN_EMAIL}"
      SEMAPHORE_ACCESS_KEY_ENCRYPTION: "${SEMAPHORE_ACCESS_KEY_ENCRYPTION}"
    volumes:
      - semaphore-tmp:/tmp/semaphore
      - semaphore-ssh:/home/semaphore/.ssh
volumes:
  semaphore-tmp:
  semaphore-ssh:

View File

@@ -0,0 +1,86 @@
# Authentik identity provider: API/web `server` plus background `worker`.
services:
  server:
    command: server
    env_file:
      - .env
    environment:
      AUTHENTIK_POSTGRESQL__HOST: 192.168.1.170
      AUTHENTIK_POSTGRESQL__NAME: authentik
      # NOTE(review): DB and SMTP passwords are committed in plaintext — move
      # them into the .env file (already loaded above) and rotate them.
      AUTHENTIK_POSTGRESQL__PASSWORD: 2h1y7H1kjdfdCLmd992SDL2iwtyjZ1iZb2X4SROK4GMvBiht
      AUTHENTIK_POSTGRESQL__USER: authentik
      AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:?secret key required}
      AUTHENTIK_LISTEN__TRUSTED_PROXY_CIDRS: "169.255.58.144/32, 192.168.1.0/24"
      AUTHENTIK_HOST_BROWSER: https://id.khongisa.co.za
      AUTHENTIK_HOST: https://id.khongisa.co.za
      # Booleans and numbers quoted: compose `environment` values must be
      # strings; bare true/false/80 are rejected or retyped by the parser.
      AUTHENTIK_INSECURE: "false"
      AUTHENTIK_ERROR_REPORTING__ENABLED: "true"
      COMPOSE_PORT_HTTP: "80"
      COMPOSE_PORT_HTTPS: "443"
      AUTHENTIK_EMAIL__HOST: mail.litecharms.co.za
      AUTHENTIK_EMAIL__PORT: "465"
      AUTHENTIK_EMAIL__USERNAME: authentik@litecharms.co.za
      AUTHENTIK_EMAIL__PASSWORD: 'N<7`7986ZyHL'
      # NOTE(review): USE_TLS and USE_SSL are mutually exclusive in Django's
      # SMTP backend — port 465 implies SSL; confirm and drop USE_TLS.
      AUTHENTIK_EMAIL__USE_TLS: "true"
      AUTHENTIK_EMAIL__USE_SSL: "true"
      AUTHENTIK_EMAIL__TIMEOUT: "10"
      AUTHENTIK_EMAIL__FROM: authentik@litecharms.co.za
      AUTHENTIK_POSTGRESQL__CONN_MAX_AGE: "60"
      AUTHENTIK_POSTGRESQL__CONN_HEALTH_CHECKS: "true"
      AUTHENTIK_WORKER__THREADS: "8"
      AUTHENTIK_POSTGRESQL__DISABLE_SERVER_SIDE_CURSORS: "true"
      AUTHENTIK_SERVER__WORKERS: "3"
      AUTHENTIK_SERVER__THREADS: "4"
      AUTHENTIK_LOG_LEVEL: warning
    image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2026.2.1}
    ports:
      - ${COMPOSE_PORT_HTTP:-9000}:9000
      - ${COMPOSE_PORT_HTTPS:-9443}:9443
    restart: unless-stopped
    shm_size: 512mb
    volumes:
      - ./data:/data
      - ./custom-templates:/templates
  worker:
    command: worker
    env_file:
      - .env
    environment:
      AUTHENTIK_POSTGRESQL__HOST: 192.168.1.170
      AUTHENTIK_POSTGRESQL__NAME: authentik
      # NOTE(review): same plaintext secrets as the server — see note above.
      AUTHENTIK_POSTGRESQL__PASSWORD: 2h1y7H1kjdfdCLmd992SDL2iwtyjZ1iZb2X4SROK4GMvBiht
      AUTHENTIK_POSTGRESQL__USER: authentik
      AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:?secret key required}
      AUTHENTIK_LISTEN__TRUSTED_PROXY_CIDRS: "169.255.58.144/32, 192.168.1.0/24"
      AUTHENTIK_HOST_BROWSER: https://id.khongisa.co.za
      AUTHENTIK_HOST: https://id.khongisa.co.za
      AUTHENTIK_INSECURE: "false"
      AUTHENTIK_ERROR_REPORTING__ENABLED: "true"
      COMPOSE_PORT_HTTP: "80"
      COMPOSE_PORT_HTTPS: "443"
      AUTHENTIK_EMAIL__HOST: mail.litecharms.co.za
      AUTHENTIK_EMAIL__PORT: "465"
      AUTHENTIK_EMAIL__USERNAME: authentik@litecharms.co.za
      AUTHENTIK_EMAIL__PASSWORD: 'N<7`7986ZyHL'
      AUTHENTIK_EMAIL__USE_TLS: "true"
      AUTHENTIK_EMAIL__USE_SSL: "true"
      AUTHENTIK_EMAIL__TIMEOUT: "10"
      AUTHENTIK_EMAIL__FROM: authentik@litecharms.co.za
      AUTHENTIK_POSTGRESQL__CONN_MAX_AGE: "60"
      AUTHENTIK_POSTGRESQL__CONN_HEALTH_CHECKS: "true"
      AUTHENTIK_POSTGRESQL__DISABLE_SERVER_SIDE_CURSORS: "true"
      AUTHENTIK_WORKER__PROCESSES: "2"
      AUTHENTIK_WORKER__THREADS: "12"
      AUTHENTIK_LOG_LEVEL: warning
      AUTHENTIK_BLUEPRINTS__RESCAN: "false"
    image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2026.2.1}
    restart: unless-stopped
    shm_size: 512mb
    user: root
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./data:/data
      - ./certs:/certs
      - ./custom-templates:/templates
volumes:
  # NOTE(review): this named volume is not referenced by either service —
  # presumably a leftover; confirm before removing.
  database:
    driver: local

View File

@@ -0,0 +1,10 @@
services:
  hawser:
    # Dockhand agent: dials out to the Dockhand server over websocket.
    image: ghcr.io/finsys/hawser:latest
    # NOTE(review): container_name "hawzer" does not match the service name
    # "hawser" — looks like a typo; confirm nothing references this name
    # before renaming.
    container_name: hawzer
    restart: unless-stopped
    environment:
      - DOCKHAND_SERVER_URL=ws://192.168.1.173:3000
      # NOTE(review): agent token committed in plaintext — move to an .env
      # file / secret store and rotate.
      - TOKEN=4epVzroLNuzLw4rnB7IvwYwPcTHaS8R6DkeRPxK8t48
    volumes:
      # Docker socket access grants root-equivalent control of the host.
      - /var/run/docker.sock:/var/run/docker.sock

View File

@@ -0,0 +1,35 @@
# To see all available options, please visit the docs:
# https://docs.pangolin.net/
gerbil:
  start_port: 51820
  base_endpoint: "khongisa.co.za"
app:
  dashboard_url: "https://khongisa.co.za"
  log_level: "info"
  telemetry:
    anonymous_usage: true
domains:
  domain1:
    base_domain: "khongisa.co.za"
server:
  # NOTE(review): server secret committed in plaintext — rotate it and load
  # it from the environment if Pangolin supports that.
  secret: "WlCd2hoEWpQgE+XyHBn+xjQ5t/Irh91+i4krsnUgPog="
  cors:
    origins: ["https://khongisa.co.za"]
    methods: ["GET", "POST", "PUT", "DELETE", "PATCH"]
    allowed_headers: ["X-CSRF-Token", "Content-Type"]
    credentials: false
  maxmind_db_path: "./config/GeoLite2-Country.mmdb"
flags:
  require_email_verification: false
  disable_signup_without_invite: true
  disable_user_create_org: false
  allow_raw_resources: true
crowdsec:
  enabled: true
  appsec_body_limit: 0

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,91 @@
# Traefik dynamic configuration (file provider): middlewares, routers and
# services fronting Pangolin, plus a buffered route for the Nexus registry.
http:
  middlewares:
    # Unlimited request body with a 2MB in-memory window, for large docker
    # image layer uploads to Nexus.
    nexus-buffer:
      buffering:
        maxRequestBodyBytes: 0
        memRequestBodyBytes: 2097152 # 2MB
    crowdsec:
      plugin:
        crowdsec:
          crowdsecAppsecBodyLimit: 0
    badger:
      plugin:
        badger:
          disableForwardAuth: true
    redirect-to-https:
      redirectScheme:
        scheme: https
  routers:
    # Docker registry v2 API on the nexus subdomain.
    nexus-docker-router:
      rule: "Host(`nexus.khongisa.co.za`) && PathPrefix(`/v2`)"
      service: api-service # Or whichever service points to Pangolin port 3000/3001
      entryPoints:
        - websecure
      middlewares:
        - nexus-buffer
        - badger
      tls:
        certResolver: letsencrypt
    # HTTP to HTTPS redirect router
    main-app-router-redirect:
      rule: "Host(`khongisa.co.za`)"
      service: next-service
      entryPoints:
        - web
      middlewares:
        - redirect-to-https
        - badger
    # Next.js router (handles everything except API and WebSocket paths)
    next-router:
      rule: "Host(`khongisa.co.za`) && !PathPrefix(`/api/v1`)"
      service: next-service
      entryPoints:
        - websecure
      middlewares:
        - badger
      tls:
        certResolver: letsencrypt
    # API router (handles /api/v1 paths)
    api-router:
      rule: "Host(`khongisa.co.za`) && PathPrefix(`/api/v1`)"
      service: api-service
      entryPoints:
        - websecure
      middlewares:
        - badger
      tls:
        certResolver: letsencrypt
    # WebSocket router
    ws-router:
      rule: "Host(`khongisa.co.za`)"
      service: api-service
      entryPoints:
        - websecure
      middlewares:
        - badger
      tls:
        certResolver: letsencrypt
  services:
    next-service:
      loadBalancer:
        servers:
          - url: "http://pangolin:3002" # Next.js server
    api-service:
      loadBalancer:
        servers:
          - url: "http://pangolin:3000" # API/WebSocket server
tcp:
  serversTransports:
    pp-transport-v1:
      proxyProtocol:
        version: 1
    pp-transport-v2:
      proxyProtocol:
        version: 2

View File

@@ -0,0 +1,73 @@
# Traefik dynamic configuration (file provider) — variant without the
# crowdsec and nexus-buffer middlewares.
http:
  middlewares:
    badger:
      plugin:
        badger:
          disableForwardAuth: true
    redirect-to-https:
      redirectScheme:
        scheme: https
  routers:
    # HTTP to HTTPS redirect router
    main-app-router-redirect:
      rule: "Host(`khongisa.co.za`)"
      service: next-service
      entryPoints:
        - web
      middlewares:
        - redirect-to-https
        - badger
    # Next.js router (handles everything except API and WebSocket paths)
    next-router:
      rule: "Host(`khongisa.co.za`) && !PathPrefix(`/api/v1`)"
      service: next-service
      entryPoints:
        - websecure
      middlewares:
        - badger
      tls:
        certResolver: letsencrypt
    # API router (handles /api/v1 paths)
    api-router:
      rule: "Host(`khongisa.co.za`) && PathPrefix(`/api/v1`)"
      service: api-service
      entryPoints:
        - websecure
      middlewares:
        - badger
      tls:
        certResolver: letsencrypt
    # WebSocket router
    ws-router:
      rule: "Host(`khongisa.co.za`)"
      service: api-service
      entryPoints:
        - websecure
      middlewares:
        - badger
      tls:
        certResolver: letsencrypt
  services:
    next-service:
      loadBalancer:
        servers:
          - url: "http://pangolin:3002" # Next.js server
    api-service:
      loadBalancer:
        servers:
          - url: "http://pangolin:3000" # API/WebSocket server
tcp:
  serversTransports:
    pp-transport-v1:
      proxyProtocol:
        version: 1
    pp-transport-v2:
      proxyProtocol:
        version: 2

View File

@@ -0,0 +1,77 @@
# Traefik dynamic configuration (file provider) — variant with the crowdsec
# middleware defined but not attached to any router below.
http:
  middlewares:
    # NOTE(review): defined but unused by the routers in this file — confirm
    # whether it should be added to the middleware chains.
    crowdsec:
      plugin:
        crowdsec:
          crowdsecAppsecBodyLimit: 0
    badger:
      plugin:
        badger:
          disableForwardAuth: true
    redirect-to-https:
      redirectScheme:
        scheme: https
  routers:
    # HTTP to HTTPS redirect router
    main-app-router-redirect:
      rule: "Host(`khongisa.co.za`)"
      service: next-service
      entryPoints:
        - web
      middlewares:
        - redirect-to-https
        - badger
    # Next.js router (handles everything except API and WebSocket paths)
    next-router:
      rule: "Host(`khongisa.co.za`) && !PathPrefix(`/api/v1`)"
      service: next-service
      entryPoints:
        - websecure
      middlewares:
        - badger
      tls:
        certResolver: letsencrypt
    # API router (handles /api/v1 paths)
    api-router:
      rule: "Host(`khongisa.co.za`) && PathPrefix(`/api/v1`)"
      service: api-service
      entryPoints:
        - websecure
      middlewares:
        - badger
      tls:
        certResolver: letsencrypt
    # WebSocket router
    ws-router:
      rule: "Host(`khongisa.co.za`)"
      service: api-service
      entryPoints:
        - websecure
      middlewares:
        - badger
      tls:
        certResolver: letsencrypt
  services:
    next-service:
      loadBalancer:
        servers:
          - url: "http://pangolin:3002" # Next.js server
    api-service:
      loadBalancer:
        servers:
          - url: "http://pangolin:3000" # API/WebSocket server
tcp:
  serversTransports:
    pp-transport-v1:
      proxyProtocol:
        version: 1
    pp-transport-v2:
      proxyProtocol:
        version: 2

View File

@@ -0,0 +1,58 @@
# Traefik static configuration. Dynamic config comes from two providers:
# Pangolin's HTTP endpoint (polled) and the local dynamic_config.yml file.
api:
  # NOTE(review): insecure API/dashboard exposes the Traefik dashboard
  # without auth — acceptable only if unreachable from outside; confirm.
  insecure: true
  dashboard: true
providers:
  http:
    endpoint: "http://pangolin:3001/api/v1/traefik-config"
    pollInterval: "5s"
  file:
    filename: "/etc/traefik/dynamic_config.yml"
experimental:
  plugins:
    badger:
      moduleName: "github.com/fosrl/badger"
      version: "v1.3.1"
log:
  level: "INFO"
  format: "common"
  maxSize: 100
  maxBackups: 3
  maxAge: 3
  compress: true
certificatesResolvers:
  letsencrypt:
    acme:
      httpChallenge:
        entryPoint: web
      email: "contact@litecharms.co.za"
      storage: "/letsencrypt/acme.json"
      caServer: "https://acme-v02.api.letsencrypt.org/directory"
entryPoints:
  web:
    address: ":80"
  websecure:
    address: ":443"
    transport:
      respondingTimeouts:
        # "0" disables the read timeout (long uploads, e.g. Nexus pushes).
        readTimeout: "0"
        idleTimeout: "1200s"
        #readTimeout: "30m"
    http:
      tls:
        certResolver: "letsencrypt"
    encodedCharacters:
      allowEncodedSlash: true
      allowEncodedQuestionMark: true
  udp-51821:
    address: ":51821/udp"
serversTransport:
  insecureSkipVerify: true
ping:
  entryPoint: "web"

View File

@@ -0,0 +1,56 @@
# Traefik static configuration — variant with a 30m read timeout instead of
# the disabled timeout used by the other copy of this file.
api:
  # NOTE(review): insecure API/dashboard exposes the Traefik dashboard
  # without auth — acceptable only if unreachable from outside; confirm.
  insecure: true
  dashboard: true
providers:
  http:
    endpoint: "http://pangolin:3001/api/v1/traefik-config"
    pollInterval: "5s"
  file:
    filename: "/etc/traefik/dynamic_config.yml"
experimental:
  plugins:
    badger:
      moduleName: "github.com/fosrl/badger"
      version: "v1.3.1"
log:
  level: "INFO"
  format: "common"
  maxSize: 100
  maxBackups: 3
  maxAge: 3
  compress: true
certificatesResolvers:
  letsencrypt:
    acme:
      httpChallenge:
        entryPoint: web
      email: "contact@litecharms.co.za"
      storage: "/letsencrypt/acme.json"
      caServer: "https://acme-v02.api.letsencrypt.org/directory"
entryPoints:
  web:
    address: ":80"
  websecure:
    address: ":443"
    transport:
      respondingTimeouts:
        readTimeout: "30m"
    http:
      tls:
        certResolver: "letsencrypt"
    encodedCharacters:
      allowEncodedSlash: true
      allowEncodedQuestionMark: true
  udp-51821:
    address: ":51821/udp"
serversTransport:
  insecureSkipVerify: true
ping:
  entryPoint: "web"

View File

@@ -0,0 +1,69 @@
# Pangolin stack: pangolin (app), gerbil (wireguard gateway) and traefik
# (proxy, sharing gerbil's network namespace so it binds the same ports).
name: pangolin
networks:
  proxy_net:
    driver: bridge
services:
  pangolin:
    image: docker.io/fosrl/pangolin:ee-1.17.0
    container_name: pangolin
    restart: unless-stopped
    volumes:
      - ./config:/app/config
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3001/api/v1/"]
      interval: "10s"
      timeout: "10s"
      retries: 15
    networks:
      - proxy_net
    deploy:
      resources:
        reservations:
          memory: 800M
        limits:
          memory: 3G
  gerbil:
    image: docker.io/fosrl/gerbil:1.3.0
    container_name: gerbil
    restart: unless-stopped
    depends_on:
      pangolin:
        condition: service_healthy
    command:
      - --reachableAt=http://gerbil:3004
      - --generateAndSaveKeyTo=/var/config/key
      - --remoteConfig=http://pangolin:3001/api/v1/
    volumes:
      - ./config/:/var/config
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    ports:
      # Port mappings quoted: the compose-recommended string form avoids
      # YAML's colon-separated-number parsing trap.
      - "51821:51821/udp"
      - "51820:51820/udp"
      - "21820:21820/udp"
      - "443:443"
      - "80:80"
    networks:
      - proxy_net
  traefik:
    image: docker.io/traefik:v3.6
    container_name: traefik
    restart: unless-stopped
    # Shares gerbil's network namespace, so traefik serves on gerbil's ports.
    network_mode: service:gerbil
    depends_on:
      pangolin:
        condition: service_healthy
    command:
      - --configFile=/etc/traefik/traefik_config.yml
    volumes:
      - ./config/traefik:/etc/traefik:ro
      - ./config/letsencrypt:/letsencrypt
      - ./config/traefik/logs:/var/log/traefik
      - /var/run/docker.sock:/var/run/docker.sock:ro

View File

@@ -0,0 +1,69 @@
# Pangolin stack (older ee-1.16.2 pin): pangolin (app), gerbil (wireguard
# gateway) and traefik sharing gerbil's network namespace.
name: pangolin
networks:
  proxy_net:
    driver: bridge
services:
  pangolin:
    image: docker.io/fosrl/pangolin:ee-1.16.2
    container_name: pangolin
    restart: unless-stopped
    volumes:
      - ./config:/app/config
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3001/api/v1/"]
      interval: "10s"
      timeout: "10s"
      retries: 15
    networks:
      - proxy_net
    deploy:
      resources:
        reservations:
          memory: 800M
        limits:
          memory: 3G
  gerbil:
    image: docker.io/fosrl/gerbil:1.3.0
    container_name: gerbil
    restart: unless-stopped
    depends_on:
      pangolin:
        condition: service_healthy
    command:
      - --reachableAt=http://gerbil:3004
      - --generateAndSaveKeyTo=/var/config/key
      - --remoteConfig=http://pangolin:3001/api/v1/
    volumes:
      - ./config/:/var/config
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    ports:
      - 51821:51821/udp
      - 51820:51820/udp
      - 21820:21820/udp
      - 443:443
      - 80:80
    networks:
      - proxy_net
  traefik:
    image: docker.io/traefik:v3.6
    container_name: traefik
    restart: unless-stopped
    # Shares gerbil's network namespace, so traefik serves on gerbil's ports.
    network_mode: service:gerbil
    depends_on:
      pangolin:
        condition: service_healthy
    command:
      - --configFile=/etc/traefik/traefik_config.yml
    volumes:
      - ./config/traefik:/etc/traefik:ro
      - ./config/letsencrypt:/letsencrypt
      - ./config/traefik/logs:/var/log/traefik
      - /var/run/docker.sock:/var/run/docker.sock:ro

View File

@@ -1,331 +1,172 @@
--- ---
# Namespace for Drone CI
apiVersion: v1 apiVersion: v1
kind: Namespace kind: Namespace
metadata: metadata:
name: droneci name: drone-ci
--- ---
# Service Account
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: droneci-sa name: drone-runner-sa
namespace: droneci namespace: drone-ci
--- ---
# ConfigMap for Drone configuration apiVersion: rbac.authorization.k8s.io/v1
apiVersion: v1 kind: Role
kind: ConfigMap
metadata: metadata:
name: droneci-config name: drone-runner-role
namespace: droneci namespace: drone-ci
data: rules:
server.domain: "droneci.apps.mngoma.lab" - apiGroups: [""]
server.proto: "https" resources: ["pods", "pods/log", "secrets"]
server.runnername: "drone-runner" verbs: ["get", "create", "delete", "list", "watch", "update"]
server.runnercapacity: "2"
server.runnernetworks: "default"
database.type: "postgres"
database.host: "192.168.1.137:5432"
database.name: "dronecim"
gitea.server: "https://gitea.apps.mngoma.lab"
gitea.server.internal: "https://gitea-server.gitea.svc.cluster.local"
--- ---
# Secret for Drone credentials apiVersion: rbac.authorization.k8s.io/v1
apiVersion: v1 kind: RoleBinding
kind: Secret
metadata: metadata:
name: droneci-secret name: drone-runner-rb
namespace: droneci namespace: drone-ci
type: Opaque subjects:
data: - kind: ServiceAccount
server.rpctoken: MDFLNlFHTkE4VEMxQjJGVzNGV0JSWDJFNE4= name: drone-runner-sa
database.username: YXBwX3VzZXI= namespace: drone-ci
database.password: MTIzNDU= roleRef:
database.connectstring: cG9zdGdyZXM6Ly9hcHBfdXNlcjoxMjM0NUAxOTIuMTY4LjEuMTM3OjU0MzIvZHJvbmVjaW0/c3NsbW9kZT1kaXNhYmxl kind: Role
gitea.clientid: MGRiNTliZDAtMGI3Ni00ODgxLThhODQtNjI0N2ZlYTExOTcz name: drone-runner-role
gitea.clientsecret: Z3RvX3l6bXB6NmJvZG52cmRnMnM1MmVmNWF1c3ozZTYzNGdyeTc0MjJqZ2hwd3ZnbGc2M2JtcnE= apiGroup: rbac.authorization.k8s.io
--- ---
# Persistent Volume for Drone data
apiVersion: v1
kind: PersistentVolume
metadata:
name: droneci-pv
labels:
type: local
spec:
capacity:
storage: 5Gi
accessModes: ["ReadWriteOnce"]
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/droneci
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values: ["lead"]
persistentVolumeReclaimPolicy: Retain
---
# Persistent Volume Claim
apiVersion: v1 apiVersion: v1
kind: PersistentVolumeClaim kind: PersistentVolumeClaim
metadata: metadata:
name: droneci-pvc name: drone-server-data-pvc
namespace: droneci namespace: drone-ci
spec: spec:
accessModes: ["ReadWriteOnce"] accessModes:
storageClassName: local-pvs - ReadWriteOnce
storageClassName: nfs-storage
resources: resources:
requests: requests:
storage: 5Gi storage: 5Gi
--- ---
# Drone Server Deployment apiVersion: v1
kind: Secret
metadata:
name: drone-secrets
namespace: drone-ci
type: Opaque
stringData:
DRONE_RPC_SECRET: "b505b2906ae213070b10d9698cc35e84"
DRONE_GITEA_CLIENT_ID: "a9b4a947-0b4c-4782-a5f8-3ed79a4b295d"
DRONE_GITEA_CLIENT_SECRET: "gto_ukxcserdy7vei36git4tbuz2tdyez4rb2eo5woownmtyct3lz3aq"
---
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
name: drone name: drone-server
namespace: droneci namespace: drone-ci
labels:
app.kubernetes.io/name: drone
spec: spec:
replicas: 1 replicas: 1
selector: selector:
matchLabels: matchLabels:
app.kubernetes.io/name: drone app: drone-server
template: template:
metadata: metadata:
labels: labels:
app.kubernetes.io/name: drone app: drone-server
spec: spec:
serviceAccountName: droneci-sa
hostAliases: hostAliases:
- ip: "192.168.1.160" - ip: "169.255.58.144"
hostnames: hostnames: ["gitea.khongisa.co.za"]
- "gitea.apps.mngoma.lab"
containers: containers:
- name: drone - name: drone-server
image: drone/drone:latest image: drone/drone:2
ports: ports:
- containerPort: 80 - containerPort: 80
name: http name: http
resources: resources:
requests: requests:
cpu: "100m"
memory: "256Mi" memory: "256Mi"
cpu: "250m"
limits: limits:
memory: "512Mi"
cpu: "500m" cpu: "500m"
memory: "512Mi"
env: env:
- name: DRONE_SERVER_HOST # FIX: Explicitly bind the address to bypass port validation logic
valueFrom: - name: DRONE_SERVER_ADDR
configMapKeyRef:
name: droneci-config
key: server.domain
- name: DRONE_SERVER_PROTO
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.proto
- name: DRONE_SERVER_PORT
value: ":80" value: ":80"
- name: DRONE_TLS_AUTOCERT - name: DRONE_SERVER_HOST
value: "false" value: "drone.khongisa.co.za"
- name: DRONE_LOGS_DEBUG - name: DRONE_SERVER_PROTO
value: "true" value: "https"
- name: DRONE_RPC_SECRET
valueFrom:
secretKeyRef:
name: droneci-secret
key: server.rpctoken
- name: DRONE_DATABASE_DRIVER
valueFrom:
configMapKeyRef:
name: droneci-config
key: database.type
- name: DRONE_DATABASE_DATASOURCE
valueFrom:
secretKeyRef:
name: droneci-secret
key: database.connectstring
- name: DRONE_DB_USER
valueFrom:
secretKeyRef:
name: droneci-secret
key: database.username
- name: DRONE_DB_PASS
valueFrom:
secretKeyRef:
name: droneci-secret
key: database.password
- name: DRONE_GITEA_SERVER - name: DRONE_GITEA_SERVER
valueFrom: value: "https://gitea.khongisa.co.za"
configMapKeyRef: - name: DRONE_RPC_SECRET
name: droneci-config valueFrom: { secretKeyRef: { name: drone-secrets, key: DRONE_RPC_SECRET } }
key: gitea.server
- name: DRONE_GITEA_CLIENT_ID - name: DRONE_GITEA_CLIENT_ID
valueFrom: valueFrom: { secretKeyRef: { name: drone-secrets, key: DRONE_GITEA_CLIENT_ID } }
secretKeyRef:
name: droneci-secret
key: gitea.clientid
- name: DRONE_GITEA_CLIENT_SECRET - name: DRONE_GITEA_CLIENT_SECRET
valueFrom: valueFrom: { secretKeyRef: { name: drone-secrets, key: DRONE_GITEA_CLIENT_SECRET } }
secretKeyRef: - name: DRONE_DATABASE_DRIVER
name: droneci-secret value: "sqlite3"
key: gitea.clientsecret - name: DRONE_DATABASE_DATASOURCE
- name: DRONE_GITEA_SKIP_VERIFY value: "/data/database.sqlite"
value: "true"
volumeMounts: volumeMounts:
- name: drone-storage - name: data
mountPath: /data mountPath: /data
volumes: volumes:
- name: drone-storage - name: data
persistentVolumeClaim: persistentVolumeClaim:
claimName: droneci-pvc claimName: drone-server-data-pvc
--- ---
# Drone Server Service
apiVersion: v1
kind: Service
metadata:
name: drone-server
namespace: droneci
spec:
selector:
app.kubernetes.io/name: drone
ports:
- name: http
port: 80
targetPort: 80
type: ClusterIP
---
# Drone Runner Deployment
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
name: drone-runner name: drone-runner
namespace: droneci namespace: drone-ci
labels:
app.kubernetes.io/name: drone-runner
spec: spec:
replicas: 1 replicas: 1
selector: selector:
matchLabels: matchLabels:
app.kubernetes.io/name: drone-runner app: drone-runner
template: template:
metadata: metadata:
labels: labels:
app.kubernetes.io/name: drone-runner app: drone-runner
spec: spec:
serviceAccountName: droneci-sa serviceAccountName: drone-runner-sa
hostAliases: hostAliases:
- ip: "192.168.1.160" - ip: "169.255.58.144"
hostnames: hostnames: ["gitea.khongisa.co.za"]
- "droneci.apps.mngoma.lab"
containers: containers:
- name: runner - name: drone-runner
image: drone/drone-runner-kube:latest image: drone/drone-runner-kube:latest
ports:
- containerPort: 3000
env:
# propagate SSL skip and internal Gitea to ephemeral pods
- name: DRONE_RUNNER_ENV_VARS
valueFrom:
configMapKeyRef:
name: droneci-config
key: gitea.server.internal
- name: DRONE_RPC_HOST
value: drone-server.droneci.svc.cluster.local
- name: DRONE_RPC_PROTO
value: "http"
- name: DRONE_RPC_SECRET
valueFrom:
secretKeyRef:
name: droneci-secret
key: server.rpctoken
- name: DRONE_RUNNER_NAME
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.runnername
- name: DRONE_RUNNER_CAPACITY
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.runnercapacity
- name: DRONE_RUNNER_NETWORKS
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.runnernetworks
resources: resources:
requests: requests:
cpu: "100m"
memory: "128Mi" memory: "128Mi"
cpu: "200m"
limits: limits:
cpu: "300m"
memory: "256Mi" memory: "256Mi"
cpu: "400m" env:
- name: DRONE_RPC_PROTO
value: "http"
- name: DRONE_RPC_HOST
value: "drone-server.drone-ci.svc.cluster.local"
- name: DRONE_RPC_SECRET
valueFrom: { secretKeyRef: { name: drone-secrets, key: DRONE_RPC_SECRET } }
- name: DRONE_NAMESPACE_DEFAULT
value: "drone-ci"
--- ---
# Drone IngressRoute for Traefik apiVersion: v1
apiVersion: traefik.io/v1alpha1 kind: Service
kind: IngressRoute
metadata: metadata:
name: droneci-web name: drone-server
namespace: droneci namespace: drone-ci
spec: spec:
entryPoints: type: NodePort
- websecure selector:
routes: app: drone-server
- match: Host(`droneci.apps.mngoma.lab`) ports:
kind: Rule - name: http
services:
- name: drone-server
port: 80 port: 80
scheme: http targetPort: 80
tls: {} nodePort: 31001
---
# ClusterRole for Drone CI Service Account
# Lets the Kubernetes runner create build pods and their supporting
# resources in any namespace.
# NOTE(review): this grants cluster-wide create/delete on secrets,
# workloads and namespaces — if builds only ever run in one namespace,
# a namespaced Role would be a much smaller blast radius; confirm intent.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: droneci-cluster-role
rules:
  # core API
  - apiGroups: [""]
    resources: ["pods", "pods/exec", "pods/log", "services", "endpoints", "configmaps", "secrets", "persistentvolumeclaims", "namespaces"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  - apiGroups: ["apps"]
    resources: ["deployments", "replicasets", "statefulsets"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  - apiGroups: ["batch"]
    resources: ["jobs", "cronjobs"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  # "extensions" is kept for very old clusters; networking.k8s.io is current.
  - apiGroups: ["extensions", "networking.k8s.io"]
    resources: ["ingresses"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  - apiGroups: ["policy"]
    resources: ["poddisruptionbudgets"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
# ClusterRoleBinding for Drone CI Service Account
# Attaches the role above to the droneci-sa service account.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: droneci-cluster-rolebinding
subjects:
  - kind: ServiceAccount
    name: droneci-sa
    namespace: droneci
roleRef:
  kind: ClusterRole
  name: droneci-cluster-role
  apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,39 @@
# Headlamp — Kubernetes web UI running inside the cluster it manages.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: headlamp
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: headlamp
  template:
    metadata:
      labels:
        app.kubernetes.io/name: headlamp
    spec:
      containers:
        - name: headlamp
          image: ghcr.io/headlamp-k8s/headlamp:latest
          # We only use -in-cluster. No other flags allowed.
          args:
            - "-in-cluster"
          ports:
            # Headlamp's default listen port.
            - containerPort: 4466
              name: http
---
# Exposes the UI on every node at port 30001 (service port 80 -> 4466).
apiVersion: v1
kind: Service
metadata:
  name: headlamp
  namespace: kube-system
spec:
  type: NodePort
  selector:
    app.kubernetes.io/name: headlamp
  ports:
    - protocol: TCP
      port: 80
      targetPort: 4466
      nodePort: 30001

View File

@@ -0,0 +1,145 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: nexus
---
# NFS-backed storage for /nexus-data (blob stores, logs, local config).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nexus-data-pvc
  namespace: nexus
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs-nexus
  resources:
    requests:
      storage: 200Gi
---
# nexus.secrets.json: Nexus encryption-key file, pointed at by the
#   -Dnexus.secrets.file flag inside JVM_PARAMS below.
# postgres-password: password for the external PostgreSQL database.
# NOTE(review): these are live credentials committed as base64 (not
# encrypted) — consider sealed-secrets or an external secret store.
apiVersion: v1
kind: Secret
metadata:
  name: nexus-secrets
  namespace: nexus
type: Opaque
data:
  nexus.secrets.json: ewogICJhY3RpdmUiOiAia2hvbmdpc2Eta2V5LTIwMjYiLAogICJrZXlzIjogWwogICAgewogICAgICAiaWQiOiAia2hvbmdpc2Eta2V5LTIwMjYiLAogICAgICAia2V5IjogIk5tTmhZMll3TkdNMUltVXdOVGt4WkROa1l6a3habVk1WVRJek5UWTVOalE9IgogICAgfQogIF0KfQo=
  postgres-password: TWd6dUxVakZianA5ZjQ=
---
# Non-secret connection settings for the external PostgreSQL instance,
# plus JVM sizing consumed through INSTALL4J_ADD_VM_PARAMS.
apiVersion: v1
kind: ConfigMap
metadata:
  name: nexus-configs
  namespace: nexus
data:
  POSTGRES_HOST: "192.168.1.170"
  POSTGRES_PORT: "5432"
  POSTGRES_USER: "nexus"
  POSTGRES_DBNAME: "nexus"
  JVM_PARAMS: "-Xms2g -Xmx2g -XX:MaxDirectMemorySize=2g -Dnexus.secrets.file=/nexus-data/nexus.secrets.json"
---
# Nexus Repository Manager — single replica, external PostgreSQL database,
# NFS-backed /nexus-data volume, secrets-file mounted via subPath.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nexus
  namespace: nexus
  labels:
    app: nexus
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nexus
  template:
    metadata:
      labels:
        app: nexus
    spec:
      securityContext:
        # The sonatype/nexus3 image runs as UID 200; fsGroup makes the
        # NFS volume group-writable for it.
        fsGroup: 200
      containers:
        - name: nexus
          image: sonatype/nexus3:latest
          ports:
            - containerPort: 8081
              name: nexus-ui
            - containerPort: 8082
              name: docker-repo
          env:
            # JVM sizing + secrets-file location, from the ConfigMap.
            - name: INSTALL4J_ADD_VM_PARAMS
              valueFrom:
                configMapKeyRef:
                  name: nexus-configs
                  key: JVM_PARAMS
            # Fix: was NEXUS_DATABAS_TYPE (typo, missing "E"), which is
            # inconsistent with the NEXUS_DATABASE_POSTGRES_* siblings and
            # would be silently ignored by the container.
            # NOTE(review): confirm the exact variable name against the
            # Sonatype external-database docs for this image version.
            - name: NEXUS_DATABASE_TYPE
              value: "postgresql"
            - name: NEXUS_DATABASE_POSTGRES_HOST
              valueFrom:
                configMapKeyRef:
                  name: nexus-configs
                  key: POSTGRES_HOST
            - name: NEXUS_DATABASE_POSTGRES_PORT
              valueFrom:
                configMapKeyRef:
                  name: nexus-configs
                  key: POSTGRES_PORT
            - name: NEXUS_DATABASE_POSTGRES_USER
              valueFrom:
                configMapKeyRef:
                  name: nexus-configs
                  key: POSTGRES_USER
            - name: NEXUS_DATABASE_POSTGRES_DBNAME
              valueFrom:
                configMapKeyRef:
                  name: nexus-configs
                  key: POSTGRES_DBNAME
            - name: NEXUS_DATABASE_POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: nexus-secrets
                  key: postgres-password
          resources:
            requests:
              cpu: "500m"
              memory: "2Gi"
            limits:
              # 4 CPUs: Nexus is CPU-hungry during indexing/uploads.
              cpu: "4"
              memory: "4Gi"
          volumeMounts:
            - name: nexus-data
              mountPath: /nexus-data
            # Overlay just the secrets file on top of the data volume.
            - name: secrets-volume
              mountPath: /nexus-data/nexus.secrets.json
              subPath: nexus.secrets.json
      volumes:
        - name: nexus-data
          persistentVolumeClaim:
            claimName: nexus-data-pvc
        - name: secrets-volume
          secret:
            secretName: nexus-secrets
            items:
              - key: nexus.secrets.json
                path: nexus.secrets.json
---
# Exposes the Nexus web UI (8081) and a Docker registry connector (8082)
# on fixed NodePorts on every node.
apiVersion: v1
kind: Service
metadata:
  name: nexus-service
  namespace: nexus
spec:
  type: NodePort
  selector:
    app: nexus
  ports:
    - name: ui
      protocol: TCP
      port: 8081
      targetPort: 8081
      nodePort: 31009
    - name: docker
      protocol: TCP
      port: 8082
      targetPort: 8082
      nodePort: 31010

View File

@@ -1,95 +1,46 @@
---
apiVersion: v1 apiVersion: v1
kind: Namespace kind: Namespace
metadata: metadata:
name: pgadmin name: pgadmin
--- ---
apiVersion: v1 apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pgadmin-data-pvc
namespace: pgadmin
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-storage
resources:
requests:
storage: 5Gi
---
apiVersion: v1
kind: Secret
metadata:
name: pgadmin-auth
namespace: pgadmin
type: Opaque
data:
pgadmin-password: QmxhY2tzdGFyMkBob21l
---
apiVersion: v1
kind: ConfigMap kind: ConfigMap
metadata: metadata:
name: pgadmin-config name: pgadmin-config
namespace: pgadmin namespace: pgadmin
data: data:
server.email: "khwezi@mngoma.lab" PGADMIN_DEFAULT_EMAIL: "khwezi@litecharms.co.za"
---
apiVersion: v1
kind: Secret
metadata:
name: pgadmin-secret
namespace: pgadmin
type: Opaque
data:
server.password: M3pDQTQz
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: pgadmin-sa
namespace: pgadmin
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: pgadmin-role
namespace: pgadmin
rules:
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: pgadmin-rolebinding
namespace: pgadmin
subjects:
- kind: ServiceAccount
name: pgadmin-sa
namespace: pgadmin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: pgadmin-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pgadmin-pv
spec:
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/pgadmin
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pgadmin-pvc
namespace: pgadmin
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 2Gi
--- ---
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
name: pgadmin name: pgadmin
namespace: pgadmin namespace: pgadmin
labels:
app: pgadmin
spec: spec:
replicas: 1 replicas: 1
selector: selector:
@@ -100,60 +51,63 @@ spec:
labels: labels:
app: pgadmin app: pgadmin
spec: spec:
serviceAccountName: pgadmin-sa affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: DoesNotExist
securityContext: securityContext:
runAsUser: 5050 runAsUser: 5050
runAsGroup: 5050
fsGroup: 5050 fsGroup: 5050
containers: containers:
- name: pgadmin - name: pgadmin
image: dpage/pgadmin4:latest image: dpage/pgadmin4:latest
ports: ports:
- containerPort: 80 - containerPort: 80
volumeMounts: name: http
- name: pgadmin-data
mountPath: /var/lib/pgadmin
env: env:
- name: PGADMIN_DEFAULT_EMAIL - name: PGADMIN_DEFAULT_EMAIL
valueFrom: valueFrom:
configMapKeyRef: configMapKeyRef:
name: pgadmin-config name: pgadmin-config
key: server.email key: PGADMIN_DEFAULT_EMAIL
- name: PGADMIN_DEFAULT_PASSWORD - name: PGADMIN_DEFAULT_PASSWORD
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
name: pgadmin-secret name: pgadmin-auth
key: server.password key: pgadmin-password
- name: PGADMIN_CONFIG_UPGRADE_CHECK_ENABLED
value: "False"
resources:
requests:
cpu: "100m"
memory: "256Mi"
limits:
cpu: "500m"
memory: "512Mi"
volumeMounts:
- name: pgadmin-storage
mountPath: /var/lib/pgadmin
volumes: volumes:
- name: pgadmin-data - name: pgadmin-storage
persistentVolumeClaim: persistentVolumeClaim:
claimName: pgadmin-pvc claimName: pgadmin-data-pvc
--- ---
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
name: pgadmin name: pgadmin-service
namespace: pgadmin namespace: pgadmin
spec: spec:
type: ClusterIP type: NodePort
selector: selector:
app: pgadmin app: pgadmin
ports: ports:
- port: 80 - name: http
targetPort: 80 protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: pgadmin-ingress
namespace: pgadmin
spec:
entryPoints:
- websecure
routes:
- match: Host(`pgadmin.apps.mngoma.lab`)
kind: Rule
services:
- name: pgadmin
port: 80 port: 80
tls: {} targetPort: 80
nodePort: 32081

View File

@@ -38,6 +38,13 @@ spec:
value: "0.0.0.0" value: "0.0.0.0"
- name: SEARXNG_SERVER_PORT - name: SEARXNG_SERVER_PORT
value: "8080" value: "8080"
resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "500m"
memory: "512Mi"
--- ---
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
@@ -45,6 +52,7 @@ metadata:
name: searxng-server name: searxng-server
namespace: searxng namespace: searxng
spec: spec:
type: NodePort
selector: selector:
app.kubernetes.io/name: searxng-server app.kubernetes.io/name: searxng-server
ports: ports:
@@ -52,7 +60,7 @@ spec:
protocol: TCP protocol: TCP
port: 8080 port: 8080
targetPort: 8080 targetPort: 8080
type: ClusterIP nodePort: 32080
--- ---
apiVersion: traefik.io/v1alpha1 apiVersion: traefik.io/v1alpha1
kind: IngressRoute kind: IngressRoute

View File

@@ -0,0 +1,199 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: semaphore
---
# Service account the Semaphore pod runs under (read-only role below).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: semaphore-sa
  namespace: semaphore
---
# Read-only access to pods/secrets/configmaps within the namespace.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: semaphore-role
  namespace: semaphore
rules:
  - apiGroups: [""]
    resources: ["pods", "secrets", "configmaps"]
    verbs: ["get", "watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: semaphore-rb
  namespace: semaphore
subjects:
  - kind: ServiceAccount
    name: semaphore-sa
    namespace: semaphore
roleRef:
  kind: Role
  name: semaphore-role
  apiGroup: rbac.authorization.k8s.io
---
# NFS-backed home directory for the semaphore user (/home/semaphore).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: semaphore-data-pvc
  namespace: semaphore
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs-storage
  resources:
    requests:
      storage: 5Gi
---
# db-password / admin-password: PostgreSQL and admin-login credentials.
# access-key-encryption: key Semaphore uses to encrypt stored access keys.
# id_ed25519 / id_ed25519.pub: SSH keypair mounted into ~/.ssh for git.
# NOTE(review): an SSH *private key* and passwords are committed here as
# plain base64 — rotate them and move to a secret store if this repo is
# shared.
apiVersion: v1
kind: Secret
metadata:
  name: semaphore-secrets
  namespace: semaphore
type: Opaque
data:
  db-password: c2VYbk42RGt1cFJaN0Y=
  admin-password: QmxhY2tzdGFyMkBob21l
  access-key-encryption: NHZKMm1LMnBMNW5COHhSMnpRN3dFM3RZNnVJMG9vUDlh
  id_ed25519: LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFBQUFBQkc1dmJtVUFBQUFFYm05dVpRQUFBQUFBQUFBQkFBQUFNd0FBQUF0emMyZ3RaVwpReU5UVXhPUUFBQUNEbkRQMDZzbmM0Q2k3M0ZPSW1nTmszTWJsc25vNTNoajZYRDJTSzE1ZFpiQUFBQUpnZWwvMndIcGY5CnNBQUFBQXR6YzJndFpXUXlOVFV4T1FBQUFDRG5EUDA2c25jNENpNzNGT0ltZ05rM01ibHNubzUzaGo2WEQyU0sxNWRaYkEKQUFBRUJUaHFjcnNXZWVVWnpFeVdWWmJoRGlKZE9FQkZYSkg4NXNhMUNjK1dXQ0krY00vVHF5ZHpnS0x2Y1U0aWFBMlRjeAp1V3llam5lR1BwY1BaSXJYbDFsc0FBQUFEbXRvZDJWNmFVQkVRVkpMVTFWT0FRSURCQVVHQnc9PQotLS0tLUVORCBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0K
  id_ed25519.pub: c3NoLWVkMjU1MTkgQUFBQUMzTnphQzFsWkRJMU5URTVBQUFBSU9jTS9UcXlkemdLTHZjVTRpYUEyVGN4dVd5ZWpuZUdQcGNQWklyWGwxbHMga2h3ZXppQERBUktTVU4K
---
# Non-secret Semaphore settings (DB endpoint, admin identity, timezone).
apiVersion: v1
kind: ConfigMap
metadata:
  name: semaphore-configs
  namespace: semaphore
data:
  SEMAPHORE_DB_USER: "semaphore_user"
  SEMAPHORE_DB_HOST: "192.168.1.170"
  SEMAPHORE_DB_NAME: "semaphore"
  SEMAPHORE_ADMIN_USERNAME: "khwezi"
  SEMAPHORE_ADMIN_EMAIL: "khwezi@litecharms.co.za"
  SEMAPHORE_SCHEDULE_TIMEZONE: "Africa/Johannesburg"
---
# Semaphore UI — single replica backed by an external PostgreSQL database;
# /home/semaphore lives on an NFS PVC and the git SSH keypair is overlaid
# into ~/.ssh from the semaphore-secrets Secret.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: semaphore
  namespace: semaphore
spec:
  replicas: 1
  selector:
    matchLabels:
      app: semaphore
  template:
    metadata:
      labels:
        app: semaphore
    spec:
      serviceAccountName: semaphore-sa
      affinity:
        # Keep the pod off control-plane nodes.
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: node-role.kubernetes.io/control-plane
                    operator: DoesNotExist
      hostAliases:
        # Pin the Gitea hostname to a fixed IP inside the pod's /etc/hosts.
        - ip: "169.255.58.144"
          hostnames:
            - "gitea.khongisa.co.za"
      securityContext:
        runAsUser: 1001
        fsGroup: 1001
        fsGroupChangePolicy: "Always"
      initContainers:
        - name: fix-ssh-permissions
          image: busybox:latest
          # We ensure the directory exists and has 700.
          # We don't touch the files yet because they are mounted by the main container.
          command: ["sh", "-c", "mkdir -p /home/semaphore/.ssh && chmod 700 /home/semaphore/.ssh"]
          volumeMounts:
            - name: semaphore-persistent-storage
              mountPath: /home/semaphore
      containers:
        - name: semaphore
          image: semaphoreui/semaphore:latest
          ports:
            - containerPort: 3000
              name: http
          resources:
            requests:
              cpu: "200m"
              memory: "512Mi"
            limits:
              cpu: "1"
              memory: "1Gi"
          env:
            - name: SEMAPHORE_DB_DIALECT
              value: "postgres"
            - name: SEMAPHORE_DB_PORT
              value: "5432"
            # Non-secret settings from the semaphore-configs ConfigMap.
            - name: SEMAPHORE_DB_USER
              valueFrom: { configMapKeyRef: { name: semaphore-configs, key: SEMAPHORE_DB_USER } }
            - name: SEMAPHORE_DB_HOST
              valueFrom: { configMapKeyRef: { name: semaphore-configs, key: SEMAPHORE_DB_HOST } }
            - name: SEMAPHORE_DB
              valueFrom: { configMapKeyRef: { name: semaphore-configs, key: SEMAPHORE_DB_NAME } }
            - name: SEMAPHORE_ADMIN
              valueFrom: { configMapKeyRef: { name: semaphore-configs, key: SEMAPHORE_ADMIN_USERNAME } }
            - name: SEMAPHORE_ADMIN_NAME
              valueFrom: { configMapKeyRef: { name: semaphore-configs, key: SEMAPHORE_ADMIN_USERNAME } }
            - name: SEMAPHORE_ADMIN_EMAIL
              valueFrom: { configMapKeyRef: { name: semaphore-configs, key: SEMAPHORE_ADMIN_EMAIL } }
            - name: SEMAPHORE_SCHEDULE_TIMEZONE
              valueFrom: { configMapKeyRef: { name: semaphore-configs, key: SEMAPHORE_SCHEDULE_TIMEZONE } }
            # Credentials from the semaphore-secrets Secret.
            - name: SEMAPHORE_DB_PASS
              valueFrom: { secretKeyRef: { name: semaphore-secrets, key: db-password } }
            - name: SEMAPHORE_ADMIN_PASSWORD
              valueFrom: { secretKeyRef: { name: semaphore-secrets, key: admin-password } }
            - name: SEMAPHORE_ACCESS_KEY_ENCRYPTION
              valueFrom: { secretKeyRef: { name: semaphore-secrets, key: access-key-encryption } }
          volumeMounts:
            - name: semaphore-persistent-storage
              mountPath: /home/semaphore
            # Same PVC, tmp/ subPath, so scratch files survive on the share.
            - name: semaphore-persistent-storage
              mountPath: /tmp/semaphore
              subPath: tmp
            # Key files overlaid individually so the rest of ~/.ssh stays
            # writable on the PVC.
            - name: ssh-keys-volume
              mountPath: /home/semaphore/.ssh/id_ed25519
              subPath: id_ed25519
              readOnly: true
            - name: ssh-keys-volume
              mountPath: /home/semaphore/.ssh/id_ed25519.pub
              subPath: id_ed25519.pub
              readOnly: true
      volumes:
        - name: semaphore-persistent-storage
          persistentVolumeClaim:
            claimName: semaphore-data-pvc
        - name: ssh-keys-volume
          secret:
            secretName: semaphore-secrets
            defaultMode: 384 # 0600
            items:
              - key: id_ed25519
                path: id_ed25519
              - key: id_ed25519.pub
                path: id_ed25519.pub
---
# Exposes the Semaphore web UI (container port 3000) on NodePort 31011.
apiVersion: v1
kind: Service
metadata:
  name: semaphore-service
  namespace: semaphore
spec:
  type: NodePort
  selector:
    app: semaphore
  ports:
    - name: http
      protocol: TCP
      port: 3000
      targetPort: 3000
      nodePort: 31011

View File

@@ -0,0 +1,15 @@
# Exposes Traefik's admin port (8080) on every node at NodePort 30000.
# NOTE(review): port 8080 is typically Traefik's API/dashboard entry
# point and may be unauthenticated — confirm LAN-wide exposure is
# intended before relying on this in anything but a home lab.
apiVersion: v1
kind: Service
metadata:
  name: traefik-dashboard-nodeport
  namespace: kube-system
spec:
  type: NodePort
  selector:
    app.kubernetes.io/name: traefik
  ports:
    - name: admin
      protocol: TCP
      port: 8080
      targetPort: 8080
      nodePort: 30000