first commit

This commit is contained in:
Khwezi Mngoma
2026-02-22 16:43:17 +02:00
commit 0410dc3950
94 changed files with 9739 additions and 0 deletions

.gitignore vendored Normal file

@@ -0,0 +1,484 @@
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
##
## Get latest from `dotnet new gitignore`
# dotenv files
.env
# User-specific files
*.rsuser
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Mono auto generated files
mono_crash.*
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
[Ww][Ii][Nn]32/
[Aa][Rr][Mm]/
[Aa][Rr][Mm]64/
bld/
[Bb]in/
[Oo]bj/
[Ll]og/
[Ll]ogs/
# Visual Studio 2015/2017 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/
# Visual Studio 2017 auto generated files
Generated\ Files/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
# NUnit
*.VisualState.xml
TestResult.xml
nunit-*.xml
# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c
# Benchmark Results
BenchmarkDotNet.Artifacts/
# .NET
project.lock.json
project.fragment.lock.json
artifacts/
# Tye
.tye/
# ASP.NET Scaffolding
ScaffoldingReadMe.txt
# StyleCop
StyleCopReport.xml
# Files built by Visual Studio
*_i.c
*_p.c
*_h.h
*.ilk
*.meta
*.obj
*.iobj
*.pch
*.pdb
*.ipdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*_wpftmp.csproj
*.log
*.tlog
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Chutzpah Test files
_Chutzpah*
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# Visual Studio Trace Files
*.e2e
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user
# TeamCity is a build add-in
_TeamCity*
# DotCover is a Code Coverage Tool
*.dotCover
# AxoCover is a Code Coverage Tool
.axoCover/*
!.axoCover/settings.json
# Coverlet is a free, cross platform Code Coverage Tool
coverage*.json
coverage*.xml
coverage*.info
# Visual Studio code coverage results
*.coverage
*.coveragexml
# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*
# MightyMoose
*.mm.*
AutoTest.Net/
# Web workbench (sass)
.sass-cache/
# Installshield output folder
[Ee]xpress/
# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html
# Click-Once directory
publish/
# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# Note: Comment the next line if you want to checkin your web deploy settings,
# but database connection strings (with potential passwords) will be unencrypted
*.pubxml
*.publishproj
# Microsoft Azure Web App publish settings. Comment the next line if you want to
# checkin your Azure Web App publish settings, but sensitive information contained
# in these scripts will be unencrypted
PublishScripts/
# NuGet Packages
*.nupkg
# NuGet Symbol Packages
*.snupkg
# The packages folder can be ignored because of Package Restore
**/[Pp]ackages/*
# except build/, which is used as an MSBuild target.
!**/[Pp]ackages/build/
# Uncomment if necessary however generally it will be regenerated when needed
#!**/[Pp]ackages/repositories.config
# NuGet v3's project.json files produces more ignorable files
*.nuget.props
*.nuget.targets
# Microsoft Azure Build Output
csx/
*.build.csdef
# Microsoft Azure Emulator
ecf/
rcf/
# Windows Store app package directories and files
AppPackages/
BundleArtifacts/
Package.StoreAssociation.xml
_pkginfo.txt
*.appx
*.appxbundle
*.appxupload
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!?*.[Cc]ache/
# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs
# Including strong name files can present a security risk
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
#*.snk
# Since there are multiple workflows, uncomment next line to ignore bower_components
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
#bower_components/
# RIA/Silverlight projects
Generated_Code/
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
ServiceFabricBackup/
*.rptproj.bak
# SQL Server files
*.mdf
*.ldf
*.ndf
# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
*.rptproj.rsuser
*- [Bb]ackup.rdl
*- [Bb]ackup ([0-9]).rdl
*- [Bb]ackup ([0-9][0-9]).rdl
# Microsoft Fakes
FakesAssemblies/
# GhostDoc plugin setting file
*.GhostDoc.xml
# Node.js Tools for Visual Studio
.ntvs_analysis.dat
node_modules/
# Visual Studio 6 build log
*.plg
# Visual Studio 6 workspace options file
*.opt
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
*.vbw
# Visual Studio 6 auto-generated project file (contains which files were open etc.)
*.vbp
# Visual Studio 6 workspace and project file (working project files containing files to include in project)
*.dsw
*.dsp
# Visual Studio 6 technical files
*.ncb
*.aps
# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions
# Paket dependency manager
.paket/paket.exe
paket-files/
# FAKE - F# Make
.fake/
# CodeRush personal settings
.cr/personal
# Python Tools for Visual Studio (PTVS)
__pycache__/
*.pyc
# Cake - Uncomment if you are using it
# tools/**
# !tools/packages.config
# Tabs Studio
*.tss
# Telerik's JustMock configuration file
*.jmconfig
# BizTalk build output
*.btp.cs
*.btm.cs
*.odx.cs
*.xsd.cs
# OpenCover UI analysis results
OpenCover/
# Azure Stream Analytics local run output
ASALocalRun/
# MSBuild Binary and Structured Log
*.binlog
# NVidia Nsight GPU debugger configuration file
*.nvuser
# MFractors (Xamarin productivity tool) working folder
.mfractor/
# Local History for Visual Studio
.localhistory/
# Visual Studio History (VSHistory) files
.vshistory/
# BeatPulse healthcheck temp database
healthchecksdb
# Backup folder for Package Reference Convert tool in Visual Studio 2017
MigrationBackup/
# Ionide (cross platform F# VS Code tools) working folder
.ionide/
# Fody - auto-generated XML schema
FodyWeavers.xsd
# VS Code files for those working on multiple tools
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
*.code-workspace
# Local History for Visual Studio Code
.history/
# Windows Installer files from build outputs
*.cab
*.msi
*.msix
*.msm
*.msp
# JetBrains Rider
*.sln.iml
.idea
##
## Visual studio for Mac
##
# globs
Makefile.in
*.userprefs
*.usertasks
config.make
config.status
aclocal.m4
install-sh
autom4te.cache/
*.tar.gz
tarballs/
test-results/
# Mac bundle stuff
*.dmg
*.app
# content below from: https://github.com/github/gitignore/blob/master/Global/macOS.gitignore
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
# content below from: https://github.com/github/gitignore/blob/master/Global/Windows.gitignore
# Windows thumbnail cache files
Thumbs.db
ehthumbs.db
ehthumbs_vista.db
# Dump file
*.stackdump
# Folder config file
[Dd]esktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msix
*.msm
*.msp
# Windows shortcuts
*.lnk
# Vim temporary swap files
*.swp

.vscode/settings.json vendored Normal file

@@ -0,0 +1,5 @@
{
    "yaml.schemas": {
        "kubernetes://schema/traefik.io/v1alpha1%40ingressroute": "file:///mnt/c/Users/Admin/source/repos/mngomalab/k3s/stacks/droneci.yml"
    }
}

docker-swarm Submodule

Submodule docker-swarm added at 95c3e10e85

documents/ansible.md Normal file

@@ -0,0 +1,17 @@
# Ensure Ansible is installed
Make sure Python and pip are installed, then install the Ansible CLI tools via apt as well as pip.
```shell
# Install Python 3 and confirm the version
sudo apt install python3
python3 --version
# Remove any conflicting Ansible installs before reinstalling
sudo apt remove ansible
pip uninstall ansible
# Reinstall via pip and apt
pip install ansible
sudo apt install ansible
ansible --version
# Verify the six dependency imports cleanly; upgrade if needed
python3 -c "import six"
pip install --upgrade ansible six
```
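To confirm the control node can actually reach managed hosts afterwards, a quick ad-hoc ping is handy. A minimal sketch, using the same `config/<target manifest>.ini` inventory convention the playbooks in this repo document:
```shell
# Ad-hoc connectivity check across every host in the inventory
ansible all -i config/<target manifest>.ini -m ping
# Show which Python interpreter Ansible resolved on each host
ansible all -i config/<target manifest>.ini -m setup -a "filter=ansible_python_version"
```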

documents/bind9.md Normal file

@@ -0,0 +1,120 @@
# Copy SSH keys to VM
## Make sure your local Linux default SSH directory (/root/.ssh) exists if you are working from Windows
```shell
ssh-copy-id -i ~/.ssh/id_ed25519.pub khwezi@192.168.1.151
```
# Update system packages
```shell
sudo apt update && sudo apt upgrade -y
```
# Install Bind9
## Install the accompanying utilities as well
```shell
sudo apt install bind9 bind9utils bind9-doc
```
# Configure Bind9
```shell
# 1. Define global DNS settings
sudo cp /etc/bind/named.conf.options /etc/bind/named.conf.options.bak
sudo nano /etc/bind/named.conf.options
# 2. Modify the file contents, using the following example:
#   options {
#       directory "/var/cache/bind";
#       listen-on { any; };    // Listen on all IP addresses
#       allow-query { any; };  // Allow queries from any IP address
#       forwarders {
#           8.8.8.8;           // Google Public DNS
#           8.8.4.4;
#       };
#       dnssec-validation auto;
#       auth-nxdomain no;      # conform to RFC1035
#       listen-on-v6 { any; };
#   };
# Save changes and exit the editor
# 3. Create custom DNS zones
sudo cp /etc/bind/named.conf.local /etc/bind/named.conf.local.bak
sudo nano /etc/bind/named.conf.local
# Use the following example:
#   zone "mngoma.lab" {
#       type master;
#       file "/etc/bind/db.mngoma.lab";
#   };
#
#   zone "1.168.192.in-addr.arpa" {
#       type master;
#       file "/etc/bind/db.192.168.1";
#   };
# 4. Create the zone file(s) referenced in /etc/bind/named.conf.local
sudo cp /etc/bind/db.local /etc/bind/db.mngoma.lab
sudo cp /etc/bind/db.127 /etc/bind/db.192.168.1
# 5. Edit the zone files to contain the records you need, e.g.:
#   ;
#   ; Zone file for example.com
#   ;
#   $ORIGIN example.com.
#   $TTL 3H
#
#   ; SOA record - authoritative info about the zone
#   @      IN SOA ns1.example.com. hostmaster.example.com. (
#              2025010101 ; Serial Number
#              21600      ; Refresh (6 hours)
#              3600       ; Retry (1 hour)
#              604800     ; Expire (1 week)
#              86400      ; Minimum TTL (1 day)
#          )
#
#   ; NS Records - authoritative name servers for the domain
#   @      IN NS ns1.example.com.
#   @      IN NS ns2.example.com.
#
#   ; A Records - mapping hostnames to IPv4 addresses
#   ns1    IN A 192.168.1.10
#   ns2    IN A 192.168.1.11
#   www    IN A 192.168.1.20
#   ftp    IN A 192.168.1.30
#
#   ; AAAA Records - mapping hostnames to IPv6 addresses (optional)
#   ns1    IN AAAA 2001:db8::10
#   www    IN AAAA 2001:db8::20
#
#   ; MX Record - mail exchange servers and their priority
#   @      IN MX 10 mail.example.com.
#   mail   IN A 192.168.1.100
#
#   ; CNAME Record - alias for another hostname
#   web    IN CNAME www.example.com.
# ----------
# 6. Check zone file syntax (validate)
sudo named-checkconf
sudo named-checkzone mngoma.lab /etc/bind/db.mngoma.lab
sudo named-checkzone 1.168.192.in-addr.arpa /etc/bind/db.192.168.1
# 7. Restart bind9
sudo systemctl restart bind9
```
# Configure firewall (lockdown)
```shell
# 1. Enable the firewall
sudo ufw enable
# 2. Allow all traffic from the local address range
sudo ufw allow from 192.168.1.0/24
# 3. Allow DNS ports
sudo ufw allow 53/udp
sudo ufw allow 53/tcp
```
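Once BIND is restarted, it is worth confirming that both forward and reverse lookups resolve against the new server. A small verification sketch; the `dns.mngoma.lab` record name is an assumption based on how the rest of this repo refers to the DNS host:
```shell
# Query the new DNS server (192.168.1.151, as configured above) directly
dig @192.168.1.151 mngoma.lab SOA +short
# Forward lookup for a host in the zone (record name is an assumption)
dig @192.168.1.151 dns.mngoma.lab +short
# Reverse lookup in the 1.168.192.in-addr.arpa zone
dig @192.168.1.151 -x 192.168.1.151 +short
```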

documents/nginx.md Normal file

@@ -0,0 +1,155 @@
# Update package list and install nginx
```shell
sudo apt update
sudo apt-get install nginx-full
```
# Backup the default config
```shell
sudo cp /etc/nginx/sites-available/default /etc/nginx/sites-available/default.bak
```
# (Optional) Obtain SSL certificates using Let's Encrypt
Replace `<your_domain>` with your actual domain. If you want to use self-signed certificates, generate them instead. Example for Let's Encrypt: `sudo apt install -y certbot python3-certbot-nginx`, then `sudo certbot --nginx -d <your_domain>`.
# Edit the default config (replace the server block with the following)
```shell
sudo tee /etc/nginx/sites-available/default > /dev/null <<'EOF'
server {
    listen 80 default_server;
    listen [::]:80 default_server;
    server_name _;
    root /var/www/html;
    index index.html index.htm index.nginx-debian.html;

    location / {
        # This will just serve the static page /var/www/html/index.html
        try_files $uri $uri/ =404;
    }
}
EOF
```
# Edit nginx.conf
## Do not put the stream {} block inside the http {} block
```shell
sudo nano /etc/nginx/nginx.conf

user www-data;
worker_processes auto;
pid /run/nginx.pid;

events { worker_connections 768; }

# Add the stream section here, outside http {}
stream {
    upstream managers_http {
        server lead.swarm.mngoma.lab:80;
        server follow.swarm.mngoma.lab:80;
    }
    server {
        listen 80;
        proxy_pass managers_http;
    }
    upstream managers_https {
        server lead.swarm.mngoma.lab:443;
        server follow.swarm.mngoma.lab:443;
    }
    server {
        listen 443;
        proxy_pass managers_https;
    }
}

http {
    ## ... your existing http config here ...
}
```
# Edit nginx.conf (full example)
```shell
sudo nano /etc/nginx/nginx.conf
# ONLY necessary if not handled by /etc/nginx/modules-enabled/
# load_module /usr/lib/nginx/modules/ngx_stream_module.so;

user www-data;
worker_processes auto;
pid /run/nginx.pid;

events {
    worker_connections 768;
}

# ========== STREAM PROXY (Layer 4 TCP) ==========
stream {
    upstream managers_http {
        server lead.swarm.mngoma.lab:80;
        server follow.swarm.mngoma.lab:80;
    }
    server {
        listen 80;
        proxy_pass managers_http;
    }
    upstream managers_https {
        server lead.swarm.mngoma.lab:443;
        server follow.swarm.mngoma.lab:443;
    }
    server {
        listen 443;
        proxy_pass managers_https;
    }
}

# ========== HTTP CONFIG ==========
http {
    ##
    # Basic Settings
    ##
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # Logging Settings
    ##
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##
    gzip on;
    gzip_disable "msie6";

    ##
    # Include virtual host configurations
    ##
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
```
# Test and reload nginx
```shell
sudo nginx -t
sudo systemctl reload nginx
```
# Log trace
```shell
tail -f /var/log/nginx/error.log /var/log/nginx/access.log
```
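With the stream proxy in place, traffic hitting this host on ports 80/443 should pass straight through to the swarm managers. A quick end-to-end sketch; `<nginx_host>` is a placeholder for this load balancer's address:
```shell
# Confirm nginx is listening on the stream ports
sudo ss -tlnp | grep nginx
# Send a request through the stream proxy (placeholder host)
curl -I http://<nginx_host>/
# Compare against a manager directly (hostname from the upstream config)
curl -I http://lead.swarm.mngoma.lab/
```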


@@ -0,0 +1,191 @@
---
- name: Install dependencies, mount NFS at /home/{{ ansible_user }}/k3s, setup K3s, DNS, Traefik, venv for k8s module
hosts: all
become: yes
vars:
nfs_server: storage.mngoma.lab
nfs_export_path: /export/k3s
dns_server_ip: 192.168.1.151
dns_fallback_ip: 192.168.1.1
ansible_user_home: "/home/{{ ansible_user }}"
nfs_mount_path: "{{ ansible_user_home }}/k3s"
kube_config_path: "{{ ansible_user_home }}/.kube/config"
kube_venv_path: "/opt/kube-venv"
tasks:
- name: Configure systemd-resolved permanent DNS
copy:
dest: /etc/systemd/resolved.conf
content: |
[Resolve]
DNS={{ dns_server_ip }}
FallbackDNS={{ dns_fallback_ip }}
Domains=mngoma.lab
DNSStubListener=yes
owner: root
group: root
mode: '0644'
- name: Ensure /etc/resolv.conf points to systemd-resolved stub
file:
src: /run/systemd/resolve/stub-resolv.conf
dest: /etc/resolv.conf
state: link
force: yes
- name: Restart systemd-resolved to apply DNS changes
systemd:
name: systemd-resolved
state: restarted
enabled: yes
- name: Ensure NFS mount point exists
file:
path: "{{ nfs_mount_path }}"
state: directory
owner: "{{ ansible_user | default('root') }}"
group: "{{ ansible_user | default('root') }}"
mode: '0755'
- name: Mount NFS share immediately (direct, idempotent)
mount:
src: "{{ nfs_server }}:{{ nfs_export_path }}"
path: "{{ nfs_mount_path }}"
fstype: nfs
opts: defaults
state: mounted
- name: Ensure NFS mount persists across reboots (fstab entry)
mount:
src: "{{ nfs_server }}:{{ nfs_export_path }}"
path: "{{ nfs_mount_path }}"
fstype: nfs
opts: defaults
state: present
- name: Ensure .kube directory exists
file:
path: "{{ ansible_user_home }}/.kube"
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0700'
- name: Download latest kubectl
shell: |
arch=$(uname -m)
if [ "$arch" = "x86_64" ]; then arch="amd64";
elif [ "$arch" = "aarch64" ]; then arch="arm64";
fi
curl -Lo /usr/local/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/${arch}/kubectl"
chmod +x /usr/local/bin/kubectl
args:
creates: /usr/local/bin/kubectl
environment:
KUBECONFIG: "{{ kube_config_path }}"
- name: Install Helm
shell: |
curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
args:
creates: /usr/local/bin/helm
environment:
KUBECONFIG: "{{ kube_config_path }}"
- name: Download and install kustomize
shell: |
kustomize_ver=$(curl -s https://api.github.com/repos/kubernetes-sigs/kustomize/releases/latest | jq -r '.tag_name')
arch=$(uname -m)
if [ "$arch" = "x86_64" ]; then arch="amd64";
elif [ "$arch" = "aarch64" ]; then arch="arm64";
fi
url="https://github.com/kubernetes-sigs/kustomize/releases/download/${kustomize_ver}/kustomize_${kustomize_ver#kustomize/}_linux_${arch}.tar.gz"
tmpfile=$(mktemp)
curl -L -o "$tmpfile" "$url"
tar -xz -C /usr/local/bin -f "$tmpfile"
chmod +x /usr/local/bin/kustomize
rm -f "$tmpfile"
args:
creates: /usr/local/bin/kustomize
ignore_errors: true
environment:
KUBECONFIG: "{{ kube_config_path }}"
- name: Install kubectx and kubens
shell: |
git clone https://github.com/ahmetb/kubectx /opt/kubectx || true
ln -sf /opt/kubectx/kubectx /usr/local/bin/kubectx
ln -sf /opt/kubectx/kubens /usr/local/bin/kubens
environment:
KUBECONFIG: "{{ kube_config_path }}"
- name: Ensure systemd override directory for k3s exists
file:
path: /etc/systemd/system/k3s.service.d
state: directory
owner: root
group: root
mode: '0755'
- name: Enable Traefik ping via CLI arguments
copy:
dest: /etc/systemd/system/k3s.service.d/10-traefik-ping.conf
content: |
[Service]
ExecStart=
ExecStart=/usr/local/bin/k3s server --no-deploy=servicelb --kubelet-arg="cloud-provider=external" \
--traefik-arg="--ping.entryPoint=web" \
--traefik-arg="--ping.manualRouting=false" \
--traefik-arg="--ping.responseMessage=Healthy"
owner: root
group: root
mode: '0644'
- name: Reload systemd to pick up k3s override
systemd:
daemon_reload: yes
# --- VENV FOR KUBERNETES MODULE ---
- name: Create venv for k8s ansible modules
command: python3 -m venv {{ kube_venv_path }}
args:
creates: "{{ kube_venv_path }}/bin/activate"
- name: Install kubernetes python library in venv
pip:
name: kubernetes
virtualenv: "{{ kube_venv_path }}"
virtualenv_python: python3
# The following play block will ONLY target the manager group
- name: Expose Traefik dashboard via IngressRoute (manager only)
hosts: manager # Change to your actual manager inventory group name
become: yes
vars:
ansible_user_home: "/home/{{ ansible_user }}"
kube_config_path: "{{ ansible_user_home }}/.kube/config"
kube_venv_path: "/opt/kube-venv"
tasks:
- name: Expose Traefik dashboard via IngressRoute (inline)
kubernetes.core.k8s:
state: present
definition:
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: traefik-dashboard
namespace: kube-system
spec:
entryPoints:
- web
- websecure
routes:
- match: Host(`dashboard.apps.mngoma.lab`)
kind: Rule
services:
- name: traefik
port: 8080
tls: {}
environment:
KUBECONFIG: "{{ kube_config_path }}"
vars:
ansible_python_interpreter: "{{ kube_venv_path }}/bin/python"
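This playbook lacks the invocation comment the others carry; a plausible run, following the same inventory convention, is sketched below (the playbook filename is hypothetical). One caution: recent k3s releases dropped the `--no-deploy` flag in favour of `--disable`, so the Traefik ping drop-in above may need adjusting for the k3s version actually installed.
```shell
# Hypothetical invocation; filename and inventory follow the repo convention
ansible-playbook -i config/<target manifest>.ini k3s-nfs-dns-traefik.yml
```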


@@ -0,0 +1,81 @@
# command: ansible-playbook -i config/<target manifest>.ini common/create-ansible-user.yml --ask-become-pass
# Note: this playbook requires interactive input (or a supplied secret) for privilege escalation
---
- name: Create ansible user and configure passwordless sudo
hosts: all
become: true
become_method: sudo
vars:
ansible_user: khwezi
tasks:
- name: Ensure 'ansible' user exists
ansible.builtin.user:
name: ansible
groups: sudo
append: yes
shell: /bin/bash
state: present
- name: Check if passwordless sudo is already configured for 'ansible'
ansible.builtin.shell: |
grep -Fxq "ansible ALL=(ALL) NOPASSWD: ALL" /etc/sudoers.d/ansible
register: sudoers_check
ignore_errors: true
changed_when: false
- name: Allow 'ansible' user passwordless sudo
ansible.builtin.copy:
dest: /etc/sudoers.d/ansible
content: "ansible ALL=(ALL) NOPASSWD: ALL\n"
owner: root
group: root
mode: '0440'
when: sudoers_check.rc != 0
- name: Ensure /home/ansible/.ssh directory exists
ansible.builtin.file:
path: /home/ansible/.ssh
state: directory
owner: ansible
group: ansible
mode: '0700'
- name: Copy id_ed25519 private key to ansible user
ansible.builtin.copy:
src: ~/.ssh/id_ed25519
dest: /home/ansible/.ssh/id_ed25519
owner: ansible
group: ansible
mode: '0600'
- name: Copy id_ed25519 public key to ansible user
ansible.builtin.copy:
src: ~/.ssh/id_ed25519.pub
dest: /home/ansible/.ssh/id_ed25519.pub
owner: ansible
group: ansible
mode: '0644'
- name: Ensure authorized_keys exists
ansible.builtin.file:
path: /home/ansible/.ssh/authorized_keys
state: touch
owner: ansible
group: ansible
mode: '0600'
- name: Read public key content
ansible.builtin.slurp:
src: /home/ansible/.ssh/id_ed25519.pub
register: pubkey_content
- name: Ensure public key is present in authorized_keys
ansible.builtin.lineinfile:
path: /home/ansible/.ssh/authorized_keys
line: "{{ pubkey_content['content'] | b64decode | trim }}"
owner: ansible
group: ansible
mode: '0600'
create: yes
state: present
- name: Allow 'ansible' user to write to /etc/systemd/resolved.conf
ansible.builtin.file:
path: /etc/systemd/resolved.conf
owner: ansible
group: ansible
mode: '0664'
state: file
become: true
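After the play runs, it is worth confirming the new account works end to end. A sketch; `<host>` is a placeholder for any managed node:
```shell
# Log in with the copied key and confirm passwordless sudo took effect
ssh -i ~/.ssh/id_ed25519 ansible@<host> 'sudo -n true && echo "NOPASSWD sudo OK"'
```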


@@ -0,0 +1,86 @@
# command: ansible-playbook -i config/<target manifest>.ini common/install-docker.yml
---
- name: Install Docker and Test
hosts: all
become: true
become_method: sudo
tasks:
- name: Ensure required apt packages are installed
ansible.builtin.apt:
name:
- apt-transport-https
- ca-certificates
- curl
- gnupg
- lsb-release
state: present
update_cache: yes
- name: Ensure gpg is installed
ansible.builtin.apt:
name: gpg
state: present
- name: Remove old Docker keyring files if present
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /usr/share/keyrings/docker-archive-keyring.gpg
- /usr/share/keyrings/docker-archive-keyring.gpg.asc
- name: Download Docker's official GPG key (ASCII)
ansible.builtin.get_url:
url: https://download.docker.com/linux/ubuntu/gpg
dest: /usr/share/keyrings/docker-archive-keyring.gpg.asc
mode: '0644'
force: yes
- name: Convert Docker GPG key to binary format
ansible.builtin.command: >
gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg /usr/share/keyrings/docker-archive-keyring.gpg.asc
- name: Add Docker repository if not present (modern method)
ansible.builtin.apt_repository:
repo: "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable"
state: present
filename: docker
- name: Update apt cache after adding Docker repo
ansible.builtin.apt:
update_cache: yes
- name: Check if Docker is already installed
ansible.builtin.command: docker --version
register: docker_check
ignore_errors: true
changed_when: false
- name: Install Docker Engine
ansible.builtin.apt:
name:
- docker-ce
- docker-ce-cli
- containerd.io
state: present
when: docker_check.rc != 0
- name: Check Docker version (post-install)
ansible.builtin.command: docker --version
register: docker_version
changed_when: false
- name: Show Docker version
ansible.builtin.debug:
var: docker_version.stdout
- name: Run hello-world container to test Docker
ansible.builtin.command: docker run --name hello-test --rm hello-world
register: hello_world_output
changed_when: false
- name: Show hello-world output
ansible.builtin.debug:
var: hello_world_output.stdout
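The hello-world task above already exercises the daemon on each host; for later spot checks, an ad-hoc sketch using the repo's inventory convention:
```shell
# Fleet-wide Docker version check, with privilege escalation
ansible all -i config/<target manifest>.ini -b -m command -a "docker --version"
```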


@@ -0,0 +1,28 @@
# command: ansible-playbook -i config/<target manifest>.ini common/update-docker.yml
---
- name: Update Docker only on hosts where it is installed
hosts: all
become: true
become_method: sudo
tasks:
- name: Check if Docker is installed
ansible.builtin.command: docker --version
register: docker_check
ignore_errors: true
changed_when: false
- name: Update Docker packages if installed
ansible.builtin.apt:
name:
- docker-ce
- docker-ce-cli
- containerd.io
state: latest
update_cache: yes
when: docker_check.rc == 0
- name: Debug message if Docker is not installed
ansible.builtin.debug:
msg: "Docker is not installed on this host. Skipping update."
when: docker_check.rc != 0


@@ -0,0 +1,19 @@
# command: ansible-playbook -i config/<target manifest>.ini common/update-hosts.yml
---
- name: Update and upgrade all apt packages
hosts: all
become: true
become_method: sudo
tasks:
- name: Update apt cache
ansible.builtin.apt:
update_cache: yes
- name: Upgrade all packages
ansible.builtin.apt:
upgrade: dist
- name: Autoremove unused packages
ansible.builtin.apt:
autoremove: yes


@@ -0,0 +1,30 @@
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=ansible
ansible_ssh_private_key_file=~/.ssh/id_ed25519
[shared]
sentry ansible_host=sentry.mngoma.lab
alpha ansible_host=alpha.lb.mngoma.lab
database ansible_host=database.mngoma.lab
vpn ansible_host=vpn.mngoma.lab
khongisa ansible_host=khongisa.mngoma.lab
#beta ansible_host=beta.lb.mngoma.lab
#dns ansible_host=dns.mngoma.lab
#storage ansible_host=storage.mngoma.lab
manager ansible_host=lead.mngoma.lab
worker ansible_host=worker1.mngoma.lab
#manager2 ansible_host=follow.mngoma.lab
#worker2 ansible_host=worker2.mngoma.lab
[makhiwanecluster]
manager ansible_host=lead.mngoma.lab
worker ansible_host=worker1.mngoma.lab
[mbubecluster]
#manager2 ansible_host=follow.mngoma.lab
#worker2 ansible_host=worker2.mngoma.lab
[loadbalancers]
alpha ansible_host=alpha.lb.mngoma.lab
#beta ansible_host=beta.lb.mngoma.lab
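Groups defined here can be targeted individually to keep plays scoped. A sketch; the inventory filename is an assumption, since the diff header for this file is missing:
```shell
# Limit a run to the load balancers only
ansible-playbook -i config/shared.ini common/update-hosts.yml --limit loadbalancers
# List the hosts a pattern would match without running anything
ansible loadbalancers -i config/shared.ini --list-hosts
```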


@@ -0,0 +1,11 @@
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=ansible
ansible_ssh_private_key_file=~/.ssh/id_ed25519
[cluster]
manager ansible_host=lead.mngoma.lab
worker ansible_host=worker1.mngoma.lab
[workers]
worker ansible_host=worker1.mngoma.lab


@@ -0,0 +1,11 @@
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=ansible
ansible_ssh_private_key_file=~/.ssh/id_ed25519
[cluster]
manager ansible_host=follow.mngoma.lab
worker ansible_host=worker2.mngoma.lab
[workers]
worker ansible_host=worker2.mngoma.lab


@@ -0,0 +1,20 @@
---
- name: Ensure manager node is running k3s / start if needed
hosts: manager
become: true
tasks:
- name: Start k3s service on manager
systemd:
name: k3s
state: started
enabled: true
- name: Ensure worker nodes are running k3s-agent / start if needed
hosts: workers
become: true
tasks:
- name: Start k3s-agent service on worker nodes
systemd:
name: k3s-agent
state: started
enabled: true
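Once both services report started, the cluster state can be confirmed from the manager. A sketch, assuming the kubeconfig at `~/.kube/config` that the setup playbooks in this repo create:
```shell
# All nodes should report Ready once the agents reconnect
kubectl get nodes -o wide
```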


@@ -0,0 +1,256 @@
---
- name: Gather IPv4 address facts for all hosts
hosts: all
gather_facts: yes
vars:
dns_server: "192.168.1.151"
tasks:
- name: Configure systemd-resolved to use custom DNS
become: true
copy:
dest: /etc/systemd/resolved.conf
content: |
[Resolve]
DNS={{ dns_server }}
FallbackDNS=192.168.1.1
Domains=mngoma.lab
DNSStubListener=yes
owner: root
group: root
mode: "0644"
- name: Ensure systemd-resolved service is enabled and restarted
become: true
systemd:
name: systemd-resolved
state: restarted
enabled: yes
- name: Ensure host IPv4 address is available and stored as fact
set_fact:
node_ipv4: "{{ ansible_default_ipv4.address }}"
when: ansible_default_ipv4 is defined and ansible_default_ipv4.address is defined
- name: Fail if IPv4 address could not be determined
fail:
msg: "Could not determine IPv4 address for {{ inventory_hostname }}. Please check network configuration."
when: node_ipv4 is not defined
- name: Ensure /home/{{ ansible_user }}/k3s directory exists
become: yes
file:
path: /home/{{ ansible_user }}/k3s
state: directory
owner: root
group: root
mode: '0777'
- name: Initialise the K3s control plane (manager)
hosts: manager
become: yes
vars:
k3s_version: v1.29.4+k3s1
kubeconfig_dir: "/home/{{ ansible_user }}/.kube"
kubeconfig_file: "{{ kubeconfig_dir }}/config"
tasks:
- name: Install required apt dependencies
apt:
name:
- curl
- python3-pip
- python3-venv
state: present
update_cache: yes
- name: Create a Python virtual environment for Ansible k8s modules
command: python3 -m venv /opt/ansible-venv
args:
creates: /opt/ansible-venv
- name: Install Kubernetes and OpenShift libraries in the venv
command: /opt/ansible-venv/bin/pip install kubernetes openshift
- name: Install k3s on manager (control plane)
shell: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION={{ k3s_version }} sh -
args:
creates: /usr/local/bin/k3s
- name: Gather architecture for kubectl
set_fact:
kubectl_arch: "{{ 'amd64' if ansible_architecture == 'x86_64' else ('arm' if 'armv7l' in ansible_architecture else ('arm64' if 'aarch64' in ansible_architecture else ansible_architecture)) }}"
- name: Download latest kubectl binary
shell: |
curl -Lo /usr/local/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/{{ kubectl_arch }}/kubectl"
args:
creates: /usr/local/bin/kubectl
- name: Make kubectl executable
file:
path: /usr/local/bin/kubectl
mode: '0755'
owner: root
group: root
- name: Validate kubectl version
shell: kubectl version --client --output=yaml
register: kubectl_version
changed_when: false
- name: Show kubectl version
debug:
var: kubectl_version.stdout
- name: Ensure .kube directory exists for ansible user
become_user: "{{ ansible_user }}"
file:
path: "{{ kubeconfig_dir }}"
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0700'
- name: Copy kubeconfig file to ansible user home
become: true
copy:
src: /etc/rancher/k3s/k3s.yaml
dest: "{{ kubeconfig_file }}"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0600'
remote_src: yes
- name: Replace server IP in kubeconfig with manager's IP address
become_user: "{{ ansible_user }}"
lineinfile:
path: "{{ kubeconfig_file }}"
regexp: 'server: https://127\.0\.0\.1:6443'
line: " server: https://{{ node_ipv4 }}:6443"
- name: Get the cluster join token
shell: cat /var/lib/rancher/k3s/server/node-token
register: k3s_token
changed_when: false
- name: Set fact for join token and manager IP
set_fact:
k3s_node_token: "{{ k3s_token.stdout }}"
manager_ip: "{{ node_ipv4 }}"
- name: Add the join token and manager IP to hostvars for workers
add_host:
name: "cluster_primary"
groups: join_info
k3s_node_token: "{{ k3s_node_token }}"
k3s_manager_ip: "{{ manager_ip }}"
- name: Install and join worker nodes to the control plane
hosts: workers
become: yes
vars:
k3s_version: v1.29.4+k3s1
tasks:
- name: Install required dependencies
apt:
name: [curl]
state: present
update_cache: yes
- name: Ensure /home/{{ ansible_user }}/k3s directory exists
file:
path: /home/{{ ansible_user }}/k3s
state: directory
owner: root
group: root
mode: '0777'
- name: Set manager join information
set_fact:
k3s_node_token: "{{ hostvars[groups['join_info'][0]]['k3s_node_token'] }}"
k3s_manager_ip: "{{ hostvars[groups['join_info'][0]]['k3s_manager_ip'] }}"
- name: Fail if manager's IP is not available
fail:
msg: "Could not determine manager's IP for joining cluster!"
when: k3s_manager_ip is not defined
- name: Install k3s agent (worker)
shell: |
curl -sfL https://get.k3s.io | K3S_URL=https://{{ k3s_manager_ip }}:6443 K3S_TOKEN={{ k3s_node_token }} INSTALL_K3S_VERSION={{ k3s_version }} sh -
args:
creates: /usr/local/bin/k3s-agent
- name: Gather architecture for kubectl
set_fact:
kubectl_arch: "{{ 'amd64' if ansible_architecture == 'x86_64' else ('arm' if 'armv7l' in ansible_architecture else ('arm64' if 'aarch64' in ansible_architecture else ansible_architecture)) }}"
- name: Download latest kubectl binary (worker)
shell: |
curl -Lo /usr/local/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/{{ kubectl_arch }}/kubectl"
args:
creates: /usr/local/bin/kubectl
- name: Make kubectl executable (worker)
file:
path: /usr/local/bin/kubectl
mode: '0755'
owner: root
group: root
- name: Verify cluster nodes from manager node and configure storage
hosts: manager
become: yes
vars:
kubeconfig_file: "/home/{{ ansible_user }}/.kube/config"
tasks:
- name: Wait for all worker nodes to join the cluster
become_user: "{{ ansible_user }}"
shell: |
for i in {1..10}; do
[ $(kubectl --kubeconfig={{ kubeconfig_file }} get nodes | grep -c worker) -ge 1 ] && exit 0
sleep 15
done
exit 1
register: wait_worker
failed_when: wait_worker.rc != 0
- name: List all k3s nodes
become_user: "{{ ansible_user }}"
shell: kubectl --kubeconfig={{ kubeconfig_file }} get nodes -o wide
register: all_nodes
- name: Show current k3s cluster nodes
debug:
var: all_nodes.stdout
- name: Create StorageClass for /home/ansible/k3s INLINE
vars:
ansible_python_interpreter: /opt/ansible-venv/bin/python
kubernetes.core.k8s:
kubeconfig: "{{ kubeconfig_file }}"
definition:
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-pvs
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
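A verification sketch for the final play's results, run on the manager with the kubeconfig this playbook installs:
```shell
# Workers should be joined and the local StorageClass should exist
kubectl --kubeconfig ~/.kube/config get nodes -o wide
kubectl --kubeconfig ~/.kube/config get storageclass local-pvs
```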


@@ -0,0 +1,131 @@
# create-loadbalancer.yml
- name: Install and configure HAProxy with SSL termination and managed DNS
hosts: alpha
become: yes
vars:
ssl_cert_path: "/etc/ssl/certs/haproxy.pem"
dns_server: "192.168.1.151"
tasks:
- name: Install HAProxy and dependencies
apt:
name:
- haproxy
- openssl
state: present
update_cache: yes
- name: Ensure cert directory exists
file:
path: /etc/ssl/certs
state: directory
owner: root
group: root
mode: '0755'
- name: Generate private key for HAProxy
community.crypto.openssl_privatekey:
path: /etc/ssl/certs/haproxy.key
size: 2048
type: RSA
mode: '0600'
- name: Generate a Certificate Signing Request (CSR) for HAProxy
community.crypto.openssl_csr:
path: /etc/ssl/certs/haproxy.csr
privatekey_path: /etc/ssl/certs/haproxy.key
common_name: "{{ inventory_hostname }}"
subject_alt_name:
- "DNS:{{ inventory_hostname }}"
mode: "0644"
- name: Generate self-signed certificate for HAProxy
community.crypto.x509_certificate:
path: /etc/ssl/certs/haproxy.crt
privatekey_path: /etc/ssl/certs/haproxy.key
csr_path: /etc/ssl/certs/haproxy.csr
provider: selfsigned
selfsigned_not_before: "{{ '%Y%m%d%H%M%SZ' | strftime(ansible_date_time.epoch | int) }}"
selfsigned_not_after: "{{ '%Y%m%d%H%M%SZ' | strftime((ansible_date_time.epoch | int) + (365*24*60*60)) }}"
mode: "0644"
- name: Combine key and cert into .pem file for HAProxy
shell: cat /etc/ssl/certs/haproxy.key /etc/ssl/certs/haproxy.crt > {{ ssl_cert_path }}
args:
creates: "{{ ssl_cert_path }}"
- name: Configure systemd-resolved to use custom DNS
become: true
copy:
dest: /etc/systemd/resolved.conf
content: |
[Resolve]
DNS={{ dns_server }}
FallbackDNS=192.168.1.1
Domains=mngoma.lab
DNSStubListener=yes
owner: root
group: root
mode: "0644"
- name: Ensure systemd-resolved service is enabled and restarted
become: true
systemd:
name: systemd-resolved
state: restarted
enabled: yes
- name: Upload custom haproxy.cfg with SSL termination and HTTPS-only backend
copy:
dest: /etc/haproxy/haproxy.cfg
content: |
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin
user haproxy
group haproxy
daemon
tune.ssl.default-dh-param 2048
defaults
log global
mode http
option httplog
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
option forwardfor
resolvers dns
nameserver dns1 {{ dns_server }}:53
resolve_retries 3
timeout resolve 2s
timeout retry 1s
hold valid 10s
frontend https_front
bind *:443 ssl crt {{ ssl_cert_path }}
mode http
option forwardfor
http-request set-header X-Forwarded-Proto https
http-request set-header Host %[req.hdr(host)]
default_backend app_clusters
backend app_clusters
mode http
balance roundrobin
option httpchk GET /
http-check expect status 100,101,102,103,200,201,202,203,204,205,206,207,208,226,300,301,302,303,304,305,306,307,308,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,421,422,423,424,425,426,428,429,431,451
server lead_https lead.mngoma.lab:443 resolvers dns resolve-prefer ipv4 check ssl verify none
owner: root
group: root
mode: "0644"
- name: Enable and start haproxy
systemd:
name: haproxy
state: restarted
enabled: yes
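Since the certificate is self-signed, test clients need verification disabled. A post-run check sketch against the load balancer host from the inventory:
```shell
# Inspect the served certificate
echo | openssl s_client -connect alpha.lb.mngoma.lab:443 2>/dev/null | openssl x509 -noout -subject -dates
# Request through HAProxy, skipping verification for the self-signed cert
curl -kI https://alpha.lb.mngoma.lab/
```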


@@ -0,0 +1,35 @@
---
- name: Drain and stop worker nodes first
hosts: workers
become: true
tasks:
- name: Drain worker node (optional - requires kubectl access)
shell: kubectl drain {{ inventory_hostname }} --ignore-daemonsets --delete-emptydir-data || true
delegate_to: manager
ignore_errors: yes
- name: Stop k3s-agent service on worker
systemd:
name: k3s-agent
state: stopped
enabled: false
- name: Poweroff worker node
shell: shutdown -h now
async: 0
poll: 0
- name: Stop and poweroff the manager node
hosts: manager
become: true
tasks:
- name: Stop k3s (server) service
systemd:
name: k3s
state: stopped
enabled: false
- name: Poweroff manager node
shell: shutdown -h now
async: 0
poll: 0
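One caveat with running `shutdown -h now` over SSH: the connection can drop before Ansible collects a result, and `async: 0` with `poll: 0` does not actually detach the task. A fire-and-forget variant needs a positive async window; a hedged sketch:
```yaml
- name: Poweroff node (fire-and-forget sketch)
  ansible.builtin.shell: sleep 2 && shutdown -h now
  async: 60   # give the task a window to detach cleanly
  poll: 0     # do not wait for completion
```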


@@ -0,0 +1,46 @@
apiVersion: v1
data:
Corefile: |
mngoma.lab:53 {
forward . dns.mngoma.lab
}
.:53 {
errors
health
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
}
hosts /etc/coredns/NodeHosts {
ttl 60
reload 15s
fallthrough
}
prometheus :9153
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
import /etc/coredns/custom/*.override
}
import /etc/coredns/custom/*.server
NodeHosts: |
192.168.1.155 lead
192.168.1.157 worker1
kind: ConfigMap
metadata:
annotations:
objectset.rio.cattle.io/applied: H4sIAAAAAAAA/4yQwWrzMBCEX0Xs2fEf20nsX9BDybH02lMva2kdq1Z2g6SkBJN3L8IUCiVtbyNGOzvfzoAn90IhOmHQcKmgAIsJQc+wl0CD8wQaSr1t1PzKSilFIUiIix4JfRoXHQjtdZHTuafAlCgq488xUSi9wK2AybEFDXvhwR2e8QQFHCnh50ZkloTJCcf8lP6NTIqUyuCkNJiSp9LJP5czoLjryztTWB0uE2iYmvjFuVSFenJsHx6tFf41gvGY6Y0Eshz/9D2e0OSZfIJVvMZExwzusSf/I9SIcQQNvaG6a+r/XVdV7abBddPtsN9W66Eedi0N7aberM22zaHf6t0tcPsIAAD//8Ix+PfoAQAA
objectset.rio.cattle.io/id: ""
objectset.rio.cattle.io/owner-gvk: k3s.cattle.io/v1, Kind=Addon
objectset.rio.cattle.io/owner-name: coredns
objectset.rio.cattle.io/owner-namespace: kube-system
creationTimestamp: "2025-09-29T14:41:54Z"
labels:
objectset.rio.cattle.io/hash: bce283298811743a0386ab510f2f67ef74240c57
name: coredns
namespace: kube-system
resourceVersion: "480"
uid: ed00243e-0e5c-4a6a-9f3c-41e9ba6fa6d8


@@ -0,0 +1,46 @@
apiVersion: v1
data:
Corefile: |
mngoma.lab:53 {
forward . dns.mngoma.lab
}
.:53 {
errors
health
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
}
hosts /etc/coredns/NodeHosts {
ttl 60
reload 15s
fallthrough
}
prometheus :9153
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
import /etc/coredns/custom/*.override
}
import /etc/coredns/custom/*.server
NodeHosts: |
192.168.1.155 lead
192.168.1.157 worker1
kind: ConfigMap
metadata:
annotations:
objectset.rio.cattle.io/applied: H4sIAAAAAAAA/4yQwWrzMBCEX0Xs2fEf20nsX9BDybH02lMva2kdq1Z2g6SkBJN3L8IUCiVtbyNGOzvfzoAn90IhOmHQcKmgAIsJQc+wl0CD8wQaSr1t1PzKSilFIUiIix4JfRoXHQjtdZHTuafAlCgq488xUSi9wK2AybEFDXvhwR2e8QQFHCnh50ZkloTJCcf8lP6NTIqUyuCkNJiSp9LJP5czoLjryztTWB0uE2iYmvjFuVSFenJsHx6tFf41gvGY6Y0Eshz/9D2e0OSZfIJVvMZExwzusSf/I9SIcQQNvaG6a+r/XVdV7abBddPtsN9W66Eedi0N7aberM22zaHf6t0tcPsIAAD//8Ix+PfoAQAA
objectset.rio.cattle.io/id: ""
objectset.rio.cattle.io/owner-gvk: k3s.cattle.io/v1, Kind=Addon
objectset.rio.cattle.io/owner-name: coredns
objectset.rio.cattle.io/owner-namespace: kube-system
creationTimestamp: "2025-09-28T11:07:44Z"
labels:
objectset.rio.cattle.io/hash: bce283298811743a0386ab510f2f67ef74240c57
name: coredns
namespace: kube-system
resourceVersion: "474"
uid: 21a101b3-48c7-4004-918b-e17fbb55fd2f


@@ -0,0 +1,152 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: dashy
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: dashy-sa
namespace: dashy
---
apiVersion: v1
kind: ConfigMap
metadata:
name: dashy-config
namespace: dashy
data:
config.yaml: |
appConfig:
title: "Mngoma"
description: "Welcome to mngoma lab, click where you may"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: dashy-role
namespace: dashy
rules:
- apiGroups: [""]
resources: ["pods", "services", "endpoints", "configmaps"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: dashy-rolebinding
namespace: dashy
subjects:
- kind: ServiceAccount
name: dashy-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: dashy-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: dashy-pv
labels:
type: local
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/dashy
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: dashy-pvc
namespace: dashy
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 1Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dashy
namespace: dashy
spec:
replicas: 1
selector:
matchLabels:
app: dashy
template:
metadata:
labels:
app: dashy
spec:
serviceAccountName: dashy-sa
containers:
- name: dashy
image: lissy93/dashy:latest
ports:
- containerPort: 8080
volumeMounts:
- name: dashy-data
mountPath: /app/public
- name: dashy-config
mountPath: /app/public/conf.yml
subPath: config.yaml
volumes:
- name: dashy-data
persistentVolumeClaim:
claimName: dashy-pvc
- name: dashy-config
configMap:
name: dashy-config
items:
- key: config.yaml
path: config.yaml
---
apiVersion: v1
kind: Service
metadata:
name: dashy
namespace: dashy
spec:
type: ClusterIP
selector:
app: dashy
ports:
- name: web
protocol: TCP
port: 80
targetPort: 8080
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: dashy-web
namespace: dashy
spec:
entryPoints:
- websecure
routes:
- match: Host(`dashboard.apps.mngoma.lab`)
kind: Rule
services:
- name: dashy
port: 80
scheme: http
tls: {}
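A deploy-and-smoke-test sketch for this stack; the manifest filename is an assumption:
```shell
kubectl apply -f dashy.yml
kubectl -n dashy get pods,svc
# Through Traefik, skipping verification for the lab's self-signed certificate
curl -kI https://dashboard.apps.mngoma.lab/
```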


@@ -0,0 +1,321 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: droneci
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: droneci-sa
namespace: droneci
---
apiVersion: v1
kind: ConfigMap
metadata:
name: droneci-config
namespace: droneci
data:
server.domain: "droneci.apps.mngoma.lab"
server.proto: "https"
server.runnername: "drone_runner"
server.runnernetworks: "default"
server.runnercapacity: "2"
database.type: "postgres"
database.host: "192.168.1.137:5432"
database.name: "dronecim"
gitea.server: "https://gitea.apps.mngoma.lab"
---
apiVersion: v1
kind: Secret
metadata:
name: droneci-secret
namespace: droneci
type: Opaque
data:
server.rpctoken: MDFLNlFHTkE4VEMxQjJGVzNGV0JSWDJFNE4=
database.username: YXBwX3VzZXI=
database.password: MTIzNDU=
database.connectstring: cG9zdGdyZXM6Ly9hcHBfdXNlcjoxMjM0NUAxOTIuMTY4LjEuMTM3OjU0MzIvZHJvbmVjaW0/c3NsbW9kZT1kaXNhYmxl
gitea.clientid: MGRiNTliZDAtMGI3Ni00ODgxLThhODQtNjI0N2ZlYTExOTcz
gitea.clientsecret: Z3RvX3l6bXB6NmJvZG52cmRnMnM1MmVmNWF1c3ozZTYzNGdyeTc0MjJqZ2hwd3ZnbGc2M2JtcnE=
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: droneci-role
namespace: droneci
rules:
- apiGroups: [""]
resources: ["pods", "services", "endpoints", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: droneci-rolebinding
namespace: droneci
subjects:
- kind: ServiceAccount
name: droneci-sa
namespace: droneci
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: droneci-role
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: drone-runner-role
namespace: droneci
rules:
- apiGroups: [""]
resources: ["pods", "pods/exec", "services", "endpoints", "configmaps", "secrets", "persistentvolumeclaims"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["apps"]
resources: ["deployments", "replicasets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: drone-runner-rolebinding
namespace: droneci
subjects:
- kind: ServiceAccount
name: droneci-sa
namespace: droneci
roleRef:
kind: Role
name: drone-runner-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: droneci-pv
labels:
type: local
spec:
capacity:
storage: 10Gi
accessModes: ["ReadWriteOnce"]
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/droneci
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values: ["lead"]
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: droneci-pvc
namespace: droneci
spec:
accessModes: ["ReadWriteOnce"]
storageClassName: local-pvs
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone
namespace: droneci
labels:
app.kubernetes.io/name: drone
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: drone
template:
metadata:
labels:
app.kubernetes.io/name: drone
spec:
hostAliases:
- ip: "192.168.1.160"
hostnames:
- "gitea.apps.mngoma.lab"
- "droneci.apps.mngoma.lab"
serviceAccountName: droneci-sa
containers:
- name: drone
image: drone/drone:latest
ports:
- containerPort: 80
name: http
env:
- name: DRONE_SERVER_HOST
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.domain
- name: DRONE_SERVER_PROTO
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.proto
- name: DRONE_SERVER_PORT
value: ":80"
- name: DRONE_TLS_AUTOCERT
value: "false"
- name: DRONE_LOGS_DEBUG
value: "true"
- name: DRONE_RPC_SECRET
valueFrom:
secretKeyRef:
name: droneci-secret
key: server.rpctoken
- name: DRONE_DATABASE_DRIVER
valueFrom:
configMapKeyRef:
name: droneci-config
key: database.type
- name: DRONE_DATABASE_DATASOURCE
valueFrom:
secretKeyRef:
name: droneci-secret
key: database.connectstring
- name: DRONE_DB_USER
valueFrom:
secretKeyRef:
name: droneci-secret
key: database.username
- name: DRONE_DB_PASS
valueFrom:
secretKeyRef:
name: droneci-secret
key: database.password
- name: DRONE_GITEA_SERVER
valueFrom:
configMapKeyRef:
name: droneci-config
key: gitea.server
- name: DRONE_GITEA_CLIENT_ID
valueFrom:
secretKeyRef:
name: droneci-secret
key: gitea.clientid
- name: DRONE_GITEA_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: droneci-secret
key: gitea.clientsecret
- name: DRONE_GITEA_SKIP_VERIFY
value: "true"
readinessProbe:
httpGet:
path: /healthz
port: 80
initialDelaySeconds: 20
periodSeconds: 10
failureThreshold: 3
livenessProbe:
httpGet:
path: /healthz
port: 80
initialDelaySeconds: 30
periodSeconds: 20
failureThreshold: 3
volumeMounts:
- name: drone-storage
mountPath: /data
volumes:
- name: drone-storage
persistentVolumeClaim:
claimName: droneci-pvc
---
apiVersion: v1
kind: Service
metadata:
name: drone-server
namespace: droneci
spec:
selector:
app.kubernetes.io/name: drone
ports:
- name: http
port: 80
targetPort: 80
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone-runner
namespace: droneci
labels:
app.kubernetes.io/name: drone-runner
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: drone-runner
template:
metadata:
labels:
app.kubernetes.io/name: drone-runner
spec:
hostAliases:
- ip: "192.168.1.160"
hostnames:
- "gitea.apps.mngoma.lab"
- "droneci.apps.mngoma.lab"
serviceAccountName: droneci-sa
containers:
- name: runner
image: drone/drone-runner-kube:latest
ports:
- containerPort: 3000
env:
- name: DRONE_RPC_HOST
value: drone-server.droneci.svc.cluster.local
- name: DRONE_RPC_PROTO
value: "http"
- name: DRONE_RPC_SECRET
valueFrom:
secretKeyRef:
name: droneci-secret
key: server.rpctoken
- name: DRONE_RUNNER_NAME
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.runnername
- name: DRONE_RUNNER_CAPACITY
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.runnercapacity
- name: DRONE_RUNNER_NETWORKS
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.runnernetworks
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: droneci-web
namespace: droneci
spec:
entryPoints:
- websecure
routes:
- match: Host(`droneci.apps.mngoma.lab`)
kind: Rule
services:
- name: drone-server
port: 80
scheme: http
tls: {}
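The Secret values above are base64-encoded, not encrypted; swapping in your own credentials is a one-liner each way. For example:
```shell
# Encode a value for a Secret's data field (note -n: no trailing newline)
echo -n 'my-rpc-secret' | base64
# Decode an existing value to check it
echo 'MDFLNlFHTkE4VEMxQjJGVzNGV0JSWDJFNE4=' | base64 -d
```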


@@ -0,0 +1,201 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: gitea
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: gitea-sa
namespace: gitea
---
apiVersion: v1
kind: ConfigMap
metadata:
name: gitea-config
namespace: gitea
data:
server.domain: "gitea.apps.mngoma.lab"
server.rooturl: "https://gitea.apps.mngoma.lab"
database.type: "postgres"
database.host: "192.168.1.137:5432"
database.name: "giteam"
---
apiVersion: v1
kind: Secret
metadata:
name: gitea-secret
namespace: gitea
type: Opaque
data:
database.username: YXBwX3VzZXI=
database.password: MTIzNDU=
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: gitea-role
namespace: gitea
rules:
- apiGroups: [""]
resources: ["pods", "services", "endpoints", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: gitea-rolebinding
namespace: gitea
subjects:
- kind: ServiceAccount
name: gitea-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: gitea-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: gitea-pv
labels:
type: local
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/gitea
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: gitea-pvc
namespace: gitea
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: gitea
namespace: gitea
labels:
app.kubernetes.io/name: gitea-server
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: gitea-server
template:
metadata:
labels:
app.kubernetes.io/name: gitea-server
spec:
serviceAccountName: gitea-sa
containers:
- name: gitea
image: gitea/gitea:1.21.4
ports:
- containerPort: 3000
- containerPort: 22
volumeMounts:
- name: gitea-data
mountPath: /data
env:
- name: USER_UID
value: "1000"
- name: USER_GID
value: "1000"
- name: GITEA_SERVER_ROOT_URL
valueFrom:
configMapKeyRef:
name: gitea-config
key: server.rooturl
- name: GITEA_SERVER_DOMAIN
valueFrom:
configMapKeyRef:
name: gitea-config
key: server.domain
- name: GITEA__database__TYPE
valueFrom:
configMapKeyRef:
name: gitea-config
key: database.type
- name: GITEA__database__HOST
valueFrom:
configMapKeyRef:
name: gitea-config
key: database.host
- name: GITEA__database__USER
valueFrom:
secretKeyRef:
name: gitea-secret
key: database.username
- name: GITEA__database__PASSWD
valueFrom:
secretKeyRef:
name: gitea-secret
key: database.password
- name: GITEA__database__NAME
valueFrom:
configMapKeyRef:
name: gitea-config
key: database.name
volumes:
- name: gitea-data
persistentVolumeClaim:
claimName: gitea-pvc
---
apiVersion: v1
kind: Service
metadata:
name: gitea-server
namespace: gitea
spec:
selector:
app.kubernetes.io/name: gitea-server
ports:
- name: http
protocol: TCP
port: 3000
targetPort: 3000
- name: ssh
protocol: TCP
port: 22
targetPort: 22
type: ClusterIP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: gitea-web
namespace: gitea
spec:
entryPoints:
- websecure
routes:
- match: Host(`gitea.apps.mngoma.lab`)
kind: Rule
services:
- name: gitea-server
port: 3000
scheme: http
tls: {}
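A quick rollout check after applying the stack; the manifest filename is an assumption:
```shell
kubectl apply -f gitea.yml
kubectl -n gitea rollout status deployment/gitea
kubectl -n gitea logs deployment/gitea --tail=20
```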


@@ -0,0 +1,156 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: mariadb
---
apiVersion: v1
kind: Secret
metadata:
name: mariadb-secret
namespace: mariadb
type: Opaque
data:
root.password: UDRvMzBB
database.username: cm9vdA==
database.password: NXBFMjZa
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: mariadb-sa
namespace: mariadb
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: mariadb-role
namespace: mariadb
rules:
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: mariadb-rolebinding
namespace: mariadb
subjects:
- kind: ServiceAccount
name: mariadb-sa
namespace: mariadb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: mariadb-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: mariadb-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/mariadb
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mariadb-pvc
namespace: mariadb
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mariadb
namespace: mariadb
spec:
replicas: 1
selector:
matchLabels:
app: mariadb
template:
metadata:
labels:
app: mariadb
spec:
serviceAccountName: mariadb-sa
containers:
- name: mariadb
image: mariadb:11
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: mariadb-secret
key: root.password
- name: MYSQL_USER
valueFrom:
secretKeyRef:
name: mariadb-secret
key: database.username
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: mariadb-secret
key: database.password
ports:
- containerPort: 3306
volumeMounts:
- mountPath: /var/lib/mysql
name: mariadb-data
volumes:
- name: mariadb-data
persistentVolumeClaim:
claimName: mariadb-pvc
---
apiVersion: v1
kind: Service
metadata:
name: mariadb
namespace: mariadb
spec:
type: ClusterIP
selector:
app: mariadb
ports:
- port: 3306
targetPort: 3306
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: mariadb-ingress
namespace: mariadb
spec:
entryPoints:
- websecure
routes:
- match: Host(`mariadb.database.mngoma.lab`)
kind: Rule
services:
- name: mariadb
port: 3306
tls: {}
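A caveat on the route above: `IngressRoute` is Traefik's HTTP router, and the MySQL wire protocol is not HTTP, so exposing MariaDB this way generally needs the TCP variant instead. A hedged sketch (not part of this commit), assuming clients connect with TLS so the SNI match can apply:
```shell
kubectl apply -f - <<'EOF'
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
  name: mariadb-tcp
  namespace: mariadb
spec:
  entryPoints:
    - websecure
  routes:
    - match: HostSNI(`mariadb.database.mngoma.lab`)
      services:
        - name: mariadb
          port: 3306
  tls: {}
EOF
```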


@@ -0,0 +1,166 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: mongodb
---
apiVersion: v1
kind: Secret
metadata:
name: mongodb-secret
namespace: mongodb
type: Opaque
data:
root.username: YWRtaW4=
root.password: bGpUMTkx
username: YXBwdXNlcg==
password: VTNlNzRy
---
apiVersion: v1
kind: ConfigMap
metadata:
name: mongodb-config
namespace: mongodb
data:
database.name: "appdb"
database.replicaset: "primary"
database.port: "27017"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: mongodb-sa
namespace: mongodb
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: mongodb-role
namespace: mongodb
rules:
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: mongodb-rolebinding
namespace: mongodb
subjects:
- kind: ServiceAccount
name: mongodb-sa
namespace: mongodb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: mongodb-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongodb-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/mongodb
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mongodb-pvc
namespace: mongodb
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mongodb
namespace: mongodb
spec:
replicas: 1
selector:
matchLabels:
app: mongodb
template:
metadata:
labels:
app: mongodb
spec:
serviceAccountName: mongodb-sa
containers:
- name: mongodb
image: mongo:6
env:
- name: MONGO_INITDB_ROOT_USERNAME
valueFrom:
secretKeyRef:
name: mongodb-secret
key: root.username
- name: MONGO_INITDB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: mongodb-secret
key: root.password
- name: MONGO_INITDB_DATABASE
valueFrom:
configMapKeyRef:
name: mongodb-config
key: database.name
ports:
- containerPort: 27017
volumeMounts:
- mountPath: /data/db
name: mongodb-data
volumes:
- name: mongodb-data
persistentVolumeClaim:
claimName: mongodb-pvc
---
apiVersion: v1
kind: Service
metadata:
name: mongodb
namespace: mongodb
spec:
type: ClusterIP
selector:
app: mongodb
ports:
- port: 27017
targetPort: 27017
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: mongodb-ingress
namespace: mongodb
spec:
entryPoints:
- websecure
routes:
- match: Host(`mongodb.database.mngoma.lab`)
kind: Rule
services:
- name: mongodb
port: 27017
tls: {}
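# As with MariaDB above, an HTTP IngressRoute cannot proxy the raw MongoDB
# wire protocol; an IngressRouteTCP with HostSNI on a TCP entrypoint is the
# pattern that applies here too (see the sketch under the mariadb stack).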

@@ -0,0 +1,196 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: nextcloud
---
apiVersion: v1
kind: ConfigMap
metadata:
name: nextcloud-config
namespace: nextcloud
data:
server.trusteddomains: "nextcloud.apps.mngoma.lab"
database.createdbuser: "false"
database.host: "192.168.1.137"
database.name: "nextcloud"
---
apiVersion: v1
kind: Secret
metadata:
name: nextcloud-secret
namespace: nextcloud
type: Opaque
data:
root.username: a2h3ZXpp
root.password: QmxhY2tzdGFyMkBob21l
database.username: YXBwX3VzZXI=
database.password: MTIzNDU=
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nextcloud-sa
namespace: nextcloud
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: nextcloud-role
namespace: nextcloud
rules:
- apiGroups: [""]
resources: ["pods", "services", "endpoints", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: nextcloud-rolebinding
namespace: nextcloud
subjects:
- kind: ServiceAccount
name: nextcloud-sa
namespace: nextcloud
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nextcloud-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nextcloud-pv
labels:
type: local
spec:
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/nextcloud
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nextcloud-pvc
namespace: nextcloud
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 20Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nextcloud
namespace: nextcloud
spec:
replicas: 1
selector:
matchLabels:
app: nextcloud
template:
metadata:
labels:
app: nextcloud
spec:
serviceAccountName: nextcloud-sa
containers:
- name: nextcloud
image: nextcloud:27.1.7
ports:
- containerPort: 80
volumeMounts:
- name: nextcloud-data
mountPath: /var/www/html
env:
- name: NEXTCLOUD_ADMIN_USER
valueFrom:
secretKeyRef:
name: nextcloud-secret
key: root.username
- name: NEXTCLOUD_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: nextcloud-secret
key: root.password
- name: NEXTCLOUD_TRUSTED_DOMAINS
valueFrom:
configMapKeyRef:
name: nextcloud-config
key: server.trusteddomains
- name: POSTGRES_HOST
valueFrom:
configMapKeyRef:
name: nextcloud-config
key: database.host
- name: POSTGRES_DB
valueFrom:
configMapKeyRef:
name: nextcloud-config
key: database.name
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: nextcloud-secret
key: database.username
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: nextcloud-secret
key: database.password
- name: NC_SETUP_CREATE_DB_USER
valueFrom:
configMapKeyRef:
name: nextcloud-config
key: database.createdbuser
volumes:
- name: nextcloud-data
persistentVolumeClaim:
claimName: nextcloud-pvc
---
apiVersion: v1
kind: Service
metadata:
name: nextcloud
namespace: nextcloud
spec:
type: ClusterIP
selector:
app: nextcloud
ports:
- name: http
protocol: TCP
port: 80
targetPort: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: nextcloud-web
namespace: nextcloud
spec:
entryPoints:
- websecure
routes:
- match: Host(`nextcloud.apps.mngoma.lab`)
kind: Rule
services:
- name: nextcloud
port: 80
scheme: http
tls: {}
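# Since TLS terminates at Traefik while the container speaks plain HTTP, the
# official nextcloud image's OVERWRITEPROTOCOL variable is worth setting if
# generated links come back as http (sketch; add alongside the env above):
#   - name: OVERWRITEPROTOCOL
#     value: "https"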

@@ -0,0 +1,101 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: nosqlclient
---
apiVersion: v1
kind: Secret
metadata:
name: nosqlclient-secret
namespace: nosqlclient
type: Opaque
data:
mongodb-uri: bW9uZ29kYjovL2FkbWluOkJsYWNrc3RhcjIlNDBob21lQGRhdGFiYXNlLm1uZ29tYS5sYWI6MjcwMTcvYWRtaW4=
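# Decodes to a mongodb:// URI; reserved characters in the password must stay
# percent-encoded inside the URI (here %40 encodes '@') so the URI parses.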
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nosqlclient-sa
namespace: nosqlclient
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: nosqlclient-role
namespace: nosqlclient
rules:
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: nosqlclient-rolebinding
namespace: nosqlclient
subjects:
- kind: ServiceAccount
name: nosqlclient-sa
namespace: nosqlclient
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nosqlclient-role
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nosqlclient
namespace: nosqlclient
spec:
replicas: 1
selector:
matchLabels:
app: nosqlclient
template:
metadata:
labels:
app: nosqlclient
spec:
serviceAccountName: nosqlclient-sa
containers:
- name: nosqlclient
image: mongoclient/mongoclient:latest
env:
- name: MONGO_URL
valueFrom:
secretKeyRef:
name: nosqlclient-secret
key: mongodb-uri
ports:
- containerPort: 3000
---
apiVersion: v1
kind: Service
metadata:
name: nosqlclient
namespace: nosqlclient
spec:
type: ClusterIP
selector:
app: nosqlclient
ports:
- port: 3000
targetPort: 3000
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: nosqlclient-ingress
namespace: nosqlclient
spec:
entryPoints:
- websecure
routes:
- match: Host(`mongodb.apps.mngoma.lab`)
kind: Rule
services:
- name: nosqlclient
port: 3000
tls: {}

@@ -0,0 +1,159 @@
apiVersion: v1
kind: Namespace
metadata:
name: pgadmin
---
apiVersion: v1
kind: ConfigMap
metadata:
name: pgadmin-config
namespace: pgadmin
data:
server.email: "khwezi@mngoma.lab"
---
apiVersion: v1
kind: Secret
metadata:
name: pgadmin-secret
namespace: pgadmin
type: Opaque
data:
server.password: M3pDQTQz
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: pgadmin-sa
namespace: pgadmin
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: pgadmin-role
namespace: pgadmin
rules:
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: pgadmin-rolebinding
namespace: pgadmin
subjects:
- kind: ServiceAccount
name: pgadmin-sa
namespace: pgadmin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: pgadmin-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pgadmin-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/pgadmin
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pgadmin-pvc
namespace: pgadmin
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: pgadmin
namespace: pgadmin
spec:
replicas: 1
selector:
matchLabels:
app: pgadmin
template:
metadata:
labels:
app: pgadmin
spec:
serviceAccountName: pgadmin-sa
securityContext:
runAsUser: 5050
runAsGroup: 5050
fsGroup: 5050
containers:
- name: pgadmin
image: dpage/pgadmin4:latest
ports:
- containerPort: 80
volumeMounts:
- name: pgadmin-data
mountPath: /var/lib/pgadmin
env:
- name: PGADMIN_DEFAULT_EMAIL
valueFrom:
configMapKeyRef:
name: pgadmin-config
key: server.email
- name: PGADMIN_DEFAULT_PASSWORD
valueFrom:
secretKeyRef:
name: pgadmin-secret
key: server.password
volumes:
- name: pgadmin-data
persistentVolumeClaim:
claimName: pgadmin-pvc
---
apiVersion: v1
kind: Service
metadata:
name: pgadmin
namespace: pgadmin
spec:
type: ClusterIP
selector:
app: pgadmin
ports:
- port: 80
targetPort: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: pgadmin-ingress
namespace: pgadmin
spec:
entryPoints:
- websecure
routes:
- match: Host(`pgadmin.apps.mngoma.lab`)
kind: Rule
services:
- name: pgadmin
port: 80
tls: {}

@@ -0,0 +1,126 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: phpmyadmin
---
apiVersion: v1
kind: ConfigMap
metadata:
name: phpmyadmin-config
namespace: phpmyadmin
data:
database.address: "192.168.1.137"
database.port: "3306"
---
apiVersion: v1
kind: Secret
metadata:
name: mysql-secret
namespace: phpmyadmin
type: Opaque
data:
username: cm9vdA==
password: QmxhY2tzdGFyMkBob21l
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: phpmyadmin-sa
namespace: phpmyadmin
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: phpmyadmin-role
namespace: phpmyadmin
rules:
- apiGroups: [""]
resources: ["pods", "services", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: phpmyadmin-rolebinding
namespace: phpmyadmin
subjects:
- kind: ServiceAccount
name: phpmyadmin-sa
namespace: phpmyadmin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: phpmyadmin-role
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: phpmyadmin
namespace: phpmyadmin
spec:
replicas: 1
selector:
matchLabels:
app: phpmyadmin
template:
metadata:
labels:
app: phpmyadmin
spec:
serviceAccountName: phpmyadmin-sa
containers:
- name: phpmyadmin
image: phpmyadmin/phpmyadmin:latest
ports:
- containerPort: 80
env:
- name: PMA_HOST
valueFrom:
configMapKeyRef:
name: phpmyadmin-config
key: database.address
- name: PMA_PORT
valueFrom:
configMapKeyRef:
name: phpmyadmin-config
key: database.port
- name: PMA_USER
valueFrom:
secretKeyRef:
name: mysql-secret
key: username
- name: PMA_PASSWORD
valueFrom:
secretKeyRef:
name: mysql-secret
key: password
---
apiVersion: v1
kind: Service
metadata:
name: phpmyadmin
namespace: phpmyadmin
spec:
type: ClusterIP
selector:
app: phpmyadmin
ports:
- port: 80
targetPort: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: phpmyadmin-ingress
namespace: phpmyadmin
spec:
entryPoints:
- websecure
routes:
- match: Host(`phpmyadmin.apps.mngoma.lab`)
kind: Rule
services:
- name: phpmyadmin
port: 80
tls: {}

@@ -0,0 +1,127 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: portainer
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: portainer-sa
namespace: portainer
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: portainer-admin-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: portainer-sa
namespace: portainer
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: portainer-pv
labels:
type: local
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/portainer
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: portainer-pvc
namespace: portainer
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: portainer
namespace: portainer
spec:
replicas: 1
selector:
matchLabels:
app: portainer
template:
metadata:
labels:
app: portainer
spec:
serviceAccountName: portainer-sa
containers:
- name: portainer
image: portainer/portainer-ce:2.33.2
ports:
- containerPort: 9000
- containerPort: 9443
volumeMounts:
- name: data
mountPath: /data
volumes:
- name: data
persistentVolumeClaim:
claimName: portainer-pvc
---
apiVersion: v1
kind: Service
metadata:
name: portainer
namespace: portainer
spec:
type: ClusterIP
selector:
app: portainer
ports:
- name: http
protocol: TCP
port: 9000
targetPort: 9000
- name: https
protocol: TCP
port: 9443
targetPort: 9443
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: portainer-dashboard
namespace: portainer
spec:
entryPoints:
- websecure
routes:
- match: Host(`portainer.apps.mngoma.lab`)
kind: Rule
services:
- name: portainer
port: 9000
scheme: http
tls: {}

@@ -0,0 +1,149 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: postgresql
---
apiVersion: v1
kind: Secret
metadata:
name: postgresql-secret
namespace: postgresql
type: Opaque
data:
username: cm9vdA==
password: Mmh2MTdL
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: postgresql-sa
namespace: postgresql
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: postgresql-role
namespace: postgresql
rules:
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: postgresql-rolebinding
namespace: postgresql
subjects:
- kind: ServiceAccount
name: postgresql-sa
namespace: postgresql
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: postgresql-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: postgresql-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/postgresql
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgresql-pvc
namespace: postgresql
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: postgresql
namespace: postgresql
spec:
replicas: 1
selector:
matchLabels:
app: postgresql
template:
metadata:
labels:
app: postgresql
spec:
serviceAccountName: postgresql-sa
containers:
- name: postgresql
image: postgres:16
env:
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: postgresql-secret
key: username
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: postgresql-secret
key: password
ports:
- containerPort: 5432
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgresql-data
volumes:
- name: postgresql-data
persistentVolumeClaim:
claimName: postgresql-pvc
---
apiVersion: v1
kind: Service
metadata:
name: postgresql
namespace: postgresql
spec:
type: ClusterIP
selector:
app: postgresql
ports:
- port: 5432
targetPort: 5432
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: postgresql-ingress
namespace: postgresql
spec:
entryPoints:
- websecure
routes:
- match: Host(`postgresql.database.mngoma.lab`)
kind: Rule
services:
- name: postgresql
port: 5432
tls: {}
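# Same caveat as the mariadb stack: the PostgreSQL wire protocol is raw TCP,
# so this HTTP IngressRoute will not carry it; an IngressRouteTCP with
# HostSNI on a TCP entrypoint is the applicable form.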

@@ -0,0 +1,107 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: redis
---
apiVersion: v1
kind: Secret
metadata:
name: redis-secret
namespace: redis
type: Opaque
data:
username: YWRtaW4=
password: NjI4akZL
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: redis-sa
namespace: redis
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: redis-role
namespace: redis
rules:
- apiGroups: [""]
resources: ["pods", "services"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: redis-rolebinding
namespace: redis
subjects:
- kind: ServiceAccount
name: redis-sa
namespace: redis
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: redis-role
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis
namespace: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
serviceAccountName: redis-sa
containers:
- name: redis
image: redis:7
ports:
- containerPort: 6379
env:
- name: REDIS_USERNAME
valueFrom:
secretKeyRef:
name: redis-secret
key: username
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: redis-secret
key: password
# The stock redis:7 image does not read REDIS_USERNAME/REDIS_PASSWORD on its
# own; pass the password through to redis-server so auth is actually enforced
# (a non-default username would additionally need an ACL config).
args: ["--requirepass", "$(REDIS_PASSWORD)"]
---
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: redis
spec:
type: ClusterIP
selector:
app: redis
ports:
- port: 6379
targetPort: 6379
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: redis-ingress
namespace: redis
spec:
entryPoints:
- websecure
routes:
- match: Host(`redis.database.mngoma.lab`)
kind: Rule
services:
- name: redis
port: 6379
tls: {}
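# Same caveat as the mariadb stack: RESP is raw TCP, so this HTTP IngressRoute
# will not carry Redis traffic; an IngressRouteTCP with HostSNI applies here.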

@@ -0,0 +1,189 @@
apiVersion: v1
kind: Namespace
metadata:
name: redisinsight
---
apiVersion: v1
kind: ConfigMap
metadata:
name: redisinsight-config
namespace: redisinsight
data:
database.host: "192.168.1.137"
database.port: "6379"
database.instance: "redis"
RI_LOG_LEVEL: "info"
RI_ALLOW_PRIVILEGED: "true"
RI_TELEMETRY: "false"
---
apiVersion: v1
kind: Secret
metadata:
name: redis-secret
namespace: redisinsight
type: Opaque
data:
password: NjI4akZL
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: redisinsight-sa
namespace: redisinsight
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: redisinsight-role
namespace: redisinsight
rules:
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: redisinsight-rolebinding
namespace: redisinsight
subjects:
- kind: ServiceAccount
name: redisinsight-sa
namespace: redisinsight
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: redisinsight-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: redisinsight-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/redisinsight
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redisinsight-pvc
namespace: redisinsight
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: redisinsight
namespace: redisinsight
spec:
replicas: 1
selector:
matchLabels:
app: redisinsight
template:
metadata:
labels:
app: redisinsight
spec:
serviceAccountName: redisinsight-sa
containers:
- name: redisinsight
image: redislabs/redisinsight:latest
ports:
- containerPort: 8001
volumeMounts:
- name: redisinsight-data
mountPath: /data # RedisInsight v2 persists under /data (v1 used /db)
env:
- name: RI_APP_HOST
value: "0.0.0.0"
- name: RI_APP_PORT
value: "8001"
- name: RI_LOG_LEVEL
valueFrom:
configMapKeyRef:
name: redisinsight-config
key: RI_LOG_LEVEL
- name: RI_ALLOW_PRIVILEGED
valueFrom:
configMapKeyRef:
name: redisinsight-config
key: RI_ALLOW_PRIVILEGED
- name: RI_TELEMETRY
valueFrom:
configMapKeyRef:
name: redisinsight-config
key: RI_TELEMETRY
- name: RI_DATABASE_0_NAME
valueFrom:
configMapKeyRef:
name: redisinsight-config
key: database.instance
- name: RI_DATABASE_0_HOST
valueFrom:
configMapKeyRef:
name: redisinsight-config
key: database.host
- name: RI_DATABASE_0_PORT
valueFrom:
configMapKeyRef:
name: redisinsight-config
key: database.port
- name: RI_DATABASE_0_PASSWORD
valueFrom:
secretKeyRef:
name: redis-secret
key: password
volumes:
- name: redisinsight-data
persistentVolumeClaim:
claimName: redisinsight-pvc
---
apiVersion: v1
kind: Service
metadata:
name: redisinsight
namespace: redisinsight
spec:
type: ClusterIP
selector:
app: redisinsight
ports:
- port: 8001
targetPort: 8001 # matches RI_APP_PORT set above (v2 would otherwise default to 5540)
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: redisinsight-ingress
namespace: redisinsight
spec:
entryPoints:
- websecure
routes:
- match: Host(`redisinsight.apps.mngoma.lab`)
kind: Rule
services:
- name: redisinsight
port: 8001
tls: {}

@@ -0,0 +1,134 @@
apiVersion: v1
kind: Namespace
metadata:
name: uptimekuma
---
apiVersion: v1
kind: ConfigMap
metadata:
name: uptimekuma-config
namespace: uptimekuma
data:
server.port: "3001"
server.disableusageanalytics: "true"
---
apiVersion: v1
kind: Secret
metadata:
name: uptimekuma-secret
namespace: uptimekuma
type: Opaque
data:
password: MWhEMjBn
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: uptimekuma-pv
labels:
type: local
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/uptimekuma
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: uptimekuma-data
namespace: uptimekuma
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: uptimekuma
namespace: uptimekuma
spec:
replicas: 1
selector:
matchLabels:
app: uptimekuma
template:
metadata:
labels:
app: uptimekuma
spec:
containers:
- name: uptimekuma
image: louislam/uptime-kuma:latest
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3001
volumeMounts:
- mountPath: /app/data
name: uptimekuma-data
env:
- name: PORT
valueFrom:
configMapKeyRef:
name: uptimekuma-config
key: server.port
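# Note: Uptime Kuma honours PORT, but it does not consume arbitrary keys as
# env vars; the two entries below are documentation only, and the admin
# password is set in the web UI on first run.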
- name: server.disableusageanalytics
valueFrom:
configMapKeyRef:
name: uptimekuma-config
key: server.disableusageanalytics
- name: password
valueFrom:
secretKeyRef:
name: uptimekuma-secret
key: password
volumes:
- name: uptimekuma-data
persistentVolumeClaim:
claimName: uptimekuma-data
---
apiVersion: v1
kind: Service
metadata:
name: uptimekuma
namespace: uptimekuma
spec:
type: ClusterIP
selector:
app: uptimekuma
ports:
- name: http
port: 3001
targetPort: 3001
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: uptimekuma-ingress
namespace: uptimekuma
spec:
entryPoints:
- websecure
routes:
- match: Host(`uptimekuma.apps.mngoma.lab`)
kind: Rule
services:
- name: uptimekuma
port: 3001
tls: {}

@@ -0,0 +1,88 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: whoami
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: whoami-sa
namespace: whoami
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: whoami-role
namespace: whoami
rules:
- apiGroups: [""]
resources: ["pods", "services"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: whoami-rolebinding
namespace: whoami
subjects:
- kind: ServiceAccount
name: whoami-sa
namespace: whoami
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: whoami-role
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: whoami
namespace: whoami
spec:
replicas: 1
selector:
matchLabels:
app: whoami
template:
metadata:
labels:
app: whoami
spec:
serviceAccountName: whoami-sa
containers:
- name: whoami
image: traefik/whoami
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: whoami
namespace: whoami
spec:
type: ClusterIP
selector:
app: whoami
ports:
- name: http
protocol: TCP
port: 80
targetPort: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: whoami-web
namespace: whoami
spec:
entryPoints:
- websecure
routes:
- match: Host(`whoami.apps.mngoma.lab`)
kind: Rule
services:
- name: whoami
port: 80
scheme: http
tls: {}

@@ -0,0 +1,191 @@
---
- name: Install dependencies, mount NFS at /home/{{ ansible_user }}/k3s, set up K3s, DNS, Traefik, and a venv for the k8s modules
hosts: all
become: yes
vars:
nfs_server: storage.mngoma.lab
nfs_export_path: /export/k3s
dns_server_ip: 192.168.1.151
dns_fallback_ip: 192.168.1.1
ansible_user_home: "/home/{{ ansible_user }}"
nfs_mount_path: "{{ ansible_user_home }}/k3s"
kube_config_path: "{{ ansible_user_home }}/.kube/config"
kube_venv_path: "/opt/kube-venv"
tasks:
- name: Configure systemd-resolved permanent DNS
copy:
dest: /etc/systemd/resolved.conf
content: |
[Resolve]
DNS={{ dns_server_ip }}
FallbackDNS={{ dns_fallback_ip }}
Domains=mngoma.lab
DNSStubListener=yes
owner: root
group: root
mode: '0644'
- name: Ensure /etc/resolv.conf points to systemd-resolved stub
file:
src: /run/systemd/resolve/stub-resolv.conf
dest: /etc/resolv.conf
state: link
force: yes
- name: Restart systemd-resolved to apply DNS changes
systemd:
name: systemd-resolved
state: restarted
enabled: yes
- name: Ensure NFS mount point exists
file:
path: "{{ nfs_mount_path }}"
state: directory
owner: "{{ ansible_user | default('root') }}"
group: "{{ ansible_user | default('root') }}"
mode: '0755'
- name: Mount NFS share immediately (direct, idempotent)
mount:
src: "{{ nfs_server }}:{{ nfs_export_path }}"
path: "{{ nfs_mount_path }}"
fstype: nfs
opts: defaults
state: mounted
- name: Ensure NFS mount persists across reboots (fstab entry)
mount:
src: "{{ nfs_server }}:{{ nfs_export_path }}"
path: "{{ nfs_mount_path }}"
fstype: nfs
opts: defaults
state: present
- name: Ensure .kube directory exists
file:
path: "{{ ansible_user_home }}/.kube"
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0700'
- name: Download latest kubectl
shell: |
arch=$(uname -m)
if [ "$arch" = "x86_64" ]; then arch="amd64";
elif [ "$arch" = "aarch64" ]; then arch="arm64";
fi
curl -Lo /usr/local/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/${arch}/kubectl"
chmod +x /usr/local/bin/kubectl
args:
creates: /usr/local/bin/kubectl
environment:
KUBECONFIG: "{{ kube_config_path }}"
- name: Install Helm
shell: |
curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
args:
creates: /usr/local/bin/helm
environment:
KUBECONFIG: "{{ kube_config_path }}"
- name: Download and install kustomize
shell: |
kustomize_ver=$(curl -s https://api.github.com/repos/kubernetes-sigs/kustomize/releases/latest | jq -r '.tag_name')
arch=$(uname -m)
if [ "$arch" = "x86_64" ]; then arch="amd64";
elif [ "$arch" = "aarch64" ]; then arch="arm64";
fi
url="https://github.com/kubernetes-sigs/kustomize/releases/download/${kustomize_ver}/kustomize_${kustomize_ver#kustomize/}_linux_${arch}.tar.gz"
tmpfile=$(mktemp)
curl -L -o "$tmpfile" "$url"
tar -xz -C /usr/local/bin -f "$tmpfile"
chmod +x /usr/local/bin/kustomize
rm -f "$tmpfile"
args:
creates: /usr/local/bin/kustomize
ignore_errors: true
environment:
KUBECONFIG: "{{ kube_config_path }}"
- name: Install kubectx and kubens
shell: |
git clone https://github.com/ahmetb/kubectx /opt/kubectx || true
ln -sf /opt/kubectx/kubectx /usr/local/bin/kubectx
ln -sf /opt/kubectx/kubens /usr/local/bin/kubens
environment:
KUBECONFIG: "{{ kube_config_path }}"
- name: Ensure systemd override directory for k3s exists
file:
path: /etc/systemd/system/k3s.service.d
state: directory
owner: root
group: root
mode: '0755'
- name: Override k3s server flags (k3s has no --traefik-arg option)
copy:
dest: /etc/systemd/system/k3s.service.d/10-k3s-server-flags.conf
content: |
[Service]
ExecStart=
ExecStart=/usr/local/bin/k3s server --disable=servicelb --kubelet-arg="cloud-provider=external"
owner: root
group: root
mode: '0644'
- name: Reload systemd to pick up k3s override
systemd:
daemon_reload: yes
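# k3s configures its bundled Traefik through a HelmChartConfig manifest that
# it auto-applies from /var/lib/rancher/k3s/server/manifests, not through k3s
# CLI flags. A minimal sketch of enabling the ping endpoint that way:
#
# - name: Enable Traefik ping via HelmChartConfig
#   copy:
#     dest: /var/lib/rancher/k3s/server/manifests/traefik-ping.yaml
#     content: |
#       apiVersion: helm.cattle.io/v1
#       kind: HelmChartConfig
#       metadata:
#         name: traefik
#         namespace: kube-system
#       spec:
#         valuesContent: |-
#           additionalArguments:
#             - "--ping=true"
#             - "--ping.entryPoint=web"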
# --- VENV FOR KUBERNETES MODULE ---
- name: Create venv for k8s ansible modules
command: python3 -m venv {{ kube_venv_path }}
args:
creates: "{{ kube_venv_path }}/bin/activate"
- name: Install kubernetes python library in venv
pip:
name: kubernetes
virtualenv: "{{ kube_venv_path }}"
virtualenv_python: python3
# The following play block will ONLY target the manager group
- name: Expose Traefik dashboard via IngressRoute (manager only)
hosts: manager # Change to your actual manager inventory group name
become: yes
vars:
ansible_user_home: "/home/{{ ansible_user }}"
kube_config_path: "{{ ansible_user_home }}/.kube/config"
kube_venv_path: "/opt/kube-venv"
tasks:
- name: Expose Traefik dashboard via IngressRoute (inline)
kubernetes.core.k8s:
state: present
definition:
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: traefik-dashboard
namespace: kube-system
spec:
entryPoints:
- web
- websecure
routes:
- match: Host(`dashboard.apps.mngoma.lab`)
kind: Rule
services:
- name: api@internal
kind: TraefikService
tls: {}
environment:
KUBECONFIG: "{{ kube_config_path }}"
vars:
ansible_python_interpreter: "{{ kube_venv_path }}/bin/python"

@@ -0,0 +1,37 @@
@echo off
REM =========================================
REM apply-cluster-snat.bat
REM Enables hairpin NAT for Kubernetes pods -> HAProxy host
REM =========================================
REM ---- Step 1: Enable IP forwarding ----
echo Enabling IPv4 forwarding...
powershell -Command "Set-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters' -Name 'IPEnableRouter' -Value 1"
echo IP forwarding enabled. Please reboot for permanent effect.
REM ---- Step 2: Add UFW NAT rules ----
echo Applying NAT rules for pod -> HAProxy hairpin...
REM Ensure before.rules file exists
set ufw_rules_file=/etc/ufw/before.rules
REM Backup original rules
if exist "%ufw_rules_file%.bak" (
echo Backup already exists.
) else (
copy "%ufw_rules_file%" "%ufw_rules_file%.bak"
echo Backup created at %ufw_rules_file%.bak
)
REM Append NAT rules
echo *nat >> "%ufw_rules_file%"
echo :POSTROUTING ACCEPT [0:0] >> "%ufw_rules_file%"
echo -A POSTROUTING -s 10.42.0.0/16 -d 192.168.1.160 -j MASQUERADE >> "%ufw_rules_file%"
echo COMMIT >> "%ufw_rules_file%"
REM ---- Step 3: Reload UFW ----
echo Reloading UFW...
ufw disable
ufw enable
echo Hairpin NAT applied successfully.
pause
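REM NOTE: Steps 2 and 3 above use Linux paths and the ufw CLI, which do not
REM exist under cmd.exe; they document what must run on the HAProxy host. A
REM minimal Linux-side sketch (assumes bash and iptables on that host):
REM   sysctl -w net.ipv4.ip_forward=1
REM   iptables -t nat -A POSTROUTING -s 10.42.0.0/16 -d 192.168.1.160 -j MASQUERADE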

@@ -0,0 +1,81 @@
# command: ansible-playbook -i config/<target manifest>.ini common/create-ansible-user.yml --ask-become-pass
# Note: this playbook needs --ask-become-pass (or an otherwise-supplied become password) for privilege escalation
---
- name: Create ansible user and configure passwordless sudo
hosts: all
become: true
become_method: sudo
vars:
ansible_user: khwezi
tasks:
- name: Ensure 'ansible' user exists
ansible.builtin.user:
name: ansible
groups: sudo
append: yes
shell: /bin/bash
state: present
- name: Check if passwordless sudo is already configured for 'ansible'
ansible.builtin.shell: |
grep -Fxq "ansible ALL=(ALL) NOPASSWD: ALL" /etc/sudoers.d/ansible
register: sudoers_check
ignore_errors: true
changed_when: false
- name: Allow 'ansible' user passwordless sudo
ansible.builtin.copy:
dest: /etc/sudoers.d/ansible
content: "ansible ALL=(ALL) NOPASSWD: ALL\n"
owner: root
group: root
mode: '0440'
when: sudoers_check.rc != 0
- name: Ensure /home/ansible/.ssh directory exists
ansible.builtin.file:
path: /home/ansible/.ssh
state: directory
owner: ansible
group: ansible
mode: '0700'
- name: Copy id_ed25519 private key to ansible user
ansible.builtin.copy:
src: ~/.ssh/id_ed25519
dest: /home/ansible/.ssh/id_ed25519
owner: ansible
group: ansible
mode: '0600'
- name: Copy id_ed25519 public key to ansible user
ansible.builtin.copy:
src: ~/.ssh/id_ed25519.pub
dest: /home/ansible/.ssh/id_ed25519.pub
owner: ansible
group: ansible
mode: '0644'
- name: Ensure authorized_keys exists
ansible.builtin.file:
path: /home/ansible/.ssh/authorized_keys
state: touch
owner: ansible
group: ansible
mode: '0600'
- name: Read public key content
ansible.builtin.slurp:
src: /home/ansible/.ssh/id_ed25519.pub
register: pubkey_content
- name: Ensure public key is present in authorized_keys
ansible.builtin.lineinfile:
path: /home/ansible/.ssh/authorized_keys
line: "{{ pubkey_content['content'] | b64decode | trim }}"
owner: ansible
group: ansible
mode: '0600'
create: yes
state: present
- name: Allow 'ansible' user to write to /etc/systemd/resolved.conf
ansible.builtin.file:
path: /etc/systemd/resolved.conf
owner: ansible
group: ansible
mode: '0664'
state: file
become: true
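# Note: copying the controller's private key onto every managed host lets each
# host SSH anywhere the controller can; per-host keys (or an ssh-agent on the
# controller only) are the safer pattern if that matters outside a lab.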

@@ -0,0 +1,86 @@
# command: ansible-playbook -i config/<target manifest>.ini common/install-docker.yml
---
- name: Install Docker and Test
hosts: all
become: true
become_method: sudo
tasks:
- name: Ensure required apt packages are installed
ansible.builtin.apt:
name:
- apt-transport-https
- ca-certificates
- curl
- gnupg
- lsb-release
state: present
update_cache: yes
- name: Ensure gpg is installed
ansible.builtin.apt:
name: gpg
state: present
- name: Remove old Docker keyring files if present
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /usr/share/keyrings/docker-archive-keyring.gpg
- /usr/share/keyrings/docker-archive-keyring.gpg.asc
- name: Download Docker's official GPG key (ASCII)
ansible.builtin.get_url:
url: https://download.docker.com/linux/ubuntu/gpg
dest: /usr/share/keyrings/docker-archive-keyring.gpg.asc
mode: '0644'
force: yes
- name: Convert Docker GPG key to binary format
ansible.builtin.command: >
gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg /usr/share/keyrings/docker-archive-keyring.gpg.asc
- name: Add Docker repository if not present (modern method)
ansible.builtin.apt_repository:
repo: "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable"
state: present
filename: docker
- name: Update apt cache after adding Docker repo
ansible.builtin.apt:
update_cache: yes
- name: Check if Docker is already installed
ansible.builtin.command: docker --version
register: docker_check
ignore_errors: true
changed_when: false
- name: Install Docker Engine
ansible.builtin.apt:
name:
- docker-ce
- docker-ce-cli
- containerd.io
state: present
when: docker_check.rc != 0
- name: Check Docker version (post-install)
ansible.builtin.command: docker --version
register: docker_version
changed_when: false
- name: Show Docker version
ansible.builtin.debug:
var: docker_version.stdout
- name: Run hello-world container to test Docker
ansible.builtin.command: docker run --name hello-test --rm hello-world
register: hello_world_output
changed_when: false
- name: Show hello-world output
ansible.builtin.debug:
var: hello_world_output.stdout

@@ -0,0 +1,28 @@
# command: ansible-playbook -i config/<target manifest>.ini common/update-docker.yml
---
- name: Update Docker only on hosts where it is installed
hosts: all
become: true
become_method: sudo
tasks:
- name: Check if Docker is installed
ansible.builtin.command: docker --version
register: docker_check
ignore_errors: true
changed_when: false
- name: Update Docker packages if installed
ansible.builtin.apt:
name:
- docker-ce
- docker-ce-cli
- containerd.io
state: latest
update_cache: yes
when: docker_check.rc == 0
- name: Debug message if Docker is not installed
ansible.builtin.debug:
msg: "Docker is not installed on this host. Skipping update."
when: docker_check.rc != 0

@@ -0,0 +1,19 @@
# command: ansible-playbook -i config/<target manifest>.ini common/update-hosts.yml
---
- name: Update and upgrade all apt packages
hosts: all
become: true
become_method: sudo
tasks:
- name: Update apt cache
ansible.builtin.apt:
update_cache: yes
- name: Upgrade all packages
ansible.builtin.apt:
upgrade: dist
- name: Autoremove unused packages
ansible.builtin.apt:
autoremove: yes

@@ -0,0 +1,31 @@
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=ansible
ansible_ssh_private_key_file=~/.ssh/id_ed25519
[shared]
sentry ansible_host=sentry.mngoma.lab
alpha ansible_host=alpha.lb.mngoma.lab
database ansible_host=database.mngoma.lab
vpn ansible_host=vpn.mngoma.lab
minecraft ansible_host=minecraft.mngoma.lab
#khongisa ansible_host=khongisa.mngoma.lab
#beta ansible_host=beta.lb.mngoma.lab
#dns ansible_host=dns.mngoma.lab
#storage ansible_host=storage.mngoma.lab
manager ansible_host=lead.mngoma.lab
worker ansible_host=worker1.mngoma.lab
#manager2 ansible_host=follow.mngoma.lab
#worker2 ansible_host=worker2.mngoma.lab
[makhiwanecluster]
manager ansible_host=lead.mngoma.lab
worker ansible_host=worker1.mngoma.lab
[mbubecluster]
#manager2 ansible_host=follow.mngoma.lab
#worker2 ansible_host=worker2.mngoma.lab
[loadbalancers]
alpha ansible_host=alpha.lb.mngoma.lab
#beta ansible_host=beta.lb.mngoma.lab

@@ -0,0 +1,11 @@
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=ansible
ansible_ssh_private_key_file=~/.ssh/id_ed25519
[cluster]
manager ansible_host=lead.mngoma.lab
worker ansible_host=worker1.mngoma.lab
[workers]
worker ansible_host=worker1.mngoma.lab

@@ -0,0 +1,11 @@
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=ansible
ansible_ssh_private_key_file=~/.ssh/id_ed25519
[cluster]
manager ansible_host=follow.mngoma.lab
worker ansible_host=worker2.mngoma.lab
[workers]
worker ansible_host=worker2.mngoma.lab

@@ -0,0 +1,20 @@
---
- name: Ensure manager node is running k3s / start if needed
hosts: manager
become: true
tasks:
- name: Start k3s service on manager
systemd:
name: k3s
state: started
enabled: true
- name: Ensure worker nodes are running k3s-agent / start if needed
hosts: workers
become: true
tasks:
- name: Start k3s-agent service on worker nodes
systemd:
name: k3s-agent
state: started
enabled: true

@@ -0,0 +1,256 @@
---
- name: Gather IPv4 address facts for all hosts
hosts: all
gather_facts: yes
vars:
dns_server: "192.168.1.151"
tasks:
- name: Configure systemd-resolved to use custom DNS
become: true
copy:
dest: /etc/systemd/resolved.conf
content: |
[Resolve]
DNS={{ dns_server }}
FallbackDNS=192.168.1.1
Domains=mngoma.lab
DNSStubListener=yes
owner: root
group: root
mode: "0644"
- name: Ensure systemd-resolved service is enabled and restarted
become: true
systemd:
name: systemd-resolved
state: restarted
enabled: yes
- name: Ensure host IPv4 address is available and stored as fact
set_fact:
node_ipv4: "{{ ansible_default_ipv4.address }}"
when: ansible_default_ipv4 is defined and ansible_default_ipv4.address is defined
- name: Fail if IPv4 address could not be determined
fail:
msg: "Could not determine IPv4 address for {{ inventory_hostname }}. Please check network configuration."
when: node_ipv4 is not defined
- name: Ensure /home/{{ ansible_user }}/k3s directory exists
become: yes
file:
path: /home/{{ ansible_user }}/k3s
state: directory
owner: root
group: root
mode: '0777'
- name: Initialise the K3s control plane (manager)
hosts: manager
become: yes
vars:
k3s_version: v1.29.4+k3s1
kubeconfig_dir: "/home/{{ ansible_user }}/.kube"
kubeconfig_file: "{{ kubeconfig_dir }}/config"
tasks:
- name: Install required apt dependencies
apt:
name:
- curl
- python3-pip
- python3-venv
state: present
update_cache: yes
- name: Create a Python virtual environment for Ansible k8s modules
command: python3 -m venv /opt/ansible-venv
args:
creates: /opt/ansible-venv
- name: Install Kubernetes and OpenShift libraries in the venv
pip:
name:
- kubernetes
- openshift
virtualenv: /opt/ansible-venv
- name: Install k3s on manager (control plane)
shell: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION={{ k3s_version }} sh -
args:
creates: /usr/local/bin/k3s
- name: Gather architecture for kubectl
set_fact:
kubectl_arch: "{{ {'x86_64': 'amd64', 'armv7l': 'arm', 'aarch64': 'arm64'}.get(ansible_architecture, ansible_architecture) }}"
- name: Download latest kubectl binary
shell: |
curl -Lo /usr/local/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/{{ kubectl_arch }}/kubectl"
args:
creates: /usr/local/bin/kubectl
- name: Make kubectl executable
file:
path: /usr/local/bin/kubectl
mode: '0755'
owner: root
group: root
- name: Validate kubectl version
shell: kubectl version --client --output=yaml
register: kubectl_version
changed_when: false
- name: Show kubectl version
debug:
var: kubectl_version.stdout
- name: Ensure .kube directory exists for ansible user
become_user: "{{ ansible_user }}"
file:
path: "{{ kubeconfig_dir }}"
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0700'
- name: Copy kubeconfig file to ansible user home
become: true
copy:
src: /etc/rancher/k3s/k3s.yaml
dest: "{{ kubeconfig_file }}"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0600'
remote_src: yes
- name: Replace server IP in kubeconfig with manager's IP address
become_user: "{{ ansible_user }}"
lineinfile:
path: "{{ kubeconfig_file }}"
regexp: 'server: https://127\.0\.0\.1:6443'
line: " server: https://{{ node_ipv4 }}:6443"
- name: Get the cluster join token
shell: cat /var/lib/rancher/k3s/server/node-token
register: k3s_token
changed_when: false
- name: Set fact for join token and manager IP
set_fact:
k3s_node_token: "{{ k3s_token.stdout }}"
manager_ip: "{{ node_ipv4 }}"
- name: Add the join token and manager IP to hostvars for workers
add_host:
name: "cluster_primary"
groups: join_info
k3s_node_token: "{{ k3s_node_token }}"
k3s_manager_ip: "{{ manager_ip }}"
- name: Install and join worker nodes to the control plane
hosts: workers
become: yes
vars:
k3s_version: v1.29.4+k3s1
tasks:
- name: Install required dependencies
apt:
name: [curl]
state: present
update_cache: yes
- name: Ensure /home/{{ ansible_user }}/k3s directory exists
file:
path: /home/{{ ansible_user }}/k3s
state: directory
owner: root
group: root
mode: '0777'
- name: Set manager join information
set_fact:
k3s_node_token: "{{ hostvars[groups['join_info'][0]]['k3s_node_token'] }}"
k3s_manager_ip: "{{ hostvars[groups['join_info'][0]]['k3s_manager_ip'] }}"
- name: Fail if manager's IP is not available
fail:
msg: "Could not determine manager's IP for joining cluster!"
when: k3s_manager_ip is not defined
- name: Install k3s agent (worker)
shell: |
curl -sfL https://get.k3s.io | K3S_URL=https://{{ k3s_manager_ip }}:6443 K3S_TOKEN={{ k3s_node_token }} INSTALL_K3S_VERSION={{ k3s_version }} sh -
args:
creates: /usr/local/bin/k3s-agent
- name: Gather architecture for kubectl
set_fact:
kubectl_arch: "{{ {'x86_64': 'amd64', 'armv7l': 'arm', 'aarch64': 'arm64'}.get(ansible_architecture, ansible_architecture) }}"
- name: Download latest kubectl binary (worker)
shell: |
curl -Lo /usr/local/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/{{ kubectl_arch }}/kubectl"
args:
creates: /usr/local/bin/kubectl
- name: Make kubectl executable (worker)
file:
path: /usr/local/bin/kubectl
mode: '0755'
owner: root
group: root
- name: Verify cluster nodes from manager node and configure storage
hosts: manager
become: yes
vars:
kubeconfig_file: "/home/{{ ansible_user }}/.kube/config"
tasks:
- name: Wait for all worker nodes to join the cluster
become_user: "{{ ansible_user }}"
shell: |
for i in $(seq 1 10); do
[ $(kubectl --kubeconfig={{ kubeconfig_file }} get nodes | grep -c worker) -ge 1 ] && exit 0
sleep 15
done
exit 1
register: wait_worker
failed_when: wait_worker.rc != 0
- name: List all k3s nodes
become_user: "{{ ansible_user }}"
shell: kubectl --kubeconfig={{ kubeconfig_file }} get nodes -o wide
register: all_nodes
- name: Show current k3s cluster nodes
debug:
var: all_nodes.stdout
- name: Create StorageClass for /home/ansible/k3s INLINE
vars:
ansible_python_interpreter: /opt/ansible-venv/bin/python
kubernetes.core.k8s:
kubeconfig: "{{ kubeconfig_file }}"
definition:
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-pvs
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
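# With volumeBindingMode: WaitForFirstConsumer, PVCs using local-pvs stay
# Pending until a pod that mounts them is scheduled; a quick check once the
# play finishes: kubectl get storageclass local-pvs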

@@ -0,0 +1,131 @@
# create-loadbalancer.yml
- name: Install and configure HAProxy with SSL termination and managed DNS
hosts: alpha
become: yes
vars:
ssl_cert_path: "/etc/ssl/certs/haproxy.pem"
dns_server: "192.168.1.151"
tasks:
- name: Install HAProxy and dependencies
apt:
name:
- haproxy
- openssl
state: present
update_cache: yes
- name: Ensure cert directory exists
file:
path: /etc/ssl/certs
state: directory
owner: root
group: root
mode: '0755'
- name: Generate private key for HAProxy
community.crypto.openssl_privatekey:
path: /etc/ssl/certs/haproxy.key
size: 2048
type: RSA
mode: '0600'
- name: Generate a Certificate Signing Request (CSR) for HAProxy
community.crypto.openssl_csr:
path: /etc/ssl/certs/haproxy.csr
privatekey_path: /etc/ssl/certs/haproxy.key
common_name: "{{ inventory_hostname }}"
subject_alt_name:
- "DNS:{{ inventory_hostname }}"
mode: "0644"
- name: Generate self-signed certificate for HAProxy
community.crypto.x509_certificate:
path: /etc/ssl/certs/haproxy.crt
privatekey_path: /etc/ssl/certs/haproxy.key
csr_path: /etc/ssl/certs/haproxy.csr
provider: selfsigned
selfsigned_not_after: "+365d"
mode: "0644"
- name: Combine key and cert into .pem file for HAProxy
shell: cat /etc/ssl/certs/haproxy.key /etc/ssl/certs/haproxy.crt > {{ ssl_cert_path }}
args:
creates: "{{ ssl_cert_path }}"
- name: Configure systemd-resolved to use custom DNS
become: true
copy:
dest: /etc/systemd/resolved.conf
content: |
[Resolve]
DNS={{ dns_server }}
FallbackDNS=192.168.1.1
Domains=mngoma.lab
DNSStubListener=yes
owner: root
group: root
mode: "0644"
- name: Ensure systemd-resolved service is enabled and restarted
become: true
systemd:
name: systemd-resolved
state: restarted
enabled: yes
- name: Upload custom haproxy.cfg with SSL termination and HTTPS-only backend
copy:
dest: /etc/haproxy/haproxy.cfg
content: |
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin
user haproxy
group haproxy
daemon
tune.ssl.default-dh-param 2048
defaults
log global
mode http
option httplog
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
option forwardfor
resolvers dns
nameserver dns1 {{ dns_server }}:53
resolve_retries 3
timeout resolve 2s
timeout retry 1s
hold valid 10s
frontend https_front
bind *:443 ssl crt {{ ssl_cert_path }}
mode http
option forwardfor
http-request set-header X-Forwarded-Proto https
http-request set-header Host %[req.hdr(host)]
default_backend app_clusters
backend app_clusters
mode http
balance roundrobin
option httpchk GET /
# treat any non-5xx response as healthy
http-check expect rstatus ^[1-4][0-9][0-9]
server lead_https lead.mngoma.lab:443 resolvers dns resolve-prefer ipv4 check ssl verify none
owner: root
group: root
mode: "0644"
- name: Enable and start haproxy
systemd:
name: haproxy
state: restarted
enabled: yes
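# A useful guard to slot in before the restart task (sketch): validate the
# rendered config with HAProxy's built-in checker so a typo cannot take the
# load balancer down.
# - name: Validate haproxy.cfg
#   command: haproxy -c -f /etc/haproxy/haproxy.cfg
#   changed_when: false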

@@ -0,0 +1,35 @@
---
- name: Drain and stop worker nodes first
hosts: workers
become: true
tasks:
- name: Drain worker node (optional - requires kubectl access)
shell: kubectl drain {{ inventory_hostname }} --ignore-daemonsets --delete-emptydir-data || true
delegate_to: manager
ignore_errors: yes
- name: Stop k3s-agent service on worker
systemd:
name: k3s-agent
state: stopped
enabled: false
- name: Poweroff worker node
shell: shutdown -h now
async: 0
poll: 0
- name: Stop and poweroff the manager node
hosts: manager
become: true
tasks:
- name: Stop k3s (server) service
systemd:
name: k3s
state: stopped
enabled: false
- name: Poweroff manager node
shell: shutdown -h now
async: 0
poll: 0

168
k3s/stacks/dashy.yaml Normal file
@@ -0,0 +1,168 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: dashy
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: dashy-sa
namespace: dashy
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: dashy-role
namespace: dashy
rules:
- apiGroups: [""]
resources: ["pods", "services", "endpoints"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: dashy-rolebinding
namespace: dashy
subjects:
- kind: ServiceAccount
name: dashy-sa
namespace: dashy
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: dashy-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: dashy-config-pv
labels:
type: local
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/dashy
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: dashy-config-pvc
namespace: dashy
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: dashy-pvc
namespace: dashy
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 1Gi
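# Note: dashy-pvc is not referenced by the Deployment below; only
# dashy-config-pvc is mounted.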
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dashy
namespace: dashy
spec:
replicas: 1
selector:
matchLabels:
app: dashy
template:
metadata:
labels:
app: dashy
spec:
serviceAccountName: dashy-sa
containers:
- name: dashy
image: lissy93/dashy:latest
ports:
- containerPort: 8080
resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "250m"
memory: "256Mi"
volumeMounts:
- name: dashy-config
mountPath: /app/data
startupProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 300
periodSeconds: 10
failureThreshold: 18
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 300
periodSeconds: 10
failureThreshold: 18
timeoutSeconds: 10
volumes:
- name: dashy-config
persistentVolumeClaim:
claimName: dashy-config-pvc
---
apiVersion: v1
kind: Service
metadata:
name: dashy
namespace: dashy
spec:
type: ClusterIP
selector:
app: dashy
ports:
- name: web
protocol: TCP
port: 80
targetPort: 8080
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: dashy-web
namespace: dashy
spec:
entryPoints:
- websecure
routes:
- match: Host(`dashboard.apps.mngoma.lab`)
kind: Rule
services:
- name: dashy
port: 80
scheme: http
tls: {}

331
k3s/stacks/droneci.yml Normal file
@@ -0,0 +1,331 @@
---
# Namespace for Drone CI
apiVersion: v1
kind: Namespace
metadata:
name: droneci
---
# Service Account
apiVersion: v1
kind: ServiceAccount
metadata:
name: droneci-sa
namespace: droneci
---
# ConfigMap for Drone configuration
apiVersion: v1
kind: ConfigMap
metadata:
name: droneci-config
namespace: droneci
data:
server.domain: "droneci.apps.mngoma.lab"
server.proto: "https"
server.runnername: "drone-runner"
server.runnercapacity: "2"
server.runnernetworks: "default"
database.type: "postgres"
database.host: "192.168.1.137:5432"
database.name: "dronecim"
gitea.server: "https://gitea.apps.mngoma.lab"
gitea.server.internal: "https://gitea-server.gitea.svc.cluster.local"
---
# Secret for Drone credentials
apiVersion: v1
kind: Secret
metadata:
name: droneci-secret
namespace: droneci
type: Opaque
data:
server.rpctoken: MDFLNlFHTkE4VEMxQjJGVzNGV0JSWDJFNE4=
database.username: YXBwX3VzZXI=
database.password: MTIzNDU=
database.connectstring: cG9zdGdyZXM6Ly9hcHBfdXNlcjoxMjM0NUAxOTIuMTY4LjEuMTM3OjU0MzIvZHJvbmVjaW0/c3NsbW9kZT1kaXNhYmxl
gitea.clientid: MGRiNTliZDAtMGI3Ni00ODgxLThhODQtNjI0N2ZlYTExOTcz
gitea.clientsecret: Z3RvX3l6bXB6NmJvZG52cmRnMnM1MmVmNWF1c3ozZTYzNGdyeTc0MjJqZ2hwd3ZnbGc2M2JtcnE=
---
# Persistent Volume for Drone data
apiVersion: v1
kind: PersistentVolume
metadata:
name: droneci-pv
labels:
type: local
spec:
capacity:
storage: 5Gi
accessModes: ["ReadWriteOnce"]
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/droneci
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values: ["lead"]
persistentVolumeReclaimPolicy: Retain
---
# Persistent Volume Claim
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: droneci-pvc
namespace: droneci
spec:
accessModes: ["ReadWriteOnce"]
storageClassName: local-pvs
resources:
requests:
storage: 5Gi
---
# Drone Server Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone
namespace: droneci
labels:
app.kubernetes.io/name: drone
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: drone
template:
metadata:
labels:
app.kubernetes.io/name: drone
spec:
serviceAccountName: droneci-sa
hostAliases:
- ip: "192.168.1.160"
hostnames:
- "gitea.apps.mngoma.lab"
containers:
- name: drone
image: drone/drone:latest
ports:
- containerPort: 80
name: http
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "512Mi"
cpu: "500m"
env:
- name: DRONE_SERVER_HOST
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.domain
- name: DRONE_SERVER_PROTO
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.proto
- name: DRONE_SERVER_PORT
value: ":80"
- name: DRONE_TLS_AUTOCERT
value: "false"
- name: DRONE_LOGS_DEBUG
value: "true"
- name: DRONE_RPC_SECRET
valueFrom:
secretKeyRef:
name: droneci-secret
key: server.rpctoken
- name: DRONE_DATABASE_DRIVER
valueFrom:
configMapKeyRef:
name: droneci-config
key: database.type
- name: DRONE_DATABASE_DATASOURCE
valueFrom:
secretKeyRef:
name: droneci-secret
key: database.connectstring
- name: DRONE_DB_USER
valueFrom:
secretKeyRef:
name: droneci-secret
key: database.username
- name: DRONE_DB_PASS
valueFrom:
secretKeyRef:
name: droneci-secret
key: database.password
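# Note: the datasource string above already carries the DB credentials;
# DRONE_DB_USER / DRONE_DB_PASS are not documented drone server settings and
# appear unused.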
- name: DRONE_GITEA_SERVER
valueFrom:
configMapKeyRef:
name: droneci-config
key: gitea.server
- name: DRONE_GITEA_CLIENT_ID
valueFrom:
secretKeyRef:
name: droneci-secret
key: gitea.clientid
- name: DRONE_GITEA_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: droneci-secret
key: gitea.clientsecret
- name: DRONE_GITEA_SKIP_VERIFY
value: "true"
volumeMounts:
- name: drone-storage
mountPath: /data
volumes:
- name: drone-storage
persistentVolumeClaim:
claimName: droneci-pvc
---
# Drone Server Service
apiVersion: v1
kind: Service
metadata:
name: drone-server
namespace: droneci
spec:
selector:
app.kubernetes.io/name: drone
ports:
- name: http
port: 80
targetPort: 80
type: ClusterIP
---
# Drone Runner Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone-runner
namespace: droneci
labels:
app.kubernetes.io/name: drone-runner
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: drone-runner
template:
metadata:
labels:
app.kubernetes.io/name: drone-runner
spec:
serviceAccountName: droneci-sa
hostAliases:
- ip: "192.168.1.160"
hostnames:
- "droneci.apps.mngoma.lab"
containers:
- name: runner
image: drone/drone-runner-kube:latest
ports:
- containerPort: 3000
env:
# propagate SSL skip and internal Gitea to ephemeral pods
- name: DRONE_RUNNER_ENV_VARS
valueFrom:
configMapKeyRef:
name: droneci-config
key: gitea.server.internal
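# Note: the value stored under gitea.server.internal is a bare URL; if the
# intent is to inject variables into pipeline pods, the runner expects
# comma-separated KEY=value pairs here.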
- name: DRONE_RPC_HOST
value: drone-server.droneci.svc.cluster.local
- name: DRONE_RPC_PROTO
value: "http"
- name: DRONE_RPC_SECRET
valueFrom:
secretKeyRef:
name: droneci-secret
key: server.rpctoken
- name: DRONE_RUNNER_NAME
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.runnername
- name: DRONE_RUNNER_CAPACITY
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.runnercapacity
- name: DRONE_RUNNER_NETWORKS
valueFrom:
configMapKeyRef:
name: droneci-config
key: server.runnernetworks
resources:
requests:
memory: "128Mi"
cpu: "200m"
limits:
memory: "256Mi"
cpu: "400m"
---
# Drone IngressRoute for Traefik
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: droneci-web
namespace: droneci
spec:
entryPoints:
- websecure
routes:
- match: Host(`droneci.apps.mngoma.lab`)
kind: Rule
services:
- name: drone-server
port: 80
scheme: http
tls: {}
---
# ClusterRole for Drone CI Service Account
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: droneci-cluster-role
rules:
- apiGroups: [""] # core API
resources: ["pods", "pods/exec", "pods/log", "services", "endpoints", "configmaps", "secrets", "persistentvolumeclaims", "namespaces"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["apps"]
resources: ["deployments", "replicasets", "statefulsets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["batch"]
resources: ["jobs", "cronjobs"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["extensions", "networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
# ClusterRoleBinding for Drone CI Service Account
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: droneci-cluster-rolebinding
subjects:
- kind: ServiceAccount
name: droneci-sa
namespace: droneci
roleRef:
kind: ClusterRole
name: droneci-cluster-role
apiGroup: rbac.authorization.k8s.io

170
k3s/stacks/flame.yml Normal file
View File

@@ -0,0 +1,170 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: flame
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flame-sa
namespace: flame
---
apiVersion: v1
kind: Secret
metadata:
name: flame-secret
namespace: flame
type: Opaque
data:
app.password: MTIzNDU=
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: flame-role
namespace: flame
rules:
- apiGroups: [""]
resources: ["pods", "services", "endpoints"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: flame-rolebinding
namespace: flame
subjects:
- kind: ServiceAccount
name: flame-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: flame-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: flame-config-pv
labels:
type: local
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/flame
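  # a local PV is node-bound: the affinity below must select the node that hosts this path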
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: flame-config-pvc
namespace: flame
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 1Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: flame
namespace: flame
spec:
replicas: 1
selector:
matchLabels:
app: flame
template:
metadata:
labels:
app: flame
spec:
serviceAccountName: flame-sa
containers:
- name: flame
image: pawelmalak/flame
env:
- name: PASSWORD
valueFrom:
secretKeyRef:
name: flame-secret
key: app.password
ports:
- containerPort: 5005
resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "250m"
memory: "256Mi"
volumeMounts:
- name: flame-config
mountPath: /app/data
startupProbe:
httpGet:
path: /
port: 5005
initialDelaySeconds: 60
periodSeconds: 10
failureThreshold: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: 5005
initialDelaySeconds: 60
periodSeconds: 10
failureThreshold: 10
timeoutSeconds: 5
volumes:
- name: flame-config
persistentVolumeClaim:
claimName: flame-config-pvc
---
apiVersion: v1
kind: Service
metadata:
name: flame
namespace: flame
spec:
type: ClusterIP
selector:
app: flame
ports:
- name: web
protocol: TCP
port: 80
targetPort: 5005
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: flame-web
namespace: flame
spec:
entryPoints:
- websecure
routes:
- match: Host(`dashboard.apps.mngoma.lab`)
kind: Rule
services:
- name: flame
port: 80
scheme: http
tls: {}

208
k3s/stacks/gitea.yml Normal file
View File

@@ -0,0 +1,208 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: gitea
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: gitea-sa
namespace: gitea
---
apiVersion: v1
kind: ConfigMap
metadata:
name: gitea-config
namespace: gitea
data:
server.domain: "gitea.apps.mngoma.lab"
server.rooturl: "https://gitea.apps.mngoma.lab"
database.type: "postgres"
database.host: "192.168.1.137:5432"
database.name: "giteam"
---
apiVersion: v1
kind: Secret
metadata:
name: gitea-secret
namespace: gitea
type: Opaque
data:
database.username: YXBwX3VzZXI=
database.password: MTIzNDU=
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: gitea-role
namespace: gitea
rules:
- apiGroups: [""]
resources: ["pods", "services", "endpoints", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: gitea-rolebinding
namespace: gitea
subjects:
- kind: ServiceAccount
name: gitea-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: gitea-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: gitea-pv
labels:
type: local
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/gitea
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: gitea-pvc
namespace: gitea
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 5Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: gitea
namespace: gitea
labels:
app.kubernetes.io/name: gitea-server
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: gitea-server
template:
metadata:
labels:
app.kubernetes.io/name: gitea-server
spec:
serviceAccountName: gitea-sa
containers:
- name: gitea
image: gitea/gitea:latest
ports:
- containerPort: 3000
- containerPort: 22
volumeMounts:
- name: gitea-data
mountPath: /data
env:
- name: USER_UID
value: "1000"
- name: USER_GID
value: "1000"
        - name: GITEA__server__ROOT_URL
valueFrom:
configMapKeyRef:
name: gitea-config
key: server.rooturl
        - name: GITEA__server__DOMAIN
valueFrom:
configMapKeyRef:
name: gitea-config
key: server.domain
- name: GITEA__database__TYPE
valueFrom:
configMapKeyRef:
name: gitea-config
key: database.type
- name: GITEA__database__HOST
valueFrom:
configMapKeyRef:
name: gitea-config
key: database.host
- name: GITEA__database__USER
valueFrom:
secretKeyRef:
name: gitea-secret
key: database.username
- name: GITEA__database__PASSWD
valueFrom:
secretKeyRef:
name: gitea-secret
key: database.password
- name: GITEA__database__NAME
valueFrom:
configMapKeyRef:
name: gitea-config
key: database.name
resources:
requests:
memory: "512Mi"
cpu: "250m"
limits:
memory: "2Gi"
cpu: "500m"
volumes:
- name: gitea-data
persistentVolumeClaim:
claimName: gitea-pvc
---
apiVersion: v1
kind: Service
metadata:
name: gitea-server
namespace: gitea
spec:
selector:
app.kubernetes.io/name: gitea-server
ports:
- name: http
protocol: TCP
port: 3000
targetPort: 3000
- name: ssh
protocol: TCP
port: 22
targetPort: 22
type: ClusterIP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: gitea-web
namespace: gitea
spec:
entryPoints:
- websecure
routes:
- match: Host(`gitea.apps.mngoma.lab`)
kind: Rule
services:
- name: gitea-server
port: 3000
scheme: http
tls: {}

156
k3s/stacks/mariadb.yml Normal file
View File

@@ -0,0 +1,156 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: mariadb
---
apiVersion: v1
kind: Secret
metadata:
name: mariadb-secret
namespace: mariadb
type: Opaque
data:
root.password: UDRvMzBB
database.username: cm9vdA==
database.password: NXBFMjZa
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: mariadb-sa
namespace: mariadb
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: mariadb-role
namespace: mariadb
rules:
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: mariadb-rolebinding
namespace: mariadb
subjects:
- kind: ServiceAccount
name: mariadb-sa
namespace: mariadb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: mariadb-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: mariadb-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/mariadb
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mariadb-pvc
namespace: mariadb
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mariadb
namespace: mariadb
spec:
replicas: 1
selector:
matchLabels:
app: mariadb
template:
metadata:
labels:
app: mariadb
spec:
serviceAccountName: mariadb-sa
containers:
- name: mariadb
image: mariadb:11
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: mariadb-secret
key: root.password
- name: MYSQL_USER
valueFrom:
secretKeyRef:
name: mariadb-secret
key: database.username
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: mariadb-secret
key: database.password
ports:
- containerPort: 3306
volumeMounts:
- mountPath: /var/lib/mysql
name: mariadb-data
volumes:
- name: mariadb-data
persistentVolumeClaim:
claimName: mariadb-pvc
---
apiVersion: v1
kind: Service
metadata:
name: mariadb
namespace: mariadb
spec:
type: ClusterIP
selector:
app: mariadb
ports:
- port: 3306
targetPort: 3306
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: mariadb-ingress
namespace: mariadb
spec:
entryPoints:
- websecure
routes:
- match: Host(`mariadb.database.mngoma.lab`)
kind: Rule
services:
- name: mariadb
port: 3306
tls: {}

166
k3s/stacks/mongodb.yml Normal file
View File

@@ -0,0 +1,166 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: mongodb
---
apiVersion: v1
kind: Secret
metadata:
name: mongodb-secret
namespace: mongodb
type: Opaque
data:
root.username: YWRtaW4=
root.password: bGpUMTkx
username: YXBwdXNlcg==
password: VTNlNzRy
---
apiVersion: v1
kind: ConfigMap
metadata:
name: mongodb-config
namespace: mongodb
data:
database.name: "appdb"
database.replicaset: "primary"
database.port: "27017"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: mongodb-sa
namespace: mongodb
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: mongodb-role
namespace: mongodb
rules:
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: mongodb-rolebinding
namespace: mongodb
subjects:
- kind: ServiceAccount
name: mongodb-sa
namespace: mongodb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: mongodb-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongodb-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/mongodb
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mongodb-pvc
namespace: mongodb
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mongodb
namespace: mongodb
spec:
replicas: 1
selector:
matchLabels:
app: mongodb
template:
metadata:
labels:
app: mongodb
spec:
serviceAccountName: mongodb-sa
containers:
- name: mongodb
image: mongo:6
env:
- name: MONGO_INITDB_ROOT_USERNAME
valueFrom:
secretKeyRef:
name: mongodb-secret
key: root.username
- name: MONGO_INITDB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: mongodb-secret
key: root.password
- name: MONGO_INITDB_DATABASE
valueFrom:
configMapKeyRef:
name: mongodb-config
key: database.name
ports:
- containerPort: 27017
volumeMounts:
- mountPath: /data/db
name: mongodb-data
volumes:
- name: mongodb-data
persistentVolumeClaim:
claimName: mongodb-pvc
---
apiVersion: v1
kind: Service
metadata:
name: mongodb
namespace: mongodb
spec:
type: ClusterIP
selector:
app: mongodb
ports:
- port: 27017
targetPort: 27017
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: mongodb-ingress
namespace: mongodb
spec:
entryPoints:
- websecure
routes:
- match: Host(`mongodb.database.mngoma.lab`)
kind: Rule
services:
- name: mongodb
port: 27017
tls: {}

196
k3s/stacks/nextcloud.yml Normal file
View File

@@ -0,0 +1,196 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: nextcloud
---
apiVersion: v1
kind: ConfigMap
metadata:
name: nextcloud-config
namespace: nextcloud
data:
server.trusteddomains: "nextcloud.apps.mngoma.lab"
database.createdbuser: "false"
database.host: "192.168.1.137"
database.name: "nextcloudm"
---
apiVersion: v1
kind: Secret
metadata:
name: nextcloud-secret
namespace: nextcloud
type: Opaque
data:
root.username: a2h3ZXpp
root.password: QmxhY2tzdGFyMkBob21l
database.username: YXBwX3VzZXI=
database.password: MTIzNDU=
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nextcloud-sa
namespace: nextcloud
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: nextcloud-role
namespace: nextcloud
rules:
- apiGroups: [""]
resources: ["pods", "services", "endpoints", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: nextcloud-rolebinding
namespace: nextcloud
subjects:
- kind: ServiceAccount
name: nextcloud-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nextcloud-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nextcloud-pv
labels:
type: local
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/nextcloud
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nextcloud-pvc
namespace: nextcloud
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 5Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nextcloud
namespace: nextcloud
spec:
replicas: 1
selector:
matchLabels:
app: nextcloud
template:
metadata:
labels:
app: nextcloud
spec:
serviceAccountName: nextcloud-sa
containers:
- name: nextcloud
image: nextcloud:27.1.7
ports:
- containerPort: 80
volumeMounts:
- name: nextcloud-data
mountPath: /var/www/html
env:
- name: NEXTCLOUD_ADMIN_USER
valueFrom:
secretKeyRef:
name: nextcloud-secret
key: root.username
- name: NEXTCLOUD_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: nextcloud-secret
key: root.password
- name: NEXTCLOUD_TRUSTED_DOMAINS
valueFrom:
configMapKeyRef:
name: nextcloud-config
key: server.trusteddomains
- name: POSTGRES_HOST
valueFrom:
configMapKeyRef:
name: nextcloud-config
key: database.host
- name: POSTGRES_DB
valueFrom:
configMapKeyRef:
name: nextcloud-config
key: database.name
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: nextcloud-secret
key: database.username
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: nextcloud-secret
key: database.password
- name: NC_SETUP_CREATE_DB_USER
valueFrom:
configMapKeyRef:
name: nextcloud-config
key: database.createdbuser
volumes:
- name: nextcloud-data
persistentVolumeClaim:
claimName: nextcloud-pvc
---
apiVersion: v1
kind: Service
metadata:
name: nextcloud
namespace: nextcloud
spec:
type: ClusterIP
selector:
app: nextcloud
ports:
- name: http
protocol: TCP
port: 80
targetPort: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: nextcloud-web
namespace: nextcloud
spec:
entryPoints:
- websecure
routes:
- match: Host(`nextcloud.apps.mngoma.lab`)
kind: Rule
services:
- name: nextcloud
port: 80
scheme: http
tls: {}

101
k3s/stacks/nosqlclient.yml Normal file
View File

@@ -0,0 +1,101 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: nosqlclient
---
apiVersion: v1
kind: Secret
metadata:
name: nosqlclient-secret
namespace: nosqlclient
type: Opaque
data:
mongodb-uri: bW9uZ29kYjovL2FkbWluOkJsYWNrc3RhcjIlNDBob21lQGRhdGFiYXNlLm1uZ29tYS5sYWI6MjcwMTcvYWRtaW4=
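  # base64 of a mongodb:// connection URI; reserved characters in the password must be percent-encoded (e.g. "@" becomes "%40")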
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nosqlclient-sa
namespace: nosqlclient
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: nosqlclient-role
namespace: nosqlclient
rules:
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: nosqlclient-rolebinding
namespace: nosqlclient
subjects:
- kind: ServiceAccount
name: nosqlclient-sa
namespace: nosqlclient
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nosqlclient-role
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nosqlclient
namespace: nosqlclient
spec:
replicas: 1
selector:
matchLabels:
app: nosqlclient
template:
metadata:
labels:
app: nosqlclient
spec:
serviceAccountName: nosqlclient-sa
containers:
- name: nosqlclient
image: mongoclient/mongoclient:latest
env:
- name: MONGO_URL
valueFrom:
secretKeyRef:
name: nosqlclient-secret
key: mongodb-uri
ports:
- containerPort: 3000
---
apiVersion: v1
kind: Service
metadata:
name: nosqlclient
namespace: nosqlclient
spec:
type: ClusterIP
selector:
app: nosqlclient
ports:
- port: 3000
targetPort: 3000
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: nosqlclient-ingress
namespace: nosqlclient
spec:
entryPoints:
- websecure
routes:
- match: Host(`mongodb.apps.mngoma.lab`)
kind: Rule
services:
- name: nosqlclient
port: 3000
tls: {}

159
k3s/stacks/pgadmin.yml Normal file
View File

@@ -0,0 +1,159 @@
apiVersion: v1
kind: Namespace
metadata:
name: pgadmin
---
apiVersion: v1
kind: ConfigMap
metadata:
name: pgadmin-config
namespace: pgadmin
data:
server.email: "khwezi@mngoma.lab"
---
apiVersion: v1
kind: Secret
metadata:
name: pgadmin-secret
namespace: pgadmin
type: Opaque
data:
server.password: M3pDQTQz
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: pgadmin-sa
namespace: pgadmin
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: pgadmin-role
namespace: pgadmin
rules:
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: pgadmin-rolebinding
namespace: pgadmin
subjects:
- kind: ServiceAccount
name: pgadmin-sa
namespace: pgadmin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: pgadmin-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pgadmin-pv
spec:
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/pgadmin
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pgadmin-pvc
namespace: pgadmin
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 2Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: pgadmin
namespace: pgadmin
spec:
replicas: 1
selector:
matchLabels:
app: pgadmin
template:
metadata:
labels:
app: pgadmin
spec:
serviceAccountName: pgadmin-sa
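      # the dpage/pgadmin4 image runs as UID/GID 5050; fsGroup lets that user write the data volume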
securityContext:
runAsUser: 5050
runAsGroup: 5050
fsGroup: 5050
containers:
- name: pgadmin
image: dpage/pgadmin4:latest
ports:
- containerPort: 80
volumeMounts:
- name: pgadmin-data
mountPath: /var/lib/pgadmin
env:
- name: PGADMIN_DEFAULT_EMAIL
valueFrom:
configMapKeyRef:
name: pgadmin-config
key: server.email
- name: PGADMIN_DEFAULT_PASSWORD
valueFrom:
secretKeyRef:
name: pgadmin-secret
key: server.password
volumes:
- name: pgadmin-data
persistentVolumeClaim:
claimName: pgadmin-pvc
---
apiVersion: v1
kind: Service
metadata:
name: pgadmin
namespace: pgadmin
spec:
type: ClusterIP
selector:
app: pgadmin
ports:
- port: 80
targetPort: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: pgadmin-ingress
namespace: pgadmin
spec:
entryPoints:
- websecure
routes:
- match: Host(`pgadmin.apps.mngoma.lab`)
kind: Rule
services:
- name: pgadmin
port: 80
tls: {}

126
k3s/stacks/phpmyadmin.yml Normal file
View File

@@ -0,0 +1,126 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: phpmyadmin
---
apiVersion: v1
kind: ConfigMap
metadata:
name: phpmyadmin-config
namespace: phpmyadmin
data:
database.address: "192.168.1.137"
database.port: "3306"
---
apiVersion: v1
kind: Secret
metadata:
name: mysql-secret
namespace: phpmyadmin
type: Opaque
data:
username: cm9vdA==
password: QmxhY2tzdGFyMkBob21l
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: phpmyadmin-sa
namespace: phpmyadmin
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: phpmyadmin-role
namespace: phpmyadmin
rules:
- apiGroups: [""]
resources: ["pods", "services", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: phpmyadmin-rolebinding
namespace: phpmyadmin
subjects:
- kind: ServiceAccount
name: phpmyadmin-sa
namespace: phpmyadmin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: phpmyadmin-role
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: phpmyadmin
namespace: phpmyadmin
spec:
replicas: 1
selector:
matchLabels:
app: phpmyadmin
template:
metadata:
labels:
app: phpmyadmin
spec:
serviceAccountName: phpmyadmin-sa
containers:
- name: phpmyadmin
image: phpmyadmin/phpmyadmin:latest
ports:
- containerPort: 80
env:
- name: PMA_HOST
valueFrom:
configMapKeyRef:
name: phpmyadmin-config
key: database.address
- name: PMA_PORT
valueFrom:
configMapKeyRef:
name: phpmyadmin-config
key: database.port
- name: PMA_USER
valueFrom:
secretKeyRef:
name: mysql-secret
key: username
- name: PMA_PASSWORD
valueFrom:
secretKeyRef:
name: mysql-secret
key: password
---
apiVersion: v1
kind: Service
metadata:
name: phpmyadmin
namespace: phpmyadmin
spec:
type: ClusterIP
selector:
app: phpmyadmin
ports:
- port: 80
targetPort: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: phpmyadmin-ingress
namespace: phpmyadmin
spec:
entryPoints:
- websecure
routes:
- match: Host(`phpmyadmin.apps.mngoma.lab`)
kind: Rule
services:
- name: phpmyadmin
port: 80
tls: {}

127
k3s/stacks/portainer.yml Normal file
View File

@@ -0,0 +1,127 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: portainer
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: portainer-sa
namespace: portainer
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: portainer-admin-binding
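# cluster-admin keeps a homelab setup simple; a scoped ClusterRole would be tighter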
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: portainer-sa
namespace: portainer
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: portainer-pv
labels:
type: local
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/portainer
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: portainer-pvc
namespace: portainer
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: portainer
namespace: portainer
spec:
replicas: 1
selector:
matchLabels:
app: portainer
template:
metadata:
labels:
app: portainer
spec:
serviceAccountName: portainer-sa
containers:
- name: portainer
image: portainer/portainer-ce:2.33.2
ports:
- containerPort: 9000
- containerPort: 9443
volumeMounts:
- name: data
mountPath: /data
volumes:
- name: data
persistentVolumeClaim:
claimName: portainer-pvc
---
apiVersion: v1
kind: Service
metadata:
name: portainer
namespace: portainer
spec:
type: ClusterIP
selector:
app: portainer
ports:
- name: http
protocol: TCP
port: 9000
targetPort: 9000
- name: https
protocol: TCP
port: 9443
targetPort: 9443
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: portainer-dashboard
namespace: portainer
spec:
entryPoints:
- websecure
routes:
- match: Host(`portainer.apps.mngoma.lab`)
kind: Rule
services:
- name: portainer
port: 9000
scheme: http
tls: {}

149
k3s/stacks/postgresql.yml Normal file
View File

@@ -0,0 +1,149 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: postgresql
---
apiVersion: v1
kind: Secret
metadata:
name: postgresql-secret
namespace: postgresql
type: Opaque
data:
username: cm9vdA==
password: Mmh2MTdL
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: postgresql-sa
namespace: postgresql
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: postgresql-role
namespace: postgresql
rules:
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: postgresql-rolebinding
namespace: postgresql
subjects:
- kind: ServiceAccount
name: postgresql-sa
namespace: postgresql
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: postgresql-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: postgresql-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/postgresql
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgresql-pvc
namespace: postgresql
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: postgresql
namespace: postgresql
spec:
replicas: 1
selector:
matchLabels:
app: postgresql
template:
metadata:
labels:
app: postgresql
spec:
serviceAccountName: postgresql-sa
containers:
- name: postgresql
image: postgres:16
env:
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: postgresql-secret
key: username
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: postgresql-secret
key: password
ports:
- containerPort: 5432
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgresql-data
volumes:
- name: postgresql-data
persistentVolumeClaim:
claimName: postgresql-pvc
---
apiVersion: v1
kind: Service
metadata:
name: postgresql
namespace: postgresql
spec:
type: ClusterIP
selector:
app: postgresql
ports:
- port: 5432
targetPort: 5432
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: postgresql-ingress
namespace: postgresql
spec:
entryPoints:
- websecure
routes:
- match: Host(`postgresql.database.mngoma.lab`)
kind: Rule
services:
- name: postgresql
port: 5432
tls: {}

107
k3s/stacks/redis.yml Normal file
View File

@@ -0,0 +1,107 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: redis
---
apiVersion: v1
kind: Secret
metadata:
name: redis-secret
namespace: redis
type: Opaque
data:
username: YWRtaW4=
password: NjI4akZL
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: redis-sa
namespace: redis
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: redis-role
namespace: redis
rules:
- apiGroups: [""]
resources: ["pods", "services"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: redis-rolebinding
namespace: redis
subjects:
- kind: ServiceAccount
name: redis-sa
namespace: redis
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: redis-role
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis
namespace: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
serviceAccountName: redis-sa
containers:
- name: redis
image: redis:7
ports:
- containerPort: 6379
env:
- name: REDIS_USERNAME
valueFrom:
secretKeyRef:
name: redis-secret
key: username
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: redis-secret
key: password
---
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: redis
spec:
type: ClusterIP
selector:
app: redis
ports:
- port: 6379
targetPort: 6379
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: redis-ingress
namespace: redis
spec:
entryPoints:
- websecure
routes:
- match: Host(`redis.database.mngoma.lab`)
kind: Rule
services:
- name: redis
port: 6379
tls: {}

189
k3s/stacks/redisinsight.yml Normal file
View File

@@ -0,0 +1,189 @@
apiVersion: v1
kind: Namespace
metadata:
name: redisinsight
---
apiVersion: v1
kind: ConfigMap
metadata:
name: redisinsight-config
namespace: redisinsight
data:
database.host: "192.168.1.137"
database.port: "6379"
database.instance: "redis"
RI_LOG_LEVEL: "info"
RI_ALLOW_PRIVILEGED: "true"
RI_TELEMETRY: "false"
---
apiVersion: v1
kind: Secret
metadata:
name: redis-secret
namespace: redisinsight
type: Opaque
data:
password: NjI4akZL
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: redisinsight-sa
namespace: redisinsight
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: redisinsight-role
namespace: redisinsight
rules:
- apiGroups: [""]
resources: ["pods", "services", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: redisinsight-rolebinding
namespace: redisinsight
subjects:
- kind: ServiceAccount
name: redisinsight-sa
namespace: redisinsight
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: redisinsight-role
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: redisinsight-pv
spec:
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/redisinsight
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redisinsight-pvc
namespace: redisinsight
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 2Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: redisinsight
namespace: redisinsight
spec:
replicas: 1
selector:
matchLabels:
app: redisinsight
template:
metadata:
labels:
app: redisinsight
spec:
serviceAccountName: redisinsight-sa
containers:
- name: redisinsight
image: redislabs/redisinsight:latest
ports:
- containerPort: 8001
volumeMounts:
- name: redisinsight-data
mountPath: /db
env:
- name: RI_APP_HOST
value: "0.0.0.0"
- name: RI_APP_PORT
value: "8001"
- name: RI_LOG_LEVEL
valueFrom:
configMapKeyRef:
name: redisinsight-config
key: RI_LOG_LEVEL
- name: RI_ALLOW_PRIVILEGED
valueFrom:
configMapKeyRef:
name: redisinsight-config
key: RI_ALLOW_PRIVILEGED
- name: RI_TELEMETRY
valueFrom:
configMapKeyRef:
name: redisinsight-config
key: RI_TELEMETRY
- name: RI_DATABASE_0_NAME
valueFrom:
configMapKeyRef:
name: redisinsight-config
key: database.instance
- name: RI_DATABASE_0_HOST
valueFrom:
configMapKeyRef:
name: redisinsight-config
key: database.host
- name: RI_DATABASE_0_PORT
valueFrom:
configMapKeyRef:
name: redisinsight-config
key: database.port
- name: RI_DATABASE_0_PASSWORD
valueFrom:
secretKeyRef:
name: redis-secret
key: password
volumes:
- name: redisinsight-data
persistentVolumeClaim:
claimName: redisinsight-pvc
---
apiVersion: v1
kind: Service
metadata:
name: redisinsight
namespace: redisinsight
spec:
type: ClusterIP
selector:
app: redisinsight
ports:
- port: 8001
    targetPort: 8001 # matches RI_APP_PORT set on the container; RedisInsight v2 otherwise defaults to 5540
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: redisinsight-ingress
namespace: redisinsight
spec:
entryPoints:
- websecure
routes:
- match: Host(`redisinsight.apps.mngoma.lab`)
kind: Rule
services:
- name: redisinsight
port: 8001
tls: {}

163
k3s/stacks/registry-ui.yml Normal file
View File

@@ -0,0 +1,163 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: registry-ui
---
apiVersion: v1
kind: Secret
metadata:
name: registry-credentials
namespace: registry-ui
type: Opaque
data:
username: YXBwX3VzZXI=
password: MTIzNDU=
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: registry-ui-pv
spec:
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/registry-ui
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: registry-ui-pvc
namespace: registry-ui
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 2Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: registry-ui
namespace: registry-ui
spec:
replicas: 1
selector:
matchLabels:
app: registry-ui
template:
metadata:
labels:
app: registry-ui
spec:
containers:
- name: registry-ui
image: joxit/docker-registry-ui:main
ports:
- containerPort: 80
env:
- name: SINGLE_REGISTRY
value: "true"
- name: REGISTRY_TITLE
value: "Docker Registry UI"
- name: DELETE_IMAGES
value: "true"
- name: SHOW_CONTENT_DIGEST
value: "true"
- name: SHOW_CATALOG_NB_TAGS
value: "true"
- name: CATALOG_MIN_BRANCHES
value: "1"
- name: CATALOG_MAX_BRANCHES
value: "1"
- name: TAGLIST_PAGE_SIZE
value: "100"
- name: REGISTRY_SECURED
value: "false"
- name: CATALOG_ELEMENTS_LIMIT
value: "1000"
- name: NGINX_PROXY_PASS_URL
value: "http://registry-server.registry.svc.cluster.local:5000"
- name: REGISTRY_AUTH_USER
valueFrom:
secretKeyRef:
name: registry-credentials
key: username
- name: REGISTRY_AUTH_PASS
valueFrom:
secretKeyRef:
name: registry-credentials
key: password
volumeMounts:
- name: registry-ui-data
mountPath: /data
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "512Mi"
cpu: "500m"
volumes:
- name: registry-ui-data
persistentVolumeClaim:
claimName: registry-ui-pvc
---
apiVersion: v1
kind: Service
metadata:
name: registry-ui
namespace: registry-ui
spec:
selector:
app: registry-ui
ports:
- port: 80
targetPort: 80
type: ClusterIP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: registry-ui-ingress
namespace: registry-ui
spec:
entryPoints:
- websecure
routes:
- match: Host(`registry-ui.apps.mngoma.lab`)
kind: Rule
services:
- name: registry-ui
port: 80
tls: {}
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: registry-ui-insecure
namespace: registry-ui
spec:
entryPoints:
- web
routes:
- match: Host(`registry-ui.apps.mngoma.lab`)
kind: Rule
services:
- name: registry-ui
port: 80

170
k3s/stacks/registry.yml Normal file
View File

@@ -0,0 +1,170 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: registry
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: registry-pv
spec:
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/registry
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: registry-pvc
namespace: registry
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: Secret
metadata:
name: registry-http-secret
namespace: registry
type: Opaque
data:
http-secret: ZDlmOTNjOGEyMmQ2NDMyZWE4YTMwYTBkNDc5ZjBhMWY=
---
apiVersion: v1
kind: Secret
metadata:
name: registry-basic-auth
namespace: registry
type: Opaque
data:
users: YXBwX3VzZXI6JGFwcjEkMTIzNDUk
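  # expected format: base64 of an htpasswd line, e.g. generated with
  #   htpasswd -nb <user> '<password>' | base64 -w0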
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: registry
namespace: registry
spec:
replicas: 1
selector:
matchLabels:
app: registry
template:
metadata:
labels:
app: registry
spec:
containers:
- name: registry
image: registry:2.8.2
ports:
- containerPort: 5000
name: http
env:
- name: REGISTRY_STORAGE_DELETE_ENABLED
value: "true"
- name: REGISTRY_HTTP_SECRET
valueFrom:
secretKeyRef:
name: registry-http-secret
key: http-secret
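        # CORS headers below let the registry UI origin call the registry API from the browser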
- name: REGISTRY_HTTP_HEADERS_Access-Control-Allow-Origin
value: '["https://registry-ui.apps.mngoma.lab","https://registry.apps.mngoma.lab"]'
- name: REGISTRY_HTTP_HEADERS_Access-Control-Allow-Methods
value: '["HEAD","GET","OPTIONS","DELETE","PUT","POST"]'
- name: REGISTRY_HTTP_HEADERS_Access-Control-Allow-Credentials
value: '["true"]'
- name: REGISTRY_HTTP_HEADERS_Access-Control-Allow-Headers
value: '["Authorization","Accept","Cache-Control","Content-Type","X-Requested-With"]'
- name: REGISTRY_HTTP_HEADERS_Access-Control-Expose-Headers
value: '["Docker-Content-Digest"]'
volumeMounts:
- name: registry-data
mountPath: /var/lib/registry
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "512Mi"
cpu: "500m"
volumes:
- name: registry-data
persistentVolumeClaim:
claimName: registry-pvc
---
apiVersion: v1
kind: Service
metadata:
name: registry-server
namespace: registry
spec:
selector:
app: registry
ports:
- name: http
port: 5000
targetPort: 5000
type: ClusterIP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: registry-server-ingress
namespace: registry
spec:
entryPoints:
- websecure
routes:
- match: Host(`registry.apps.mngoma.lab`)
kind: Rule
middlewares:
- name: registry-basic-auth
services:
- name: registry-server
port: 5000
tls: {}
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: registry-server-insecure
namespace: registry
spec:
entryPoints:
- web
routes:
- match: Host(`registry.apps.mngoma.lab`)
kind: Rule
services:
- name: registry-server
port: 5000
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: registry-basic-auth
namespace: registry
spec:
basicAuth:
secret: registry-basic-auth
removeHeader: true

72
k3s/stacks/searxng.yml Normal file
View File

@@ -0,0 +1,72 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: searxng
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: searxng-sa
namespace: searxng
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: searxng
namespace: searxng
labels:
app.kubernetes.io/name: searxng-server
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: searxng-server
template:
metadata:
labels:
app.kubernetes.io/name: searxng-server
spec:
serviceAccountName: searxng-sa
containers:
- name: searxng
image: searxng/searxng:latest
ports:
- containerPort: 8080
env:
- name: SEARXNG_SERVER_HOST
value: "0.0.0.0"
- name: SEARXNG_SERVER_PORT
value: "8080"
---
apiVersion: v1
kind: Service
metadata:
name: searxng-server
namespace: searxng
spec:
selector:
app.kubernetes.io/name: searxng-server
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
type: ClusterIP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: searxng-web
namespace: searxng
spec:
entryPoints:
- websecure
routes:
- match: Host(`searxng.apps.mngoma.lab`)
kind: Rule
services:
- name: searxng-server
port: 8080
scheme: http
tls: {}

141
k3s/stacks/uptime-kuma.yml Normal file
View File

@@ -0,0 +1,141 @@
apiVersion: v1
kind: Namespace
metadata:
name: uptimekuma
---
apiVersion: v1
kind: ConfigMap
metadata:
name: uptimekuma-config
namespace: uptimekuma
data:
server.port: "3001"
server.disableusageanalytics: "true"
---
apiVersion: v1
kind: Secret
metadata:
name: uptimekuma-secret
namespace: uptimekuma
type: Opaque
data:
password: MWhEMjBn
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: uptimekuma-pv
labels:
type: local
spec:
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/uptimekuma
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: uptimekuma-data
namespace: uptimekuma
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 2Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: uptimekuma
namespace: uptimekuma
spec:
replicas: 1
selector:
matchLabels:
app: uptimekuma
template:
metadata:
labels:
app: uptimekuma
spec:
containers:
- name: uptimekuma
image: louislam/uptime-kuma:latest
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3001
volumeMounts:
- mountPath: /app/data
name: uptimekuma-data
env:
- name: PORT
valueFrom:
configMapKeyRef:
name: uptimekuma-config
key: server.port
- name: server.disableusageanalytics
valueFrom:
configMapKeyRef:
name: uptimekuma-config
key: server.disableusageanalytics
- name: password
valueFrom:
secretKeyRef:
name: uptimekuma-secret
key: password
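        # note: uptime-kuma reads PORT, but the two vars above appear unused by the app; admin credentials are created in the web UI on first run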
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "512Mi"
cpu: "500m"
volumes:
- name: uptimekuma-data
persistentVolumeClaim:
claimName: uptimekuma-data
---
apiVersion: v1
kind: Service
metadata:
name: uptimekuma
namespace: uptimekuma
spec:
type: ClusterIP
selector:
app: uptimekuma
ports:
- name: http
port: 3001
targetPort: 3001
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: uptimekuma-ingress
namespace: uptimekuma
spec:
entryPoints:
- websecure
routes:
- match: Host(`uptimekuma.apps.mngoma.lab`)
kind: Rule
services:
- name: uptimekuma
port: 3001
tls: {}

181
k3s/stacks/wandbox.yml Normal file
View File

@@ -0,0 +1,181 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: wandbox
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: wandbox-sa
namespace: wandbox
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: wandbox-role
namespace: wandbox
rules:
- apiGroups: [""]
resources: ["pods", "services", "endpoints", "persistentvolumeclaims", "configmaps", "secrets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: wandbox-rolebinding
namespace: wandbox
subjects:
- kind: ServiceAccount
name: wandbox-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: wandbox-role
---
apiVersion: v1
kind: ConfigMap
metadata:
name: wandbox-config
namespace: wandbox
data:
wandbox.domain: "wandbox.apps.mngoma.lab"
wandbox.rooturl: "https://wandbox.apps.mngoma.lab"
wandbox.port: "5000"
---
apiVersion: v1
kind: Secret
metadata:
name: wandbox-secret
namespace: wandbox
type: Opaque
data:
api.key: cG1HeW9xUlBCYW1qdndRV2FRbzZWME9CdmJLS3BFS1RhWlF0bDRndUhMSGpYQlZwc0Y3dnJPZXhXMTNIRWFDRg==
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: wandbox-pv
labels:
type: local
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
local:
path: /home/ansible/k3s/makhiwane/wandbox
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- lead
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: wandbox-pvc
namespace: wandbox
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-pvs
resources:
requests:
storage: 5Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: wandbox
namespace: wandbox
labels:
app.kubernetes.io/name: wandbox
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: wandbox
template:
metadata:
labels:
app.kubernetes.io/name: wandbox
spec:
serviceAccountName: wandbox-sa
containers:
- name: wandbox
image: melpon/wandbox:latest
ports:
- containerPort: 5000
env:
- name: WANDBOX_PORT
valueFrom:
configMapKeyRef:
name: wandbox-config
key: wandbox.port
- name: WANDBOX_ROOTURL
valueFrom:
configMapKeyRef:
name: wandbox-config
key: wandbox.rooturl
- name: WANDBOX_DOMAIN
valueFrom:
configMapKeyRef:
name: wandbox-config
key: wandbox.domain
- name: API_KEY
valueFrom:
secretKeyRef:
name: wandbox-secret
key: api.key
volumeMounts:
- name: wandbox-data
mountPath: /data
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "1Gi"
cpu: "500m"
volumes:
- name: wandbox-data
persistentVolumeClaim:
claimName: wandbox-pvc
---
apiVersion: v1
kind: Service
metadata:
name: wandbox-service
namespace: wandbox
spec:
selector:
app.kubernetes.io/name: wandbox
ports:
- name: http
protocol: TCP
port: 80
targetPort: 5000
type: ClusterIP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: wandbox-web
namespace: wandbox
spec:
entryPoints:
- websecure
routes:
- match: Host(`wandbox.apps.mngoma.lab`)
kind: Rule
services:
- name: wandbox-service
port: 80
scheme: http
tls: {}

88
k3s/stacks/whoami.yml Normal file
View File

@@ -0,0 +1,88 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: whoami
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: whoami-sa
namespace: whoami
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: whoami-role
namespace: whoami
rules:
- apiGroups: [""]
resources: ["pods", "services"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: whoami-rolebinding
namespace: whoami
subjects:
- kind: ServiceAccount
name: whoami-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: whoami-role
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: whoami
namespace: whoami
spec:
replicas: 1
selector:
matchLabels:
app: whoami
template:
metadata:
labels:
app: whoami
spec:
serviceAccountName: whoami-sa
containers:
- name: whoami
image: traefik/whoami
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: whoami
namespace: whoami
spec:
type: ClusterIP
selector:
app: whoami
ports:
- name: http
protocol: TCP
port: 80
targetPort: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: whoami-web
namespace: whoami
spec:
entryPoints:
- websecure
routes:
- match: Host(`whoami.apps.mngoma.lab`)
kind: Rule
services:
- name: whoami
port: 80
scheme: http
tls: {}

1
k8s Submodule

Submodule k8s added at 35786df734

7
minecraft/config.ini Normal file
View File

@@ -0,0 +1,7 @@
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=ansible
ansible_ssh_private_key_file=~/.ssh/id_ed25519
[gameservers]
minecraft ansible_host=minecraft.mngoma.lab

View File

@@ -0,0 +1,105 @@
---
- name: Full Minecraft Server Setup (Paper + Geyser + Floodgate)
hosts: gameservers
become: yes
vars:
minecraft_user: minecraft
minecraft_home: /opt/minecraft
paper_version: "1.21.1"
paper_build: "130"
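    # the G1 options below are the widely shared "Aikar's flags" tuning for Paper servers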
java_opts: "-Xms1G -Xmx3G -XX:+UseG1GC -XX:+ParallelRefProcEnabled -XX:MaxGCPauseMillis=200 -XX:+UnlockExperimentalVMOptions -XX:+DisableExplicitGC -XX:G1NewSizePercent=30 -XX:G1MaxNewSizePercent=40 -XX:G1HeapRegionSize=8M -XX:G1ReservePercent=20 -XX:G1HeapWastePercent=5 -XX:G1MixedGCCountTarget=4 -XX:InitiatingHeapOccupancyPercent=15 -XX:G1MixedGCLiveThresholdPercent=90 -XX:G1RSetUpdatingPauseTimePercent=5 -XX:SurvivorRatio=32 -XX:+PerfDisableSharedMem -XX:MaxTenuringThreshold=1"
tasks:
- name: Ensure required packages
apt:
name:
- openjdk-21-jdk
- curl
- unzip
state: present
update_cache: yes
- name: Create minecraft user
user:
name: "{{ minecraft_user }}"
shell: /bin/bash
create_home: yes
home: "{{ minecraft_home }}"
    - name: Create server directory
      file:
        path: "{{ minecraft_home }}"
        state: directory
        owner: "{{ minecraft_user }}"
        group: "{{ minecraft_user }}"
        mode: '0755'
    - name: Create plugins directory (get_url below does not create parent dirs)
      file:
        path: "{{ minecraft_home }}/plugins"
        state: directory
        owner: "{{ minecraft_user }}"
        group: "{{ minecraft_user }}"
        mode: '0755'
- name: Download PaperMC server
get_url:
url: "https://api.papermc.io/v2/projects/paper/versions/{{ paper_version }}/builds/{{ paper_build }}/downloads/paper-{{ paper_version }}-{{ paper_build }}.jar"
dest: "{{ minecraft_home }}/paper.jar"
owner: "{{ minecraft_user }}"
group: "{{ minecraft_user }}"
mode: '0755'
- name: Download Geyser-Spigot plugin
get_url:
url: "https://ci.opencollab.dev/job/GeyserMC/job/Geyser/job/master/lastSuccessfulBuild/artifact/bootstrap/spigot/target/Geyser-Spigot.jar"
dest: "{{ minecraft_home }}/plugins/Geyser-Spigot.jar"
owner: "{{ minecraft_user }}"
group: "{{ minecraft_user }}"
mode: '0644'
- name: Download Floodgate plugin
get_url:
url: "https://ci.opencollab.dev/job/GeyserMC/job/Floodgate/job/master/lastSuccessfulBuild/artifact/spigot/build/libs/floodgate-spigot.jar"
dest: "{{ minecraft_home }}/plugins/floodgate-spigot.jar"
owner: "{{ minecraft_user }}"
group: "{{ minecraft_user }}"
mode: '0644'
- name: Create start.sh script
copy:
dest: "{{ minecraft_home }}/start.sh"
content: |
#!/bin/bash
cd {{ minecraft_home }}
exec java {{ java_opts }} -jar paper.jar nogui
owner: "{{ minecraft_user }}"
group: "{{ minecraft_user }}"
mode: '0755'
- name: Accept EULA
copy:
dest: "{{ minecraft_home }}/eula.txt"
content: "eula=true"
owner: "{{ minecraft_user }}"
group: "{{ minecraft_user }}"
mode: '0644'
- name: Create systemd service
copy:
dest: /etc/systemd/system/minecraft.service
content: |
[Unit]
Description=Minecraft Server
After=network.target
[Service]
User={{ minecraft_user }}
WorkingDirectory={{ minecraft_home }}
ExecStart={{ minecraft_home }}/start.sh
Restart=on-failure
RestartSec=10
[Install]
WantedBy=multi-user.target
mode: '0644'
- name: Reload systemd and enable Minecraft service
systemd:
daemon_reload: yes
name: minecraft
enabled: yes
state: started

View File

@@ -0,0 +1,80 @@
---
- name: Update Minecraft Server
hosts: minecraft
become: true
vars:
minecraft_user: minecraft
minecraft_home: /opt/minecraft
mc_service_name: minecraft
mc_version_manifest: https://piston-meta.mojang.com/mc/game/version_manifest.json
backup_dir: "{{ minecraft_home }}/backup"
timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
tasks:
- name: Ensure jq and wget are installed
apt:
name: [ wget, jq ]
state: present
update_cache: true
- name: Stop Minecraft service
systemd:
name: "{{ mc_service_name }}"
state: stopped
- name: Create backup directory
file:
path: "{{ backup_dir }}"
state: directory
owner: "{{ minecraft_user }}"
group: "{{ minecraft_user }}"
mode: "0755"
- name: Backup current server.jar
copy:
src: "{{ minecraft_home }}/server.jar"
dest: "{{ backup_dir }}/server.jar.{{ timestamp }}"
remote_src: true
ignore_errors: yes # in case it's missing (first run)
    - name: Check whether the world folder exists
      stat:
        path: "{{ minecraft_home }}/world"
      register: world_dir
    - name: Backup world data folder
      archive:
        path: "{{ minecraft_home }}/world"
        dest: "{{ backup_dir }}/world-{{ timestamp }}.tar.gz"
        format: gz
      become_user: "{{ minecraft_user }}"
      when: world_dir.stat.exists
- name: Download latest Minecraft server JAR
shell: |
set -e
cd "{{ minecraft_home }}"
latest_url=$(wget -qO- {{ mc_version_manifest }} | jq -r '.latest.release as $v | .versions[] | select(.id == $v) | .url')
server_url=$(wget -qO- "$latest_url" | jq -r '.downloads.server.url')
wget -qO server.jar "$server_url"
args:
executable: /bin/bash
- name: Ensure correct ownership of files
file:
path: "{{ minecraft_home }}"
owner: "{{ minecraft_user }}"
group: "{{ minecraft_user }}"
recurse: true
- name: Start Minecraft service
systemd:
name: "{{ mc_service_name }}"
state: started
- name: Verify service is running
systemd:
name: "{{ mc_service_name }}"
state: started
enabled: true
- name: Display success message
debug:
msg: "Minecraft server updated and restarted successfully on {{ inventory_hostname }}"

30
sampleapi/.dockerignore Normal file
View File

@@ -0,0 +1,30 @@
**/.classpath
**/.dockerignore
**/.env
**/.git
**/.gitignore
**/.project
**/.settings
**/.toolstarget
**/.vs
**/.vscode
**/*.*proj.user
**/*.dbmdl
**/*.jfm
**/azds.yaml
**/bin
**/charts
**/docker-compose*
**/Dockerfile*
**/node_modules
**/npm-debug.log
**/obj
**/secrets.dev.yaml
**/values.dev.yaml
LICENSE
README.md
!**/.gitignore
!.git/HEAD
!.git/config
!.git/packed-refs
!.git/refs/heads/**

101
sampleapi/.drone.yml Normal file
View File

@@ -0,0 +1,101 @@
---
kind: pipeline
type: kubernetes
name: build
# disable built-in clone step
clone:
disable: true
steps:
# git clone code base
- name: git clone
image: drone/git
commands:
- git clone http://gitea.main.k3s.lab.mngoma.africa/mngomalab/sampleapi.git .
- git checkout $DRONE_COMMIT
# dotnet restore
- name: dotnet restore
image: mcr.microsoft.com/dotnet/sdk:8.0
commands:
- dotnet restore
# dotnet build
- name: dotnet build
image: mcr.microsoft.com/dotnet/sdk:8.0
commands:
- dotnet build --configuration Release
- ls ./SampleApi/bin/Release/net8.0/
# dotnet test
- name: dotnet test
image: mcr.microsoft.com/dotnet/sdk:8.0
commands:
- dotnet test --configuration Release
---
kind: pipeline
type: kubernetes
name: package
depends_on:
- build
# disable built-in clone step
clone:
disable: true
steps:
# git clone code base
- name: git clone
image: drone/git
commands:
- git clone http://gitea.main.k3s.lab.mngoma.africa/mngomalab/sampleapi.git .
- git checkout $DRONE_COMMIT
# dotnet publish
- name: dotnet publish
image: mcr.microsoft.com/dotnet/sdk:8.0
commands:
- dotnet publish --configuration Release
- ls ./SampleApi/bin/Release/net8.0/publish/
# build and push docker image
- name: docker build and push
image: plugins/docker
settings:
repo: khwezi/mngomalab
auto_tag: true
username:
from_secret: docker_username
password:
from_secret: docker_password
dockerfile: Dockerfile
context: ./SampleApi/bin/Release/net8.0/publish/
---
kind: pipeline
type: kubernetes
name: deploy
depends_on:
- package
# disable built-in clone step
clone:
disable: true
steps:
# git clone code base
- name: git clone
image: drone/git
commands:
- git clone http://gitea.main.k3s.lab.mngoma.africa/mngomalab/sampleapi.git .
- git checkout $DRONE_COMMIT
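  # apply the Kubernetes deployment template via the kube plugin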
- name: deploy
image: danielgormly/drone-plugin-kube:0.0.1
settings:
template: ./templates/deployment.yaml
ca:
from_secret: kube_ca_cert
server: https://192.168.1.161:6443
token:
from_secret: k3s_token

63
sampleapi/.gitattributes vendored Normal file
View File

@@ -0,0 +1,63 @@
###############################################################################
# Set default behavior to automatically normalize line endings.
###############################################################################
* text=auto
###############################################################################
# Set default behavior for command prompt diff.
#
# This is needed for earlier builds of msysgit that do not have it on by
# default for csharp files.
# Note: This is only used by command line
###############################################################################
#*.cs diff=csharp
###############################################################################
# Set the merge driver for project and solution files
#
# Merging from the command prompt will add diff markers to the files if there
# are conflicts (Merging from VS is not affected by the settings below, in VS
# the diff markers are never inserted). Diff markers may cause the following
# file extensions to fail to load in VS. An alternative would be to treat
# these files as binary and thus will always conflict and require user
# intervention with every merge. To do so, just uncomment the entries below
###############################################################################
#*.sln merge=binary
#*.csproj merge=binary
#*.vbproj merge=binary
#*.vcxproj merge=binary
#*.vcproj merge=binary
#*.dbproj merge=binary
#*.fsproj merge=binary
#*.lsproj merge=binary
#*.wixproj merge=binary
#*.modelproj merge=binary
#*.sqlproj merge=binary
#*.wwaproj merge=binary
###############################################################################
# behavior for image files
#
# image files are treated as binary by default.
###############################################################################
#*.jpg binary
#*.png binary
#*.gif binary
###############################################################################
# diff behavior for common document formats
#
# Convert binary document formats to text before diffing them. This feature
# is only available from the command line. Turn it on by uncommenting the
# entries below.
###############################################################################
#*.doc diff=astextplain
#*.DOC diff=astextplain
#*.docx diff=astextplain
#*.DOCX diff=astextplain
#*.dot diff=astextplain
#*.DOT diff=astextplain
#*.pdf diff=astextplain
#*.PDF diff=astextplain
#*.rtf diff=astextplain
#*.RTF diff=astextplain

365
sampleapi/.gitignore vendored Normal file
View File

@@ -0,0 +1,365 @@
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
##
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
# User-specific files
*.rsuser
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Mono auto generated files
mono_crash.*
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
[Ww][Ii][Nn]32/
[Aa][Rr][Mm]/
[Aa][Rr][Mm]64/
bld/
[Bb]in/
[Oo]bj/
[Oo]ut/
[Ll]og/
[Ll]ogs/
# Visual Studio 2015/2017 cache/options directory
.vs/
.vscode/
.vscode/*
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/
# Visual Studio 2017 auto generated files
Generated\ Files/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
# NUnit
*.VisualState.xml
TestResult.xml
nunit-*.xml
# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c
# Benchmark Results
BenchmarkDotNet.Artifacts/
# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/
# ASP.NET Scaffolding
ScaffoldingReadMe.txt
# StyleCop
StyleCopReport.xml
# Files built by Visual Studio
*_i.c
*_p.c
*_h.h
*.ilk
*.meta
*.obj
*.iobj
*.pch
*.pdb
*.ipdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*_wpftmp.csproj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Chutzpah Test files
_Chutzpah*
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# Visual Studio Trace Files
*.e2e
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user
# TeamCity is a build add-in
_TeamCity*
# DotCover is a Code Coverage Tool
*.dotCover
# AxoCover is a Code Coverage Tool
.axoCover/*
!.axoCover/settings.json
# Coverlet is a free, cross platform Code Coverage Tool
coverage*.json
coverage*.xml
coverage*.info
# Visual Studio code coverage results
*.coverage
*.coveragexml
# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*
# MightyMoose
*.mm.*
AutoTest.Net/
# Web workbench (sass)
.sass-cache/
# Installshield output folder
[Ee]xpress/
# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html
# Click-Once directory
publish/
# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# Note: Comment the next line if you want to checkin your web deploy settings,
# but database connection strings (with potential passwords) will be unencrypted
*.pubxml
*.publishproj
# Microsoft Azure Web App publish settings. Comment the next line if you want to
# checkin your Azure Web App publish settings, but sensitive information contained
# in these scripts will be unencrypted
PublishScripts/
# NuGet Packages
*.nupkg
# NuGet Symbol Packages
*.snupkg
# The packages folder can be ignored because of Package Restore
**/[Pp]ackages/*
# except build/, which is used as an MSBuild target.
!**/[Pp]ackages/build/
# Uncomment if necessary however generally it will be regenerated when needed
#!**/[Pp]ackages/repositories.config
# NuGet v3's project.json files produces more ignorable files
*.nuget.props
*.nuget.targets
# Microsoft Azure Build Output
csx/
*.build.csdef
# Microsoft Azure Emulator
ecf/
rcf/
# Windows Store app package directories and files
AppPackages/
BundleArtifacts/
Package.StoreAssociation.xml
_pkginfo.txt
*.appx
*.appxbundle
*.appxupload
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!?*.[Cc]ache/
# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs
# Including strong name files can present a security risk
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
#*.snk
# Since there are multiple workflows, uncomment next line to ignore bower_components
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
#bower_components/
# RIA/Silverlight projects
Generated_Code/
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
ServiceFabricBackup/
*.rptproj.bak
# SQL Server files
*.mdf
*.ldf
*.ndf
# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
*.rptproj.rsuser
*- [Bb]ackup.rdl
*- [Bb]ackup ([0-9]).rdl
*- [Bb]ackup ([0-9][0-9]).rdl
# Microsoft Fakes
FakesAssemblies/
# GhostDoc plugin setting file
*.GhostDoc.xml
# Node.js Tools for Visual Studio
.ntvs_analysis.dat
node_modules/
# Visual Studio 6 build log
*.plg
# Visual Studio 6 workspace options file
*.opt
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
*.vbw
# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions
# Paket dependency manager
.paket/paket.exe
paket-files/
# FAKE - F# Make
.fake/
# CodeRush personal settings
.cr/personal
# Python Tools for Visual Studio (PTVS)
__pycache__/
*.pyc
# Cake - Uncomment if you are using it
# tools/**
# !tools/packages.config
# Tabs Studio
*.tss
# Telerik's JustMock configuration file
*.jmconfig
# BizTalk build output
*.btp.cs
*.btm.cs
*.odx.cs
*.xsd.cs
# OpenCover UI analysis results
OpenCover/
# Azure Stream Analytics local run output
ASALocalRun/
# MSBuild Binary and Structured Log
*.binlog
# NVidia Nsight GPU debugger configuration file
*.nvuser
# MFractors (Xamarin productivity tool) working folder
.mfractor/
# Local History for Visual Studio
.localhistory/
# BeatPulse healthcheck temp database
healthchecksdb
# Backup folder for Package Reference Convert tool in Visual Studio 2017
MigrationBackup/
# Ionide (cross platform F# VS Code tools) working folder
.ionide/
# Fody - auto-generated XML schema
FodyWeavers.xsd

13
sampleapi/Dockerfile Normal file
View File

@@ -0,0 +1,13 @@
# Runtime-only image: assumes the build context already contains published
# output (e.g. from `dotnet publish -c Release`), since no SDK build stage
# is defined here.
FROM mcr.microsoft.com/dotnet/aspnet:8.0 AS base
USER app
WORKDIR /app
EXPOSE 8080
EXPOSE 8081
COPY . /app

FROM base AS final
WORKDIR /app
ENTRYPOINT ["dotnet", "SampleApi.dll"]

25
sampleapi/SampleApi.sln Normal file
View File

@@ -0,0 +1,25 @@
Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 17
VisualStudioVersion = 17.9.34607.119
MinimumVisualStudioVersion = 10.0.40219.1
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SampleApi", "SampleApi\SampleApi.csproj", "{13C3F97A-4BF2-4255-8711-0098CD253F31}"
EndProject
Global
	GlobalSection(SolutionConfigurationPlatforms) = preSolution
		Debug|Any CPU = Debug|Any CPU
		Release|Any CPU = Release|Any CPU
	EndGlobalSection
	GlobalSection(ProjectConfigurationPlatforms) = postSolution
		{13C3F97A-4BF2-4255-8711-0098CD253F31}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
		{13C3F97A-4BF2-4255-8711-0098CD253F31}.Debug|Any CPU.Build.0 = Debug|Any CPU
		{13C3F97A-4BF2-4255-8711-0098CD253F31}.Release|Any CPU.ActiveCfg = Release|Any CPU
		{13C3F97A-4BF2-4255-8711-0098CD253F31}.Release|Any CPU.Build.0 = Release|Any CPU
	EndGlobalSection
	GlobalSection(SolutionProperties) = preSolution
		HideSolutionNode = FALSE
	EndGlobalSection
	GlobalSection(ExtensibilityGlobals) = postSolution
		SolutionGuid = {6F21F43E-CAB2-400C-A163-26A1A808E16D}
	EndGlobalSection
EndGlobal

View File

@@ -0,0 +1,33 @@
using Microsoft.AspNetCore.Mvc;

namespace SampleApi.Controllers
{
    [ApiController]
    [Route("[controller]")]
    public class WeatherForecastController : ControllerBase
    {
        private static readonly string[] Summaries = new[]
        {
            "Freezing", "Bracing", "Chilly", "Cool", "Mild", "Warm", "Balmy", "Hot", "Sweltering", "Scorching"
        };

        private readonly ILogger<WeatherForecastController> _logger;

        public WeatherForecastController(ILogger<WeatherForecastController> logger)
        {
            _logger = logger;
        }

        [HttpGet(Name = "GetWeatherForecast")]
        public IEnumerable<WeatherForecast> Get()
        {
            return Enumerable.Range(1, 5).Select(index => new WeatherForecast
            {
                Date = DateOnly.FromDateTime(DateTime.Now.AddDays(index)),
                TemperatureC = Random.Shared.Next(-20, 55),
                Summary = Summaries[Random.Shared.Next(Summaries.Length)]
            })
            .ToArray();
        }
    }
}

View File

@@ -0,0 +1,25 @@
var builder = WebApplication.CreateBuilder(args);

// Add services to the container.
builder.Services.AddControllers();
// Learn more about configuring Swagger/OpenAPI at https://aka.ms/aspnetcore/swashbuckle
builder.Services.AddEndpointsApiExplorer();
builder.Services.AddSwaggerGen();

var app = builder.Build();

// Configure the HTTP request pipeline.
if (app.Environment.IsDevelopment())
{
    app.UseSwagger();
    app.UseSwaggerUI();
}

app.UseHttpsRedirection();
app.UseAuthorization();
app.MapControllers();

app.Run();

View File

@@ -0,0 +1,52 @@
{
  "profiles": {
    "http": {
      "commandName": "Project",
      "launchBrowser": true,
      "launchUrl": "swagger",
      "environmentVariables": {
        "ASPNETCORE_ENVIRONMENT": "Development"
      },
      "dotnetRunMessages": true,
      "applicationUrl": "http://localhost:5221"
    },
    "https": {
      "commandName": "Project",
      "launchBrowser": true,
      "launchUrl": "swagger",
      "environmentVariables": {
        "ASPNETCORE_ENVIRONMENT": "Development"
      },
      "dotnetRunMessages": true,
      "applicationUrl": "https://localhost:7018;http://localhost:5221"
    },
    "IIS Express": {
      "commandName": "IISExpress",
      "launchBrowser": true,
      "launchUrl": "swagger",
      "environmentVariables": {
        "ASPNETCORE_ENVIRONMENT": "Development"
      }
    },
    "Container (Dockerfile)": {
      "commandName": "Docker",
      "launchBrowser": true,
      "launchUrl": "{Scheme}://{ServiceHost}:{ServicePort}/swagger",
      "environmentVariables": {
        "ASPNETCORE_HTTPS_PORTS": "8081",
        "ASPNETCORE_HTTP_PORTS": "8080"
      },
      "publishAllPorts": true,
      "useSSL": true
    }
  },
  "$schema": "http://json.schemastore.org/launchsettings.json",
  "iisSettings": {
    "windowsAuthentication": false,
    "anonymousAuthentication": true,
    "iisExpress": {
      "applicationUrl": "http://localhost:55457",
      "sslPort": 44308
    }
  }
}

View File

@@ -0,0 +1,16 @@
<Project Sdk="Microsoft.NET.Sdk.Web">

  <PropertyGroup>
    <TargetFramework>net8.0</TargetFramework>
    <Nullable>enable</Nullable>
    <ImplicitUsings>enable</ImplicitUsings>
    <UserSecretsId>cfe6b4ce-2d40-4273-b3a3-e4df67304fc5</UserSecretsId>
    <DockerDefaultTargetOS>Linux</DockerDefaultTargetOS>
  </PropertyGroup>

  <ItemGroup>
    <PackageReference Include="Microsoft.VisualStudio.Azure.Containers.Tools.Targets" Version="1.19.6" />
    <PackageReference Include="Swashbuckle.AspNetCore" Version="6.4.0" />
  </ItemGroup>

</Project>

View File

@@ -0,0 +1,6 @@
@SampleApi_HostAddress = http://localhost:5221
GET {{SampleApi_HostAddress}}/weatherforecast/
Accept: application/json
###
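# For reference, an equivalent request from a plain shell (the port matches
# the http profile in launchSettings.json):
# curl -H "Accept: application/json" http://localhost:5221/weatherforecast/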

View File

@@ -0,0 +1,13 @@
namespace SampleApi
{
    public class WeatherForecast
    {
        public DateOnly Date { get; set; }

        public int TemperatureC { get; set; }

        // Template conversion: 0.5556 is approximately 5/9, so this computes 32 + C * 9/5
        public int TemperatureF => 32 + (int)(TemperatureC / 0.5556);

        public string? Summary { get; set; }
    }
}

View File

@@ -0,0 +1,8 @@
{
  "Logging": {
    "LogLevel": {
      "Default": "Information",
      "Microsoft.AspNetCore": "Warning"
    }
  }
}

View File

@@ -0,0 +1,9 @@
{
  "Logging": {
    "LogLevel": {
      "Default": "Information",
      "Microsoft.AspNetCore": "Warning"
    }
  },
  "AllowedHosts": "*"
}

View File

@@ -0,0 +1,93 @@
# namespace
apiVersion: v1
kind: Namespace
metadata:
  name: experiments
  labels:
    name: experiments
---
# config map
apiVersion: v1
kind: ConfigMap
metadata:
  name: sampleapi-configmap
  namespace: experiments
data:
  appname: "SampleApi"
---
# deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sampleapi-deployment
  namespace: experiments
  labels:
    app: sampleapi
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sampleapi
  template:
    metadata:
      labels:
        app: sampleapi
    spec:
      containers:
        - name: sampleapi
          image: khwezi/mngomalab:latest
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 8080
              protocol: TCP
            - name: https
              containerPort: 8081
              protocol: TCP
---
# service
apiVersion: v1
kind: Service
metadata:
  name: sampleapi-service
  namespace: experiments
spec:
  type: ClusterIP
  selector:
    app: sampleapi
  ports:
    # Route the conventional ports to the container's named ports
    # (8080/8081, matching the Dockerfile and the deployment above)
    - name: http
      port: 80
      targetPort: http
    - name: https
      port: 443
      targetPort: https
---
# ingress
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: sampleapi-ingress
  namespace: experiments
  annotations:
    cert-manager.io/cluster-issuer: sampleapi-secret
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    cert-manager.io/private-key-size: "4096"
spec:
  ingressClassName: nginx
  rules:
    - host: sampleapi.main.k3s.lab.mngoma.africa
      http:
        paths:
          - pathType: Prefix
            path: /
            backend:
              service:
                name: sampleapi-service
                port:
                  number: 80
  tls:
    - hosts:
        - sampleapi.main.k3s.lab.mngoma.africa
      secretName: sampleapi-secret

120
shared/bind9/commands.md Normal file
View File

@@ -0,0 +1,120 @@
# Copy SSH keys to the VM
## Make sure your local default SSH directory exists first (/root/.ssh if you are running as root, e.g. from Windows via WSL)
```shell
ssh-copy-id -i ~/.ssh/id_ed25519.pub khwezi@192.168.1.151
```
# Update system packages
```shell
sudo apt update && sudo apt upgrade -y
```
# Install Bind9
## Install the accompanying utilities as well
```shell
sudo apt install bind9 bind9utils bind9-doc
```
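# Confirm the service is running
## A quick check; on recent Ubuntu releases the systemd unit is called named (bind9 is kept as an alias)
```shell
sudo systemctl status named
```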
# Configure Bind9
```shell
# 1. Define global DNS settings
sudo cp /etc/bind/named.conf.options /etc/bind/named.conf.options.bak
sudo nano /etc/bind/named.conf.options
# 2. Modify the file contents, using the following example:
# options {
#         directory "/var/cache/bind";
#         listen-on { any; };    # Listen on all IP addresses
#         allow-query { any; };  # Allow queries from any IP address
#         forwarders {
#                 8.8.8.8;       # Google Public DNS
#                 8.8.4.4;
#         };
#         dnssec-validation auto;
#         auth-nxdomain no;      # conform to RFC1035
#         listen-on-v6 { any; };
# };
# Save changes and exit the editor
# 3. Create custom DNS zones
sudo cp /etc/bind/named.conf.local /etc/bind/named.conf.local.bak
sudo nano /etc/bind/named.conf.local
# Use the following example:
# zone "mngoma.lab" {
#         type master;
#         file "/etc/bind/db.mngoma.lab";
# };
#
# zone "1.168.192.in-addr.arpa" {
#         type master;
#         file "/etc/bind/db.192.168.1";
# };
# 4. Create the zone file(s) referenced in /etc/bind/named.conf.local
sudo cp /etc/bind/db.local /etc/bind/db.mngoma.lab
sudo cp /etc/bind/db.127 /etc/bind/db.192.168.1
# 5. Edit the zone files to contain the records you need
# ;
# ; Zone file for example.com
# ;
# $ORIGIN example.com.
# $TTL 3H
#
# ; SOA record - authoritative info about the zone
# @    IN  SOA  ns1.example.com. hostmaster.example.com. (
#          2025010101 ; Serial Number
#          21600      ; Refresh (6 hours)
#          3600       ; Retry (1 hour)
#          604800     ; Expire (1 week)
#          86400      ; Minimum TTL (1 day)
#          )
#
# ; NS Records - authoritative name servers for the domain
# @    IN  NS   ns1.example.com.
# @    IN  NS   ns2.example.com.
#
# ; A Records - mapping hostnames to IPv4 addresses
# ns1  IN  A    192.168.1.10
# ns2  IN  A    192.168.1.11
# www  IN  A    192.168.1.20
# ftp  IN  A    192.168.1.30
#
# ; AAAA Records - mapping hostnames to IPv6 addresses (optional)
# ns1  IN  AAAA 2001:db8::10
# www  IN  AAAA 2001:db8::20
#
# ; MX Record - for mail exchange servers and priority
# @    IN  MX   10 mail.example.com.
# mail IN  A    192.168.1.100
#
# ; CNAME Record - alias for another hostname
# web  IN  CNAME www.example.com.
# ----------
# 6. Check zone file syntax (validate)
sudo named-checkconf
sudo named-checkzone mngoma.lab /etc/bind/db.mngoma.lab
sudo named-checkzone 1.168.192.in-addr.arpa /etc/bind/db.192.168.1
# 7. Restart bind9
sudo systemctl restart bind9
```
# Configure firewall (lockdown)
```shell
# 1. enable firewall
sudo ufw enable
# 2. allow all traffic from my address range
sudo ufw allow from 192.168.1.0/24
# 3. allow DNS ports
sudo ufw allow 53/udp
sudo ufw allow 53/tcp
```
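# Test name resolution
## A quick sanity check against the new server; the address comes from the ssh-copy-id step above, and the record names are placeholders for entries in your zone file. dig ships with bind9-dnsutils if it is not already installed.
```shell
sudo apt install bind9-dnsutils
dig @192.168.1.151 ns1.mngoma.lab
dig @192.168.1.151 -x 192.168.1.10
```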

155
shared/nginx/commands.md Normal file
View File

@@ -0,0 +1,155 @@
# Update package list and install nginx
```shell
sudo apt update
sudo apt-get install nginx-full
```
# Backup the default config
```shell
sudo cp /etc/nginx/sites-available/default /etc/nginx/sites-available/default.bak
```
# (Optional) Obtain SSL certificates using Let's Encrypt
## Replace <your_domain> with your actual domain; if you want to use self-signed certificates, generate them instead. Example for Let's Encrypt:
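```shell
sudo apt install -y certbot python3-certbot-nginx
sudo certbot --nginx -d <your_domain>
```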
# Edit the default config (replace the server block with the following)
```shell
sudo tee /etc/nginx/sites-available/default > /dev/null <<'EOF'
server {
	listen 80 default_server;
	listen [::]:80 default_server;
	server_name _;

	root /var/www/html;
	index index.html index.htm index.nginx-debian.html;

	location / {
		# This will just serve the static page /var/www/html/index.html
		try_files $uri $uri/ =404;
	}
}
EOF
```
# Edit nginx.conf
## Do not put the stream {} block inside http {}
```shell
sudo nano /etc/nginx/nginx.conf
user www-data;
worker_processes auto;
pid /run/nginx.pid;

events { worker_connections 768; }

# Add the stream section here, outside http {}
stream {
	upstream managers_http {
		server lead.swarm.mngoma.lab:80;
		server follow.swarm.mngoma.lab:80;
	}
	server {
		listen 80;
		proxy_pass managers_http;
	}
	upstream managers_https {
		server lead.swarm.mngoma.lab:443;
		server follow.swarm.mngoma.lab:443;
	}
	server {
		listen 443;
		proxy_pass managers_https;
	}
}

http {
	## ... your existing http config here ...
}
```
# Full nginx.conf example
```shell
sudo nano /etc/nginx/nginx.conf
# ONLY necessary if not handled by /etc/nginx/modules-enabled/
# load_module /usr/lib/nginx/modules/ngx_stream_module.so;
user www-data;
worker_processes auto;
pid /run/nginx.pid;

events {
	worker_connections 768;
}

# ========== STREAM PROXY (Layer 4 TCP) ==========
stream {
	upstream managers_http {
		server lead.swarm.mngoma.lab:80;
		server follow.swarm.mngoma.lab:80;
	}
	server {
		listen 80;
		proxy_pass managers_http;
	}
	upstream managers_https {
		server lead.swarm.mngoma.lab:443;
		server follow.swarm.mngoma.lab:443;
	}
	server {
		listen 443;
		proxy_pass managers_https;
	}
}

# ========== HTTP CONFIG ==========
http {
	##
	# Basic Settings
	##
	sendfile on;
	tcp_nopush on;
	tcp_nodelay on;
	keepalive_timeout 65;
	types_hash_max_size 2048;
	include /etc/nginx/mime.types;
	default_type application/octet-stream;

	##
	# Logging Settings
	##
	access_log /var/log/nginx/access.log;
	error_log /var/log/nginx/error.log;

	##
	# Gzip Settings
	##
	gzip on;
	gzip_disable "msie6";

	##
	# Include virtual host configurations
	##
	include /etc/nginx/conf.d/*.conf;
	include /etc/nginx/sites-enabled/*;
}
```
# Test and reload nginx
```shell
sudo nginx -t
sudo systemctl reload nginx
```
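# Verify the stream proxy is listening
## An optional check that the stream block bound ports 80 and 443 on this host; the curl target assumes you are testing from the load balancer itself
```shell
sudo ss -tlnp | grep nginx
curl -I http://localhost
```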
# Log trace
```shell
tail -f /var/log/nginx/error.log /var/log/nginx/access.log
```

19
webmin/commands.md Normal file
View File

@@ -0,0 +1,19 @@
# 1. Set up the Webmin repository using the official script
```bash
curl -o webmin-setup-repo.sh https://raw.githubusercontent.com/webmin/webmin/master/webmin-setup-repo.sh
sudo sh webmin-setup-repo.sh
```
# 2. Install Webmin
```shell
sudo apt update
sudo apt install webmin
```
# 3. Open port
```bash
sudo ufw allow 10000/tcp
```
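# 4. Verify Webmin is reachable
## Webmin serves HTTPS on port 10000 by default; -k skips the self-signed certificate check
```bash
curl -k -I https://localhost:10000
```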

14
webmin/config.ini Normal file
View File

@@ -0,0 +1,14 @@
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=ansible
ansible_ssh_private_key_file=~/.ssh/id_ed25519
[hosts]
sentry ansible_host=sentry.mngoma.lab
alpha ansible_host=alpha.lb.mngoma.lab
database ansible_host=database.mngoma.lab
vpn ansible_host=vpn.mngoma.lab
minecraft ansible_host=minecraft.mngoma.lab
# dns ansible_host=dns.mngoma.lab
manager ansible_host=lead.mngoma.lab
worker ansible_host=worker1.mngoma.lab
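# Example invocation against this inventory (the playbook name is illustrative):
# ansible-playbook -i config.ini update-webmin.yml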

View File

@@ -0,0 +1,25 @@
---
- name: Cleanup broken Webmin repository
  hosts: all
  become: yes
  tasks:
    - name: Remove old Webmin repository file
      file:
        path: /etc/apt/sources.list.d/webmin.list
        state: absent
    - name: Remove Webmin GPG keyring (optional)
      file:
        path: /usr/share/keyrings/webmin.gpg
        state: absent
    - name: Clean apt cache
      apt:
        autoclean: yes
        autoremove: yes
    - name: Update apt cache
      apt:
        update_cache: yes

View File

@@ -0,0 +1,56 @@
---
- name: Install Webmin using official setup script
  hosts: all
  become: yes
  vars:
    webmin_port: 10000
    webmin_script: /tmp/webmin-setup-repo.sh
  tasks:
    - name: Ensure required packages are installed
      apt:
        name:
          - curl
          - ufw
        state: present
        update_cache: yes
    - name: Download Webmin setup script
      get_url:
        url: https://raw.githubusercontent.com/webmin/webmin/master/webmin-setup-repo.sh
        dest: "{{ webmin_script }}"
        mode: '0755'
    - name: Run Webmin setup script non-interactively
      command: "sh {{ webmin_script }}"
      args:
        stdin: "y\n"
        creates: /etc/apt/sources.list.d/webmin.list
    - name: Update apt cache after script
      apt:
        update_cache: yes
    - name: Install Webmin
      apt:
        name: webmin
        state: latest
    - name: Ensure Webmin service is running
      systemd:
        name: webmin
        state: started
        enabled: yes
    - name: Allow Webmin port through UFW
      ufw:
        rule: allow
        port: "{{ webmin_port }}"
        proto: tcp
      register: ufw_rule
    - name: Confirm UFW rule applied
      debug:
        msg: "UFW rule for Webmin (port {{ webmin_port }}) is present."
      when: ufw_rule is changed or ufw_rule is skipped

View File

@@ -0,0 +1,55 @@
---
- name: Safely update Webmin on all hosts
  hosts: all
  become: yes
  vars:
    webmin_repo: "deb http://download.webmin.com/download/repository sarge contrib"
    webmin_key_url: "http://www.webmin.com/jcameron-key.asc"
  tasks:
    - name: Ensure required packages are installed for HTTPS repositories
      apt:
        name:
          - apt-transport-https
          - software-properties-common
          - gnupg
        state: present
        update_cache: yes
    - name: Add Webmin GPG key (idempotent)
      ansible.builtin.apt_key:
        url: "{{ webmin_key_url }}"
        state: present
    - name: Ensure Webmin repository is present
      ansible.builtin.apt_repository:
        repo: "{{ webmin_repo }}"
        state: present
    - name: Update apt cache
      apt:
        update_cache: yes
    - name: Check if Webmin is installed
      command: dpkg -l webmin
      register: webmin_installed
      ignore_errors: yes
    - name: Upgrade Webmin if installed
      apt:
        name: webmin
        state: latest
      when: webmin_installed.rc == 0
    - name: Install Webmin if not installed
      apt:
        name: webmin
        state: present
      when: webmin_installed.rc != 0
    - name: Restart Webmin service after update and enable it at boot
      systemd:
        name: webmin
        state: restarted
        enabled: yes