Copied all stacks

This commit is contained in:
Khwezi
2026-04-11 09:51:19 +02:00
parent 7c14e29bd4
commit 99efa12f43
25 changed files with 1040 additions and 4 deletions

View File

@@ -0,0 +1,191 @@
---
- name: Install dependencies, mount NFS at /home/{{ ansible_user }}/k3s, setup K3s, DNS, Traefik, venv for k8s module
  hosts: all
  become: true
  vars:
    nfs_server: storage.mngoma.lab
    nfs_export_path: /export/k3s
    dns_server_ip: 192.168.1.151
    dns_fallback_ip: 192.168.1.1
    ansible_user_home: "/home/{{ ansible_user }}"
    nfs_mount_path: "{{ ansible_user_home }}/k3s"
    kube_config_path: "{{ ansible_user_home }}/.kube/config"
    kube_venv_path: "/opt/kube-venv"
  tasks:
    # Lab DNS first, home router as fallback; DNSStubListener keeps the
    # 127.0.0.53 stub that /etc/resolv.conf is linked to below.
    - name: Configure systemd-resolved permanent DNS
      copy:
        dest: /etc/systemd/resolved.conf
        content: |
          [Resolve]
          DNS={{ dns_server_ip }}
          FallbackDNS={{ dns_fallback_ip }}
          Domains=mngoma.lab
          DNSStubListener=yes
        owner: root
        group: root
        mode: '0644'
    - name: Ensure /etc/resolv.conf points to systemd-resolved stub
      file:
        src: /run/systemd/resolve/stub-resolv.conf
        dest: /etc/resolv.conf
        state: link
        force: true
    - name: Restart systemd-resolved to apply DNS changes
      systemd:
        name: systemd-resolved
        state: restarted
        enabled: true
    - name: Ensure NFS mount point exists
      file:
        path: "{{ nfs_mount_path }}"
        state: directory
        owner: "{{ ansible_user | default('root') }}"
        group: "{{ ansible_user | default('root') }}"
        mode: '0755'
    # state=mounted both mounts the share immediately AND writes the fstab
    # entry, so the former duplicate state=present task was removed.
    - name: Mount NFS share now and persist it across reboots
      mount:
        src: "{{ nfs_server }}:{{ nfs_export_path }}"
        path: "{{ nfs_mount_path }}"
        fstype: nfs
        opts: defaults
        state: mounted
    - name: Ensure .kube directory exists
      file:
        path: "{{ ansible_user_home }}/.kube"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0700'
    - name: Download latest kubectl
      shell: |
        arch=$(uname -m)
        if [ "$arch" = "x86_64" ]; then arch="amd64";
        elif [ "$arch" = "aarch64" ]; then arch="arm64";
        fi
        curl -Lo /usr/local/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/${arch}/kubectl"
        chmod +x /usr/local/bin/kubectl
      args:
        creates: /usr/local/bin/kubectl
    - name: Install Helm
      shell: |
        curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
      args:
        creates: /usr/local/bin/helm
    # NOTE(review): requires jq on the target (not installed by this play) --
    # confirm, or install it alongside the other dependencies. The previous
    # ignore_errors: true silently masked exactly that kind of failure, so it
    # was removed to let real errors surface.
    - name: Download and install kustomize
      shell: |
        kustomize_ver=$(curl -s https://api.github.com/repos/kubernetes-sigs/kustomize/releases/latest | jq -r '.tag_name')
        arch=$(uname -m)
        if [ "$arch" = "x86_64" ]; then arch="amd64";
        elif [ "$arch" = "aarch64" ]; then arch="arm64";
        fi
        url="https://github.com/kubernetes-sigs/kustomize/releases/download/${kustomize_ver}/kustomize_${kustomize_ver#kustomize/}_linux_${arch}.tar.gz"
        tmpfile=$(mktemp)
        curl -L -o "$tmpfile" "$url"
        tar -xz -C /usr/local/bin -f "$tmpfile"
        chmod +x /usr/local/bin/kustomize
        rm -f "$tmpfile"
      args:
        creates: /usr/local/bin/kustomize
    # creates: skips the task once the clone exists, so the old `|| true`
    # (which also hid genuine clone failures) is no longer needed.
    - name: Install kubectx and kubens
      shell: |
        git clone https://github.com/ahmetb/kubectx /opt/kubectx
        ln -sf /opt/kubectx/kubectx /usr/local/bin/kubectx
        ln -sf /opt/kubectx/kubens /usr/local/bin/kubens
      args:
        creates: /opt/kubectx
    - name: Ensure systemd override directory for k3s exists
      file:
        path: /etc/systemd/system/k3s.service.d
        state: directory
        owner: root
        group: root
        mode: '0755'
    # k3s removed --no-deploy (replaced by --disable) and has never had a
    # --traefik-arg flag -- unknown flags abort server startup. Traefik's ping
    # options are applied through a HelmChartConfig manifest instead (below).
    - name: Override k3s server arguments (disable servicelb, external cloud provider)
      copy:
        dest: /etc/systemd/system/k3s.service.d/10-traefik-ping.conf
        content: |
          [Service]
          ExecStart=
          ExecStart=/usr/local/bin/k3s server --disable=servicelb --kubelet-arg="cloud-provider=external"
        owner: root
        group: root
        mode: '0644'
    - name: Ensure k3s server manifests directory exists
      file:
        path: /var/lib/rancher/k3s/server/manifests
        state: directory
        owner: root
        group: root
        mode: '0755'
    # k3s auto-applies HelmChartConfig manifests from this directory to tune
    # its bundled Traefik chart. (--ping.responseMessage from the old override
    # is not a documented Traefik ping option and was dropped.)
    - name: Enable Traefik ping endpoint via HelmChartConfig
      copy:
        dest: /var/lib/rancher/k3s/server/manifests/traefik-ping-config.yaml
        content: |
          apiVersion: helm.cattle.io/v1
          kind: HelmChartConfig
          metadata:
            name: traefik
            namespace: kube-system
          spec:
            valuesContent: |-
              additionalArguments:
                - "--ping.entryPoint=web"
                - "--ping.manualRouting=false"
        owner: root
        group: root
        mode: '0644'
    # NOTE(review): the unit override only takes effect after the k3s service
    # is (re)started; this play does not restart it because k3s may not be
    # installed yet on every host in `all`.
    - name: Reload systemd to pick up k3s override
      systemd:
        daemon_reload: true
    # --- VENV FOR KUBERNETES MODULE ---
    - name: Create venv for k8s ansible modules
      command: python3 -m venv {{ kube_venv_path }}
      args:
        creates: "{{ kube_venv_path }}/bin/activate"
    - name: Install kubernetes python library in venv
      pip:
        name: kubernetes
        virtualenv: "{{ kube_venv_path }}"
        virtualenv_python: python3
# The following play block will ONLY target the manager group
# Second play: runs only against the manager group and creates a Traefik
# IngressRoute so the dashboard answers at dashboard.apps.mngoma.lab.
- name: Expose Traefik dashboard via IngressRoute (manager only)
  hosts: manager # Change to your actual manager inventory group name
  become: yes
  vars:
    ansible_user_home: "/home/{{ ansible_user }}"
    kube_config_path: "{{ ansible_user_home }}/.kube/config"
    kube_venv_path: "/opt/kube-venv"
  tasks:
    - name: Expose Traefik dashboard via IngressRoute (inline)
      kubernetes.core.k8s:
        state: present
        definition:
          # NOTE(review): traefik.containo.us/v1alpha1 is the legacy CRD API
          # group; Traefik v3+ serves traefik.io/v1alpha1 -- confirm which one
          # the cluster's bundled Traefik accepts.
          apiVersion: traefik.containo.us/v1alpha1
          kind: IngressRoute
          metadata:
            name: traefik-dashboard
            namespace: kube-system
          spec:
            entryPoints:
              - web
              - websecure
            routes:
              - match: Host(`dashboard.apps.mngoma.lab`)
                kind: Rule
                services:
                  # NOTE(review): assumes a Service named "traefik" in
                  # kube-system exposing port 8080; k3s's default traefik
                  # Service does not expose 8080 -- verify before relying on it.
                  - name: traefik
                    port: 8080
            # Empty TLS block: terminate TLS using Traefik's default certificate.
            tls: {}
      # Point the k8s module at the copied admin kubeconfig.
      environment:
        KUBECONFIG: "{{ kube_config_path }}"
      # Task-level interpreter override so the `kubernetes` Python library
      # installed in the venv is importable by the module.
      vars:
        ansible_python_interpreter: "{{ kube_venv_path }}/bin/python"

View File

@@ -0,0 +1,37 @@
@echo off
REM =========================================
REM apply-cluster-snat.bat
REM Enables hairpin NAT for Kubernetes pods -> HAProxy host
REM =========================================
REM NOTE(review): this script mixes Windows and Linux tooling. Step 1 edits
REM the Windows registry via PowerShell, but Steps 2-3 append iptables rules
REM to /etc/ufw/before.rules and invoke `ufw`, which exist only on Linux --
REM cmd.exe cannot execute them, and `if exist` / `copy` cannot see POSIX
REM paths. Confirm the intended target OS; the UFW half likely belongs in a
REM separate bash script run on the HAProxy/Linux host.
REM ---- Step 1: Enable IP forwarding ----
echo Enabling IPv4 forwarding...
powershell -Command "Set-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters' -Name 'IPEnableRouter' -Value 1"
echo IP forwarding enabled. Please reboot for permanent effect.
REM ---- Step 2: Add UFW NAT rules ----
echo Applying NAT rules for pod -> HAProxy hairpin...
REM Ensure before.rules file exists
set ufw_rules_file=/etc/ufw/before.rules
REM Backup original rules
if exist "%ufw_rules_file%.bak" (
echo Backup already exists.
) else (
copy "%ufw_rules_file%" "%ufw_rules_file%.bak"
echo Backup created at %ufw_rules_file%.bak
)
REM Append NAT rules
REM NOTE(review): 10.42.0.0/16 is the default k3s pod CIDR and 192.168.1.160
REM is presumably the HAProxy host -- verify both against the cluster config.
REM Appending blindly also duplicates the *nat block on every re-run.
echo *nat >> "%ufw_rules_file%"
echo :POSTROUTING ACCEPT [0:0] >> "%ufw_rules_file%"
echo -A POSTROUTING -s 10.42.0.0/16 -d 192.168.1.160 -j MASQUERADE >> "%ufw_rules_file%"
echo COMMIT >> "%ufw_rules_file%"
REM ---- Step 3: Reload UFW ----
echo Reloading UFW...
ufw disable
ufw enable
echo Hairpin NAT applied successfully.
pause

View File

@@ -0,0 +1,256 @@
---
- name: Gather IPv4 address facts for all hosts
  hosts: all
  gather_facts: true
  vars:
    dns_server: "192.168.1.151"
  tasks:
    - name: Configure systemd-resolved to use custom DNS
      become: true
      copy:
        dest: /etc/systemd/resolved.conf
        content: |
          [Resolve]
          DNS={{ dns_server }}
          FallbackDNS=192.168.1.1
          Domains=mngoma.lab
          DNSStubListener=yes
        owner: root
        group: root
        mode: "0644"
    - name: Ensure systemd-resolved service is enabled and restarted
      become: true
      systemd:
        name: systemd-resolved
        state: restarted
        enabled: true
    # Cache the default IPv4 address; later plays (kubeconfig rewrite, worker
    # join) read node_ipv4 back out of hostvars.
    - name: Ensure host IPv4 address is available and stored as fact
      set_fact:
        node_ipv4: "{{ ansible_default_ipv4.address }}"
      when: ansible_default_ipv4 is defined and ansible_default_ipv4.address is defined
    - name: Fail if IPv4 address could not be determined
      fail:
        msg: "Could not determine IPv4 address for {{ inventory_hostname }}. Please check network configuration."
      when: node_ipv4 is not defined
    # NOTE(review): 0777 leaves this world-writable; presumably intentional so
    # pods/NFS can write here, but consider tightening (e.g. 0775) -- confirm.
    - name: Ensure /home/{{ ansible_user }}/k3s directory exists
      become: true
      file:
        path: /home/{{ ansible_user }}/k3s
        state: directory
        owner: root
        group: root
        mode: "0777"
- name: Initialise the K3s control plane (manager)
  hosts: manager
  become: true
  vars:
    k3s_version: v1.29.4+k3s1
    kubeconfig_dir: "/home/{{ ansible_user }}/.kube"
    kubeconfig_file: "{{ kubeconfig_dir }}/config"
  tasks:
    - name: Install required apt dependencies
      apt:
        name:
          - curl
          - python3-pip
          - python3-venv
        state: present
        update_cache: true
    - name: Create a Python virtual environment for Ansible k8s modules
      command: python3 -m venv /opt/ansible-venv
      args:
        creates: /opt/ansible-venv
    # pip module is idempotent: it installs only when the packages are missing,
    # unlike the previous raw `pip install` command that ran on every play.
    - name: Install Kubernetes and OpenShift libraries in the venv
      pip:
        name:
          - kubernetes
          - openshift
        virtualenv: /opt/ansible-venv
    - name: Install k3s on manager (control plane)
      shell: |
        curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION={{ k3s_version }} sh -
      args:
        creates: /usr/local/bin/k3s
    # Inline ternary replaces the block-scalar form, which embedded leading
    # whitespace and a trailing newline into the value and broke the URL below.
    - name: Gather architecture for kubectl
      set_fact:
        kubectl_arch: >-
          {{ 'amd64' if ansible_architecture == 'x86_64'
             else 'arm' if 'armv7l' in ansible_architecture
             else 'arm64' if 'aarch64' in ansible_architecture
             else ansible_architecture }}
    - name: Download latest kubectl binary
      shell: |
        curl -Lo /usr/local/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/{{ kubectl_arch }}/kubectl"
      args:
        creates: /usr/local/bin/kubectl
    - name: Make kubectl executable
      file:
        path: /usr/local/bin/kubectl
        mode: '0755'
        owner: root
        group: root
    - name: Validate kubectl version
      shell: kubectl version --client --output=yaml
      register: kubectl_version
      changed_when: false
    - name: Show kubectl version
      debug:
        var: kubectl_version.stdout
    - name: Ensure .kube directory exists for ansible user
      become_user: "{{ ansible_user }}"
      file:
        path: "{{ kubeconfig_dir }}"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0700'
    - name: Copy kubeconfig file to ansible user home
      become: true
      copy:
        src: /etc/rancher/k3s/k3s.yaml
        dest: "{{ kubeconfig_file }}"
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0600'
        remote_src: true
    # Single-quoted YAML keeps backslashes literal, so the regex needs \. --
    # the previous \\. matched "backslash + any char" and never hit the IP.
    # The replacement line is indented four spaces to match k3s.yaml's own
    # `server:` nesting under `cluster:`.
    - name: Replace server IP in kubeconfig with manager's IP address
      become_user: "{{ ansible_user }}"
      lineinfile:
        path: "{{ kubeconfig_file }}"
        regexp: 'server: https://127\.0\.0\.1:6443'
        line: "    server: https://{{ node_ipv4 }}:6443"
    - name: Get the cluster join token
      shell: cat /var/lib/rancher/k3s/server/node-token
      register: k3s_token
      changed_when: false
    - name: Set fact for join token and manager IP
      set_fact:
        k3s_node_token: "{{ k3s_token.stdout }}"
        manager_ip: "{{ node_ipv4 }}"
    # Publish join details on a synthetic "cluster_primary" host so the worker
    # play can read them from hostvars regardless of the manager's real name.
    - name: Add the join token and manager IP to hostvars for workers
      add_host:
        name: "cluster_primary"
        groups: join_info
        k3s_node_token: "{{ k3s_node_token }}"
        k3s_manager_ip: "{{ manager_ip }}"
- name: Install and join worker nodes to the control plane
  hosts: workers
  become: true
  vars:
    k3s_version: v1.29.4+k3s1
  tasks:
    - name: Install required dependencies
      apt:
        name: [curl]
        state: present
        update_cache: true
    - name: Ensure /home/{{ ansible_user }}/k3s directory exists
      file:
        path: /home/{{ ansible_user }}/k3s
        state: directory
        owner: root
        group: root
        mode: '0777'
    # Read the join token and manager IP published by the control-plane play
    # on the synthetic "cluster_primary" host.
    - name: Set manager join information
      set_fact:
        k3s_node_token: "{{ hostvars[groups['join_info'][0]]['k3s_node_token'] }}"
        k3s_manager_ip: "{{ hostvars[groups['join_info'][0]]['k3s_manager_ip'] }}"
    - name: Fail if manager's IP is not available
      fail:
        msg: "Could not determine manager's IP for joining cluster!"
      when: k3s_manager_ip is not defined
    # The installer ships only a /usr/local/bin/k3s binary (the agent runs as
    # the k3s-agent *service*), so guard on the binary that actually exists;
    # the old creates=/usr/local/bin/k3s-agent was never satisfied and made
    # this task re-run on every play.
    - name: Install k3s agent (worker)
      shell: |
        curl -sfL https://get.k3s.io | K3S_URL=https://{{ k3s_manager_ip }}:6443 K3S_TOKEN={{ k3s_node_token }} INSTALL_K3S_VERSION={{ k3s_version }} sh -
      args:
        creates: /usr/local/bin/k3s
    # Inline ternary replaces the block-scalar form, which embedded leading
    # whitespace and a trailing newline into the value and broke the URL below.
    - name: Gather architecture for kubectl
      set_fact:
        kubectl_arch: >-
          {{ 'amd64' if ansible_architecture == 'x86_64'
             else 'arm' if 'armv7l' in ansible_architecture
             else 'arm64' if 'aarch64' in ansible_architecture
             else ansible_architecture }}
    - name: Download latest kubectl binary (worker)
      shell: |
        curl -Lo /usr/local/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/{{ kubectl_arch }}/kubectl"
      args:
        creates: /usr/local/bin/kubectl
    - name: Make kubectl executable (worker)
      file:
        path: /usr/local/bin/kubectl
        mode: '0755'
        owner: root
        group: root
- name: Verify cluster nodes from manager node and configure storage
  hosts: manager
  become: true
  vars:
    kubeconfig_file: "/home/{{ ansible_user }}/.kube/config"
  tasks:
    # Retry up to 10 times, 15 s apart. `seq` is used instead of {1..10}
    # because the shell module runs /bin/sh (dash on Debian/Ubuntu), which has
    # no brace expansion -- the old loop body executed exactly once.
    - name: Wait for all worker nodes to join the cluster
      become_user: "{{ ansible_user }}"
      shell: |
        for i in $(seq 1 10); do
          [ "$(kubectl --kubeconfig={{ kubeconfig_file }} get nodes | grep -c worker)" -ge 1 ] && exit 0
          sleep 15
        done
        exit 1
      register: wait_worker
      changed_when: false
      failed_when: wait_worker.rc != 0
    - name: List all k3s nodes
      become_user: "{{ ansible_user }}"
      shell: kubectl --kubeconfig={{ kubeconfig_file }} get nodes -o wide
      register: all_nodes
      changed_when: false
    - name: Show current k3s cluster nodes
      debug:
        var: all_nodes.stdout
    # Task-level interpreter override so the `kubernetes` library installed in
    # /opt/ansible-venv is importable by the k8s module.
    - name: Create StorageClass for /home/ansible/k3s INLINE
      vars:
        ansible_python_interpreter: /opt/ansible-venv/bin/python
      kubernetes.core.k8s:
        kubeconfig: "{{ kubeconfig_file }}"
        definition:
          apiVersion: storage.k8s.io/v1
          kind: StorageClass
          metadata:
            name: local-pvs
          provisioner: kubernetes.io/no-provisioner
          volumeBindingMode: WaitForFirstConsumer

View File

@@ -0,0 +1,131 @@
# create-loadbalancer.yml
- name: Install and configure HAProxy with SSL termination and managed DNS
  hosts: alpha
  become: true
  vars:
    ssl_cert_path: "/etc/ssl/certs/haproxy.pem"
    dns_server: "192.168.1.151"
  tasks:
    - name: Install HAProxy and dependencies
      apt:
        name:
          - haproxy
          - openssl
        state: present
        update_cache: true
    - name: Ensure cert directory exists
      file:
        path: /etc/ssl/certs
        state: directory
        owner: root
        group: root
        mode: '0755'
    - name: Generate private key for HAProxy
      community.crypto.openssl_privatekey:
        path: /etc/ssl/certs/haproxy.key
        size: 2048
        type: RSA
        mode: '0600'
    - name: Generate a Certificate Signing Request (CSR) for HAProxy
      community.crypto.openssl_csr:
        path: /etc/ssl/certs/haproxy.csr
        privatekey_path: /etc/ssl/certs/haproxy.key
        common_name: "{{ inventory_hostname }}"
        subject_alt_name:
          - "DNS:{{ inventory_hostname }}"
        mode: "0644"
    # The module's relative-time syntax ("+365d") replaces the old strftime
    # expressions, which rendered *local* time with a hard-coded "Z" suffix and
    # so mislabelled non-UTC timestamps; not_before defaults to "now".
    - name: Generate self-signed certificate for HAProxy
      community.crypto.x509_certificate:
        path: /etc/ssl/certs/haproxy.crt
        privatekey_path: /etc/ssl/certs/haproxy.key
        csr_path: /etc/ssl/certs/haproxy.csr
        provider: selfsigned
        selfsigned_not_after: "+365d"
        mode: "0644"
    # NOTE(review): `creates` means the .pem is never rebuilt after key/cert
    # rotation; delete the .pem (or drop `creates`) to force a refresh.
    - name: Combine key and cert into .pem file for HAProxy
      shell: cat /etc/ssl/certs/haproxy.key /etc/ssl/certs/haproxy.crt > {{ ssl_cert_path }}
      args:
        creates: "{{ ssl_cert_path }}"
    - name: Configure systemd-resolved to use custom DNS
      copy:
        dest: /etc/systemd/resolved.conf
        content: |
          [Resolve]
          DNS={{ dns_server }}
          FallbackDNS=192.168.1.1
          Domains=mngoma.lab
          DNSStubListener=yes
        owner: root
        group: root
        mode: "0644"
    - name: Ensure systemd-resolved service is enabled and restarted
      systemd:
        name: systemd-resolved
        state: restarted
        enabled: true
    # Registered so HAProxy is restarted only when this file actually changes.
    - name: Upload custom haproxy.cfg with SSL termination and HTTPS-only backend
      copy:
        dest: /etc/haproxy/haproxy.cfg
        content: |
          global
              log /dev/log local0
              log /dev/log local1 notice
              chroot /var/lib/haproxy
              stats socket /run/haproxy/admin.sock mode 660 level admin
              user haproxy
              group haproxy
              daemon
              tune.ssl.default-dh-param 2048
          defaults
              log     global
              mode    http
              option  httplog
              option  dontlognull
              timeout connect 5000
              timeout client  50000
              timeout server  50000
              option forwardfor
          resolvers dns
              nameserver dns1 {{ dns_server }}:53
              resolve_retries 3
              timeout resolve 2s
              timeout retry 1s
              hold valid 10s
          frontend https_front
              bind *:443 ssl crt {{ ssl_cert_path }}
              mode http
              option forwardfor
              http-request set-header X-Forwarded-Proto https
              http-request set-header Host %[req.hdr(host)]
              default_backend app_clusters
          backend app_clusters
              mode http
              balance roundrobin
              option httpchk GET /
              http-check expect status 100,101,102,103,200,201,202,203,204,205,206,207,208,226,300,301,302,303,304,305,306,307,308,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,421,422,423,424,425,426,428,429,431,451
              server lead_https lead.mngoma.lab:443 resolvers dns resolve-prefer ipv4 check ssl verify none
        owner: root
        group: root
        mode: "0644"
      register: haproxy_cfg
    - name: Ensure haproxy is enabled and running
      systemd:
        name: haproxy
        state: started
        enabled: true
    # Restart only on config change instead of bouncing the load balancer on
    # every playbook run (the old task used state=restarted unconditionally).
    - name: Restart haproxy on config change
      systemd:
        name: haproxy
        state: restarted
      when: haproxy_cfg.changed

View File

@@ -0,0 +1,35 @@
---
- name: Drain and stop worker nodes first
  hosts: workers
  become: true
  tasks:
    # delegate_to requires an inventory *hostname*; "manager" is a group name,
    # so delegate to the first host of that group instead.
    - name: Drain worker node (optional - requires kubectl access)
      shell: kubectl drain {{ inventory_hostname }} --ignore-daemonsets --delete-emptydir-data || true
      delegate_to: "{{ groups['manager'][0] }}"
      changed_when: false
      ignore_errors: true
    - name: Stop k3s-agent service on worker
      systemd:
        name: k3s-agent
        state: stopped
        enabled: false
    # Fire-and-forget: async > 0 with poll 0 detaches before the SSH session is
    # torn down by the shutdown; the old async: 0 ran synchronously and could
    # report a spurious failure when the connection dropped mid-shutdown.
    - name: Poweroff worker node
      shell: sleep 2 && shutdown -h now
      async: 1
      poll: 0
      ignore_errors: true
- name: Stop and poweroff the manager node
  hosts: manager
  become: true
  tasks:
    - name: Stop k3s (server) service
      systemd:
        name: k3s
        state: stopped
        enabled: false
    # Fire-and-forget: async > 0 with poll 0 detaches before the SSH session is
    # torn down by the shutdown; the old async: 0 ran synchronously and could
    # report a spurious failure when the connection dropped mid-shutdown.
    - name: Poweroff manager node
      shell: sleep 2 && shutdown -h now
      async: 1
      poll: 0
      ignore_errors: true