first commit
k3s-cluster/ansible/add-cluster-tooling.yml (new file, 191 lines)
@@ -0,0 +1,191 @@
---
- name: Install dependencies, mount NFS at /home/{{ ansible_user }}/k3s, setup K3s, DNS, Traefik, venv for k8s module
  hosts: all
  become: yes
  vars:
    nfs_server: storage.mngoma.lab
    nfs_export_path: /export/k3s
    dns_server_ip: 192.168.1.151
    dns_fallback_ip: 192.168.1.1
    ansible_user_home: "/home/{{ ansible_user }}"
    nfs_mount_path: "{{ ansible_user_home }}/k3s"
    kube_config_path: "{{ ansible_user_home }}/.kube/config"
    kube_venv_path: "/opt/kube-venv"
  tasks:
    - name: Configure systemd-resolved permanent DNS
      copy:
        dest: /etc/systemd/resolved.conf
        content: |
          [Resolve]
          DNS={{ dns_server_ip }}
          FallbackDNS={{ dns_fallback_ip }}
          Domains=mngoma.lab
          DNSStubListener=yes
        owner: root
        group: root
        mode: '0644'

    - name: Ensure /etc/resolv.conf points to systemd-resolved stub
      file:
        src: /run/systemd/resolve/stub-resolv.conf
        dest: /etc/resolv.conf
        state: link
        force: yes

    - name: Restart systemd-resolved to apply DNS changes
      systemd:
        name: systemd-resolved
        state: restarted
        enabled: yes

    - name: Ensure NFS mount point exists
      file:
        path: "{{ nfs_mount_path }}"
        state: directory
        owner: "{{ ansible_user | default('root') }}"
        group: "{{ ansible_user | default('root') }}"
        mode: '0755'

    - name: Mount NFS share immediately (direct, idempotent)
      mount:
        src: "{{ nfs_server }}:{{ nfs_export_path }}"
        path: "{{ nfs_mount_path }}"
        fstype: nfs
        opts: defaults
        state: mounted

    - name: Ensure NFS mount persists across reboots (fstab entry)
      mount:
        src: "{{ nfs_server }}:{{ nfs_export_path }}"
        path: "{{ nfs_mount_path }}"
        fstype: nfs
        opts: defaults
        state: present

    - name: Ensure .kube directory exists
      file:
        path: "{{ ansible_user_home }}/.kube"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0700'

    - name: Download latest kubectl
      shell: |
        arch=$(uname -m)
        if [ "$arch" = "x86_64" ]; then arch="amd64";
        elif [ "$arch" = "aarch64" ]; then arch="arm64";
        fi
        curl -Lo /usr/local/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/${arch}/kubectl"
        chmod +x /usr/local/bin/kubectl
      args:
        creates: /usr/local/bin/kubectl
      environment:
        KUBECONFIG: "{{ kube_config_path }}"

    - name: Install Helm
      shell: |
        curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
      args:
        creates: /usr/local/bin/helm
      environment:
        KUBECONFIG: "{{ kube_config_path }}"

    - name: Download and install kustomize
      shell: |
        kustomize_ver=$(curl -s https://api.github.com/repos/kubernetes-sigs/kustomize/releases/latest | jq -r '.tag_name')
        arch=$(uname -m)
        if [ "$arch" = "x86_64" ]; then arch="amd64";
        elif [ "$arch" = "aarch64" ]; then arch="arm64";
        fi
        url="https://github.com/kubernetes-sigs/kustomize/releases/download/${kustomize_ver}/kustomize_${kustomize_ver#kustomize/}_linux_${arch}.tar.gz"
        tmpfile=$(mktemp)
        curl -L -o "$tmpfile" "$url"
        tar -xz -C /usr/local/bin -f "$tmpfile"
        chmod +x /usr/local/bin/kustomize
        rm -f "$tmpfile"
      args:
        creates: /usr/local/bin/kustomize
      ignore_errors: true
      environment:
        KUBECONFIG: "{{ kube_config_path }}"

    - name: Install kubectx and kubens
      shell: |
        git clone https://github.com/ahmetb/kubectx /opt/kubectx || true
        ln -sf /opt/kubectx/kubectx /usr/local/bin/kubectx
        ln -sf /opt/kubectx/kubens /usr/local/bin/kubens
      environment:
        KUBECONFIG: "{{ kube_config_path }}"

    - name: Ensure systemd override directory for k3s exists
      file:
        path: /etc/systemd/system/k3s.service.d
        state: directory
        owner: root
        group: root
        mode: '0755'

    - name: Enable Traefik ping via CLI arguments
      copy:
        dest: /etc/systemd/system/k3s.service.d/10-traefik-ping.conf
        content: |
          [Service]
          ExecStart=
          ExecStart=/usr/local/bin/k3s server --no-deploy=servicelb --kubelet-arg="cloud-provider=external" \
            --traefik-arg="--ping.entryPoint=web" \
            --traefik-arg="--ping.manualRouting=false" \
            --traefik-arg="--ping.responseMessage=Healthy"
        owner: root
        group: root
        mode: '0644'

    - name: Reload systemd to pick up k3s override
      systemd:
        daemon_reload: yes

    # --- VENV FOR KUBERNETES MODULE ---
    - name: Create venv for k8s ansible modules
      command: python3 -m venv {{ kube_venv_path }}
      args:
        creates: "{{ kube_venv_path }}/bin/activate"

    - name: Install kubernetes python library in venv
      pip:
        name: kubernetes
        virtualenv: "{{ kube_venv_path }}"
        virtualenv_python: python3

# The following play block will ONLY target the manager group
- name: Expose Traefik dashboard via IngressRoute (manager only)
  hosts: manager  # Change to your actual manager inventory group name
  become: yes
  vars:
    ansible_user_home: "/home/{{ ansible_user }}"
    kube_config_path: "{{ ansible_user_home }}/.kube/config"
    kube_venv_path: "/opt/kube-venv"
  tasks:
    - name: Expose Traefik dashboard via IngressRoute (inline)
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: traefik.containo.us/v1alpha1
          kind: IngressRoute
          metadata:
            name: traefik-dashboard
            namespace: kube-system
          spec:
            entryPoints:
              - web
              - websecure
            routes:
              - match: Host(`dashboard.apps.mngoma.lab`)
                kind: Rule
                services:
                  - name: traefik
                    port: 8080
            tls: {}
      environment:
        KUBECONFIG: "{{ kube_config_path }}"
      vars:
        ansible_python_interpreter: "{{ kube_venv_path }}/bin/python"
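
Unlike the playbooks under common/, this one carries no invocation hint, so here is a possible run mirroring that convention; the choice of inventory is an assumption, and any manifest under config/ that defines a manager group would do:

    ansible-playbook -i config/appliances.ini add-cluster-tooling.yml

Afterwards, DNS and the NFS mount can be spot-checked on a node with resolvectl status and findmnt against the mount path (substitute the actual ansible_user in the home directory).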

k3s-cluster/ansible/common/create-ansible-user.yml (new file, 81 lines)
@@ -0,0 +1,81 @@
# command: ansible-playbook -i config/<target manifest>.ini common/create-ansible-user.yml --ask-become-pass
# Note: this playbook requires an interactive mode or passed secret for privilege escalation
---
- name: Create ansible user and configure passwordless sudo
  hosts: all
  become: true
  become_method: sudo
  vars:
    ansible_user: khwezi
  tasks:
    - name: Ensure 'ansible' user exists
      ansible.builtin.user:
        name: ansible
        groups: sudo
        append: yes
        shell: /bin/bash
        state: present

    - name: Check if passwordless sudo is already configured for 'ansible'
      ansible.builtin.shell: |
        grep -Fxq "ansible ALL=(ALL) NOPASSWD: ALL" /etc/sudoers.d/ansible
      register: sudoers_check
      ignore_errors: true
      changed_when: false

    - name: Allow 'ansible' user passwordless sudo
      ansible.builtin.copy:
        dest: /etc/sudoers.d/ansible
        content: "ansible ALL=(ALL) NOPASSWD: ALL\n"
        owner: root
        group: root
        mode: '0440'
      when: sudoers_check.rc != 0

    - name: Ensure /home/ansible/.ssh directory exists
      ansible.builtin.file:
        path: /home/ansible/.ssh
        state: directory
        owner: ansible
        group: ansible
        mode: '0700'

    - name: Copy id_ed25519 private key to ansible user
      ansible.builtin.copy:
        src: ~/.ssh/id_ed25519
        dest: /home/ansible/.ssh/id_ed25519
        owner: ansible
        group: ansible
        mode: '0600'

    - name: Copy id_ed25519 public key to ansible user
      ansible.builtin.copy:
        src: ~/.ssh/id_ed25519.pub
        dest: /home/ansible/.ssh/id_ed25519.pub
        owner: ansible
        group: ansible
        mode: '0644'

    - name: Ensure authorized_keys exists
      ansible.builtin.file:
        path: /home/ansible/.ssh/authorized_keys
        state: touch
        owner: ansible
        group: ansible
        mode: '0600'

    - name: Read public key content
      ansible.builtin.slurp:
        src: /home/ansible/.ssh/id_ed25519.pub
      register: pubkey_content

    - name: Ensure public key is present in authorized_keys
      ansible.builtin.lineinfile:
        path: /home/ansible/.ssh/authorized_keys
        line: "{{ pubkey_content['content'] | b64decode | trim }}"
        owner: ansible
        group: ansible
        mode: '0600'
        create: yes
        state: present

    - name: Allow 'ansible' user to write to /etc/systemd/resolved.conf
      ansible.builtin.file:
        path: /etc/systemd/resolved.conf
        owner: ansible
        group: ansible
        mode: '0664'
        state: file
      become: true
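
Before pointing the inventories at the new account, a quick check that key-based login and passwordless sudo both work; a sketch, assuming the control node holds the matching id_ed25519 key and <host> is any machine from the inventory:

    ssh -i ~/.ssh/id_ed25519 ansible@<host> 'sudo -n true && echo sudo-ok'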

k3s-cluster/ansible/common/install-docker.yml (new file, 86 lines)
@@ -0,0 +1,86 @@
# command: ansible-playbook -i config/<target manifest>.ini common/install-docker.yml
---
- name: Install Docker and Test
  hosts: all
  become: true
  become_method: sudo

  tasks:
    - name: Ensure required apt packages are installed
      ansible.builtin.apt:
        name:
          - apt-transport-https
          - ca-certificates
          - curl
          - gnupg
          - lsb-release
        state: present
        update_cache: yes

    - name: Ensure gpg is installed
      ansible.builtin.apt:
        name: gpg
        state: present

    - name: Remove old Docker keyring files if present
      ansible.builtin.file:
        path: "{{ item }}"
        state: absent
      loop:
        - /usr/share/keyrings/docker-archive-keyring.gpg
        - /usr/share/keyrings/docker-archive-keyring.gpg.asc

    - name: Download Docker's official GPG key (ASCII)
      ansible.builtin.get_url:
        url: https://download.docker.com/linux/ubuntu/gpg
        dest: /usr/share/keyrings/docker-archive-keyring.gpg.asc
        mode: '0644'
        force: yes

    - name: Convert Docker GPG key to binary format
      ansible.builtin.command: >
        gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg /usr/share/keyrings/docker-archive-keyring.gpg.asc

    - name: Add Docker repository if not present (modern method)
      ansible.builtin.apt_repository:
        repo: "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable"
        state: present
        filename: docker

    - name: Update apt cache after adding Docker repo
      ansible.builtin.apt:
        update_cache: yes

    - name: Check if Docker is already installed
      ansible.builtin.command: docker --version
      register: docker_check
      ignore_errors: true
      changed_when: false

    - name: Install Docker Engine
      ansible.builtin.apt:
        name:
          - docker-ce
          - docker-ce-cli
          - containerd.io
        state: present
      when: docker_check.rc != 0

    - name: Check Docker version (post-install)
      ansible.builtin.command: docker --version
      register: docker_version
      changed_when: false

    - name: Show Docker version
      ansible.builtin.debug:
        var: docker_version.stdout

    - name: Run hello-world container to test Docker
      ansible.builtin.command: docker run --name hello-test --rm hello-world
      register: hello_world_output
      changed_when: false

    - name: Show hello-world output
      ansible.builtin.debug:
        var: hello_world_output.stdout

k3s-cluster/ansible/common/update-docker.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
# command: ansible-playbook -i config/<target manifest>.ini common/update-docker.yml
---
- name: Update Docker only on hosts where it is installed
  hosts: all
  become: true
  become_method: sudo

  tasks:
    - name: Check if Docker is installed
      ansible.builtin.command: docker --version
      register: docker_check
      ignore_errors: true
      changed_when: false

    - name: Update Docker packages if installed
      ansible.builtin.apt:
        name:
          - docker-ce
          - docker-ce-cli
          - containerd.io
        state: latest
        update_cache: yes
      when: docker_check.rc == 0

    - name: Debug message if Docker is not installed
      ansible.builtin.debug:
        msg: "Docker is not installed on this host. Skipping update."
      when: docker_check.rc != 0

k3s-cluster/ansible/common/update-hosts.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
# command: ansible-playbook -i config/<target manifest>.ini common/update-hosts.yml
---
- name: Update and upgrade all apt packages
  hosts: all
  become: true
  become_method: sudo

  tasks:
    - name: Update apt cache
      ansible.builtin.apt:
        update_cache: yes

    - name: Upgrade all packages
      ansible.builtin.apt:
        upgrade: dist

    - name: Autoremove unused packages
      ansible.builtin.apt:
        autoremove: yes

k3s-cluster/ansible/config/appliances.ini (new file, 30 lines)
@@ -0,0 +1,30 @@
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=ansible
ansible_ssh_private_key_file=~/.ssh/id_ed25519

[shared]
sentry ansible_host=sentry.mngoma.lab
alpha ansible_host=alpha.lb.mngoma.lab
database ansible_host=database.mngoma.lab
vpn ansible_host=vpn.mngoma.lab
khongisa ansible_host=khongisa.mngoma.lab
#beta ansible_host=beta.lb.mngoma.lab
#dns ansible_host=dns.mngoma.lab
#storage ansible_host=storage.mngoma.lab
manager ansible_host=lead.mngoma.lab
worker ansible_host=worker1.mngoma.lab
#manager2 ansible_host=follow.mngoma.lab
#worker2 ansible_host=worker2.mngoma.lab

[makhiwanecluster]
manager ansible_host=lead.mngoma.lab
worker ansible_host=worker1.mngoma.lab

[mbubecluster]
#manager2 ansible_host=follow.mngoma.lab
#worker2 ansible_host=worker2.mngoma.lab

[loadbalancers]
alpha ansible_host=alpha.lb.mngoma.lab
#beta ansible_host=beta.lb.mngoma.lab
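
With several hosts commented out in this manifest, a connectivity check before running any playbook is cheap; a possible invocation against the inventory as committed:

    ansible -i config/appliances.ini all -m ping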

k3s-cluster/ansible/config/makhiwane.ini (new file, 11 lines)
@@ -0,0 +1,11 @@
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=ansible
ansible_ssh_private_key_file=~/.ssh/id_ed25519

[cluster]
manager ansible_host=lead.mngoma.lab
worker ansible_host=worker1.mngoma.lab

[workers]
worker ansible_host=worker1.mngoma.lab

k3s-cluster/ansible/config/mbube.ini (new file, 11 lines)
@@ -0,0 +1,11 @@
[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=ansible
ansible_ssh_private_key_file=~/.ssh/id_ed25519

[cluster]
manager ansible_host=follow.mngoma.lab
worker ansible_host=worker2.mngoma.lab

[workers]
worker ansible_host=worker2.mngoma.lab

k3s-cluster/ansible/config/safe-cluster-startup.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
---
- name: Ensure manager node is running k3s / start if needed
  hosts: manager
  become: true
  tasks:
    - name: Start k3s service on manager
      systemd:
        name: k3s
        state: started
        enabled: true

- name: Ensure worker nodes are running k3s-agent / start if needed
  hosts: workers
  become: true
  tasks:
    - name: Start k3s-agent service on worker nodes
      systemd:
        name: k3s-agent
        state: started
        enabled: true
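
This startup playbook lives under config/ next to the inventories; a possible invocation, assuming the makhiwane manifest (which defines both the manager host and the workers group targeted here):

    ansible-playbook -i config/makhiwane.ini config/safe-cluster-startup.yml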

k3s-cluster/ansible/create-cluster.yml (new file, 256 lines)
@@ -0,0 +1,256 @@
---
- name: Gather IPv4 address facts for all hosts
  hosts: all
  gather_facts: yes
  vars:
    dns_server: "192.168.1.151"
  tasks:
    - name: Configure systemd-resolved to use custom DNS
      become: true
      copy:
        dest: /etc/systemd/resolved.conf
        content: |
          [Resolve]
          DNS={{ dns_server }}
          FallbackDNS=192.168.1.1
          Domains=mngoma.lab
          DNSStubListener=yes
        owner: root
        group: root
        mode: "0644"

    - name: Ensure systemd-resolved service is enabled and restarted
      become: true
      systemd:
        name: systemd-resolved
        state: restarted
        enabled: yes

    - name: Ensure host IPv4 address is available and stored as fact
      set_fact:
        node_ipv4: "{{ ansible_default_ipv4.address }}"
      when: ansible_default_ipv4 is defined and ansible_default_ipv4.address is defined

    - name: Fail if IPv4 address could not be determined
      fail:
        msg: "Could not determine IPv4 address for {{ inventory_hostname }}. Please check network configuration."
      when: node_ipv4 is not defined

    - name: Ensure /home/{{ ansible_user }}/k3s directory exists
      become: yes
      file:
        path: /home/{{ ansible_user }}/k3s
        state: directory
        owner: root
        group: root
        mode: '0777'

- name: Initialise the K3s control plane (manager)
  hosts: manager
  become: yes
  vars:
    k3s_version: v1.29.4+k3s1
    kubeconfig_dir: "/home/{{ ansible_user }}/.kube"
    kubeconfig_file: "{{ kubeconfig_dir }}/config"
  tasks:
    - name: Install required apt dependencies
      apt:
        name:
          - curl
          - python3-pip
          - python3-venv
        state: present
        update_cache: yes

    - name: Create a Python virtual environment for Ansible k8s modules
      command: python3 -m venv /opt/ansible-venv
      args:
        creates: /opt/ansible-venv

    - name: Install Kubernetes and OpenShift libraries in the venv
      command: /opt/ansible-venv/bin/pip install kubernetes openshift

    - name: Install k3s on manager (control plane)
      shell: |
        curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION={{ k3s_version }} sh -
      args:
        creates: /usr/local/bin/k3s

    - name: Gather architecture for kubectl
      set_fact:
        # Folded scalar plus whitespace-trimming Jinja tags keep stray newlines out of the download URL below.
        kubectl_arch: >-
          {%- if ansible_architecture == "x86_64" -%}
          amd64
          {%- elif "armv7l" in ansible_architecture -%}
          arm
          {%- elif "aarch64" in ansible_architecture -%}
          arm64
          {%- else -%}
          {{ ansible_architecture }}
          {%- endif -%}

    - name: Download latest kubectl binary
      shell: |
        curl -Lo /usr/local/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/{{ kubectl_arch }}/kubectl"
      args:
        creates: /usr/local/bin/kubectl

    - name: Make kubectl executable
      file:
        path: /usr/local/bin/kubectl
        mode: '0755'
        owner: root
        group: root

    - name: Validate kubectl version
      shell: kubectl version --client --output=yaml
      register: kubectl_version
      changed_when: false

    - name: Show kubectl version
      debug:
        var: kubectl_version.stdout

    - name: Ensure .kube directory exists for ansible user
      become_user: "{{ ansible_user }}"
      file:
        path: "{{ kubeconfig_dir }}"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0700'

    - name: Copy kubeconfig file to ansible user home
      become: true
      copy:
        src: /etc/rancher/k3s/k3s.yaml
        dest: "{{ kubeconfig_file }}"
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0600'
        remote_src: yes

    - name: Replace server IP in kubeconfig with manager's IP address
      become_user: "{{ ansible_user }}"
      lineinfile:
        path: "{{ kubeconfig_file }}"
        regexp: 'server: https://127\.0\.0\.1:6443'
        line: "    server: https://{{ node_ipv4 }}:6443"

    - name: Get the cluster join token
      shell: cat /var/lib/rancher/k3s/server/node-token
      register: k3s_token
      changed_when: false

    - name: Set fact for join token and manager IP
      set_fact:
        k3s_node_token: "{{ k3s_token.stdout }}"
        manager_ip: "{{ node_ipv4 }}"

    - name: Add the join token and manager IP to hostvars for workers
      add_host:
        name: "cluster_primary"
        groups: join_info
        k3s_node_token: "{{ k3s_node_token }}"
        k3s_manager_ip: "{{ manager_ip }}"

- name: Install and join worker nodes to the control plane
  hosts: workers
  become: yes
  vars:
    k3s_version: v1.29.4+k3s1
  tasks:
    - name: Install required dependencies
      apt:
        name: [curl]
        state: present
        update_cache: yes

    - name: Ensure /home/{{ ansible_user }}/k3s directory exists
      file:
        path: /home/{{ ansible_user }}/k3s
        state: directory
        owner: root
        group: root
        mode: '0777'

    - name: Set manager join information
      set_fact:
        k3s_node_token: "{{ hostvars[groups['join_info'][0]]['k3s_node_token'] }}"
        k3s_manager_ip: "{{ hostvars[groups['join_info'][0]]['k3s_manager_ip'] }}"

    - name: Fail if manager's IP is not available
      fail:
        msg: "Could not determine manager's IP for joining cluster!"
      when: k3s_manager_ip is not defined

    - name: Install k3s agent (worker)
      shell: |
        curl -sfL https://get.k3s.io | K3S_URL=https://{{ k3s_manager_ip }}:6443 K3S_TOKEN={{ k3s_node_token }} INSTALL_K3S_VERSION={{ k3s_version }} sh -
      args:
        creates: /usr/local/bin/k3s-agent

    - name: Gather architecture for kubectl
      set_fact:
        # Folded scalar plus whitespace-trimming Jinja tags keep stray newlines out of the download URL below.
        kubectl_arch: >-
          {%- if ansible_architecture == "x86_64" -%}
          amd64
          {%- elif "armv7l" in ansible_architecture -%}
          arm
          {%- elif "aarch64" in ansible_architecture -%}
          arm64
          {%- else -%}
          {{ ansible_architecture }}
          {%- endif -%}

    - name: Download latest kubectl binary (worker)
      shell: |
        curl -Lo /usr/local/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/{{ kubectl_arch }}/kubectl"
      args:
        creates: /usr/local/bin/kubectl

    - name: Make kubectl executable (worker)
      file:
        path: /usr/local/bin/kubectl
        mode: '0755'
        owner: root
        group: root

- name: Verify cluster nodes from manager node and configure storage
  hosts: manager
  become: yes
  vars:
    kubeconfig_file: "/home/{{ ansible_user }}/.kube/config"
  tasks:
    - name: Wait for all worker nodes to join the cluster
      become_user: "{{ ansible_user }}"
      # Brace expansion {1..10} needs bash; the default /bin/sh (dash) would run the loop body only once.
      shell: |
        for i in {1..10}; do
          [ $(kubectl --kubeconfig={{ kubeconfig_file }} get nodes | grep -c worker) -ge 1 ] && exit 0
          sleep 15
        done
        exit 1
      args:
        executable: /bin/bash
      register: wait_worker
      failed_when: wait_worker.rc != 0

    - name: List all k3s nodes
      become_user: "{{ ansible_user }}"
      shell: kubectl --kubeconfig={{ kubeconfig_file }} get nodes -o wide
      register: all_nodes

    - name: Show current k3s cluster nodes
      debug:
        var: all_nodes.stdout

    - name: Create StorageClass for /home/ansible/k3s INLINE
      vars:
        ansible_python_interpreter: /opt/ansible-venv/bin/python
      kubernetes.core.k8s:
        kubeconfig: "{{ kubeconfig_file }}"
        definition:
          apiVersion: storage.k8s.io/v1
          kind: StorageClass
          metadata:
            name: local-pvs
          provisioner: kubernetes.io/no-provisioner
          volumeBindingMode: WaitForFirstConsumer
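
A possible end-to-end run and follow-up check, assuming the makhiwane inventory and that the kubeconfig rewrite above succeeded so kubectl works as the ansible user on the manager:

    ansible-playbook -i config/makhiwane.ini create-cluster.yml
    ssh ansible@lead.mngoma.lab kubectl get nodes -o wide
    ssh ansible@lead.mngoma.lab kubectl get storageclass local-pvs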

k3s-cluster/ansible/create-loadbalancer.yml (new file, 131 lines)
@@ -0,0 +1,131 @@
# create-loadbalancer.yml
- name: Install and configure HAProxy with SSL termination and managed DNS
  hosts: alpha
  become: yes
  vars:
    ssl_cert_path: "/etc/ssl/certs/haproxy.pem"
    dns_server: "192.168.1.151"

  tasks:
    - name: Install HAProxy and dependencies
      apt:
        name:
          - haproxy
          - openssl
        state: present
        update_cache: yes

    - name: Ensure cert directory exists
      file:
        path: /etc/ssl/certs
        state: directory
        owner: root
        group: root
        mode: '0755'

    - name: Generate private key for HAProxy
      community.crypto.openssl_privatekey:
        path: /etc/ssl/certs/haproxy.key
        size: 2048
        type: RSA
        mode: '0600'

    - name: Generate a Certificate Signing Request (CSR) for HAProxy
      community.crypto.openssl_csr:
        path: /etc/ssl/certs/haproxy.csr
        privatekey_path: /etc/ssl/certs/haproxy.key
        common_name: "{{ inventory_hostname }}"
        subject_alt_name:
          - "DNS:{{ inventory_hostname }}"
        mode: "0644"

    - name: Generate self-signed certificate for HAProxy
      community.crypto.x509_certificate:
        path: /etc/ssl/certs/haproxy.crt
        privatekey_path: /etc/ssl/certs/haproxy.key
        csr_path: /etc/ssl/certs/haproxy.csr
        provider: selfsigned
        selfsigned_not_before: "{{ '%Y%m%d%H%M%SZ' | strftime(ansible_date_time.epoch | int) }}"
        selfsigned_not_after: "{{ '%Y%m%d%H%M%SZ' | strftime((ansible_date_time.epoch | int) + (365*24*60*60)) }}"
        mode: "0644"

    - name: Combine key and cert into .pem file for HAProxy
      shell: cat /etc/ssl/certs/haproxy.key /etc/ssl/certs/haproxy.crt > {{ ssl_cert_path }}
      args:
        creates: "{{ ssl_cert_path }}"

    - name: Configure systemd-resolved to use custom DNS
      become: true
      copy:
        dest: /etc/systemd/resolved.conf
        content: |
          [Resolve]
          DNS={{ dns_server }}
          FallbackDNS=192.168.1.1
          Domains=mngoma.lab
          DNSStubListener=yes
        owner: root
        group: root
        mode: "0644"

    - name: Ensure systemd-resolved service is enabled and restarted
      become: true
      systemd:
        name: systemd-resolved
        state: restarted
        enabled: yes

    - name: Upload custom haproxy.cfg with SSL termination and HTTPS-only backend
      copy:
        dest: /etc/haproxy/haproxy.cfg
        content: |
          global
              log /dev/log local0
              log /dev/log local1 notice
              chroot /var/lib/haproxy
              stats socket /run/haproxy/admin.sock mode 660 level admin
              user haproxy
              group haproxy
              daemon
              tune.ssl.default-dh-param 2048

          defaults
              log global
              mode http
              option httplog
              option dontlognull
              timeout connect 5000
              timeout client 50000
              timeout server 50000
              option forwardfor

          resolvers dns
              nameserver dns1 {{ dns_server }}:53
              resolve_retries 3
              timeout resolve 2s
              timeout retry 1s
              hold valid 10s

          frontend https_front
              bind *:443 ssl crt {{ ssl_cert_path }}
              mode http
              option forwardfor
              http-request set-header X-Forwarded-Proto https
              http-request set-header Host %[req.hdr(host)]
              default_backend app_clusters

          backend app_clusters
              mode http
              balance roundrobin
              option httpchk GET /
              http-check expect status 100,101,102,103,200,201,202,203,204,205,206,207,208,226,300,301,302,303,304,305,306,307,308,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,421,422,423,424,425,426,428,429,431,451
              server lead_https lead.mngoma.lab:443 resolvers dns resolve-prefer ipv4 check ssl verify none
        owner: root
        group: root
        mode: "0644"

    - name: Enable and start haproxy
      systemd:
        name: haproxy
        state: restarted
        enabled: yes
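
Since the certificate is self-signed, any probe of the frontend has to skip trust verification; a minimal check, assuming HAProxy is answering on the alpha host:

    curl -kI https://alpha.lb.mngoma.lab/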

k3s-cluster/ansible/safe-cluster-shutdown.yml (new file, 35 lines)
@@ -0,0 +1,35 @@
---
- name: Drain and stop worker nodes first
  hosts: workers
  become: true
  tasks:
    - name: Drain worker node (optional - requires kubectl access)
      shell: kubectl drain {{ inventory_hostname }} --ignore-daemonsets --delete-emptydir-data || true
      delegate_to: manager
      ignore_errors: yes

    - name: Stop k3s-agent service on worker
      systemd:
        name: k3s-agent
        state: stopped
        enabled: false

    - name: Poweroff worker node
      shell: shutdown -h now
      async: 0
      poll: 0

- name: Stop and poweroff the manager node
  hosts: manager
  become: true
  tasks:
    - name: Stop k3s (server) service
      systemd:
        name: k3s
        state: stopped
        enabled: false

    - name: Poweroff manager node
      shell: shutdown -h now
      async: 0
      poll: 0
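
A possible invocation, with the inventory again an assumption; the play order matters here (workers are drained and powered off before the manager), so the run should not be limited to a subset of hosts:

    ansible-playbook -i config/makhiwane.ini safe-cluster-shutdown.yml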