Compare commits


12 Commits

7 changed files with 57 additions and 25 deletions

View File

@ -7,7 +7,7 @@
- Create a user named "orangepi" with the password "orangepi".
- Install sudo.
- Install `sudo`.
- Allow the user "orangepi" access to `sudo`.
- The user "orangepi" will be deleted later, so this is fine.
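
A rough illustration of these first-login steps as Ansible tasks; this is a hedged sketch only, since armbian_initial_setup.yaml itself is not part of this diff, so task names and module choices here are assumptions:

    - name: Install sudo                     # sketch, not the repo's actual task
      ansible.builtin.apt:
        name: sudo
        state: present
      become: true

    - name: Create the temporary "orangepi" user with sudo access
      ansible.builtin.user:
        name: orangepi
        password: "{{ 'orangepi' | password_hash('sha512') }}"
        groups: sudo
        append: true
        shell: /bin/bash
      become: true
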
@ -98,7 +98,9 @@ ansible_become_password: Password used to execute `sudo`
# Actual vars
set_hostname: New hostname for the device; by default it takes its value from the variable `ansible_host`, i.e. the address/URL used to connect to the device.
is_master: This variable determines which playbooks will run.
is_master: This variable determines which playbooks will run. (default: false)
is_proxmox_vm: If true, installs the QEMU guest agent for monitoring. (default: false)
setup_only: If true, prevents creating/joining a Kubernetes cluster. (default: false)
# Cluster shit
kubeadm_join_path: File path that will store the `kubeadm join` command to be executed by the worker nodes.
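
As a hedged example, concrete values for these variables could look like the following (illustrative only; the real values live in the inventory and playbook shown further down this diff):

    set_hostname: "{{ ansible_host }}"
    is_master: false
    is_proxmox_vm: true
    setup_only: false
    kubeadm_join_path: "./Exported/kubeadm-join.command"
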
@ -113,7 +115,8 @@ _kubeadm_join_command: Placeholder, will be populated at later stages of the scr
- Update -> Upgrade.
- Uninstalls `containerd`.
- Installs Docker (Debian) and Kubernetes repos.
- Installs `containerd.io`, `kubelet`, `kubeadm`, `kubectl`, `git`, `vim`.
- Installs `containerd.io`, `kubelet`, `kubeadm`, `kubectl`, `git`, `vim`, among others.
- If the variable `is_proxmox_vm` is set to true, also installs the QEMU guest agent.
- Sets the default config for `containerd` with cgroups enabled (see the sketch after this list).
- Enables some `iptables` modules.
- "Resets" `/etc/hosts` file
@ -137,7 +140,7 @@ _kubeadm_join_command: Placeholder, will be populated at later stages of the scr
#### tasks_end.yaml
- At the moment, only reboots.
- At the moment, deletes the specified "default user" (usually "orangepi", in my scenario at least), then reboots the system.
# USAGE

View File

@ -9,9 +9,14 @@
slaves:
hosts:
slave[03:03].filter.home:
slave[04:04].filter.home:
vars:
is_master: no
is_proxmox_vm: true
# setup_only: false
initial_username: orangepi
initial_password: orangepi
delete_user_name: orangepi
all:
vars:
kubernetes_version: 1.32
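
A master group would sit next to the slaves group above; a hedged sketch (the hostname is hypothetical, not taken from the repo):

    masters:
      hosts:
        master01.filter.home:   # hypothetical hostname
      vars:
        is_master: yes
        is_proxmox_vm: false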

View File

@ -10,10 +10,13 @@
vars:
# Testing purposes
ansible_user: "my_user" # Testing purposes
ansible_password: "my_password" # Testing purposes
ansible_user: "adminuser" # Testing purposes
ansible_password: "adminpassword" # Testing purposes
ansible_become_password: "{{ ansible_password }}" # Testing purposes
# Kubernetes version
target_kubernetes_version: "{{ kubernetes_version }}" # Testing purposes
# Actual vars
set_hostname: "{{ ansible_host }}"
# is_master: Figurative
@ -22,29 +25,37 @@
kubeadm_join_path: "./Exported/kubeadm-join.command"
_kubeadm_join_command: "" # Placeholder
# Others
_is_master: "{{ is_master | default('false') | bool }}" # Defaults to false; decides whether master or worker tasks run
_is_proxmox_vm: "{{ is_proxmox_vm | default('false') | bool }}" # Defaults to false; controls the QEMU guest agent install
_setup_only: "{{ setup_only | default('false') | bool }}" # Defaults to false; prevents creating/joining a cluster
tasks:
# - check vars
# check vars
- debug: var=set_hostname
- debug: var=is_master
- debug: var=_is_master
- debug: var=_is_proxmox_vm
- debug: var=_setup_only
- debug: var=target_kubernetes_version
- name: Ping check
ping:
#
# Init / Basic setup
- name: set up node
import_tasks: tasks_prepare_node.yaml
become: true
# If is_master: init
# If _is_master: init
- name: init cluster
import_tasks: tasks_master.yaml
when: is_master
when: _is_master and not _setup_only
become: true
# else: join
- name: join cluster
import_tasks: tasks_slave.yaml
when: not is_master
when: not _is_master and not _setup_only
# Do other stuff
- name: post setup
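
The `default('false') | bool` pattern in the vars above is what keeps `is_master`, `is_proxmox_vm` and `setup_only` optional in the inventory. A self-contained sketch of the same idea (hypothetical playbook, not part of the repo):

    - hosts: localhost
      gather_facts: false
      vars:
        _setup_only: "{{ setup_only | default('false') | bool }}"
      tasks:
        - name: Show the resolved boolean
          ansible.builtin.debug:
            msg: "setup_only resolved to {{ _setup_only }}"

Running it as-is prints false; running it with `-e setup_only=true` flips the flag, which is the same mechanism the `when: _is_master and not _setup_only` conditions rely on.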

View File

@ -1,6 +1,6 @@
# https://stackoverflow.com/questions/46515704/how-to-kill-a-running-process-using-ansible
- name: Get running processes
shell: "ps -ef | grep -v grep | grep -w ^orangepi | awk '{print $2}'"
shell: "ps -ef | grep -v grep | grep -w ^{{ delete_user_name }} | awk '{print $2}'"
register: running_processes
when: delete_user_name is defined and delete_user_name | length > 0
@ -10,7 +10,7 @@
- name: Kill running processes
shell: "kill {{ item }}"
with_items: "{{ running_processes.stdout_lines }}"
when: delete_user_name is defined and delete_user_name | length > 0
when: (delete_user_name is defined) and (delete_user_name | length > 0) and (running_processes.stdout_lines | default([]) | length > 0)
- wait_for:
path: "/proc/{{ item }}/status"
@ -18,12 +18,12 @@
with_items: "{{ running_processes.stdout_lines }}"
ignore_errors: yes
register: killed_processes
when: delete_user_name is defined and delete_user_name | length > 0
when: (delete_user_name is defined) and (delete_user_name | length > 0) and (running_processes.stdout_lines | default([]) | length > 0)
- name: Force kill stuck processes
shell: "kill -9 {{ item }}"
with_items: "{{ killed_processes.results | select('failed') | map(attribute='item') | list }}"
when: delete_user_name is defined and delete_user_name | length > 0
when: (delete_user_name is defined) and (delete_user_name | length > 0) and (running_processes.stdout_lines | default([]) | length > 0)
- name: Delete user
@ -33,7 +33,6 @@
state: absent
when: delete_user_name is defined and delete_user_name | length > 0
#reboot
- name: reboot
reboot:
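
If the ps/grep/awk pipeline above ever becomes a maintenance burden, `pkill -u` is a simpler alternative; a hedged sketch, assuming procps' pkill is installed on the nodes (this is not what the playbook currently does):

    - name: Kill remaining processes owned by the user to be deleted
      ansible.builtin.command: "pkill -9 -u {{ delete_user_name }}"
      register: pkill_result
      failed_when: pkill_result.rc not in [0, 1]   # rc 1 only means "no processes matched"
      when: delete_user_name is defined and delete_user_name | length > 0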

View File

@ -22,12 +22,12 @@
### Calico
- name: Calico
ansible.builtin.command: "kubectl create --kubeconfig /etc/kubernetes/admin.conf -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/calico.yaml"
ansible.builtin.command: "kubectl create --kubeconfig /etc/kubernetes/admin.conf -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.2/manifests/calico.yaml"
## Extras
### MetalLB
- name: MetalLB
ansible.builtin.command: "kubectl create --kubeconfig /etc/kubernetes/admin.conf -f https://raw.githubusercontent.com/metallb/metallb/v0.13.10/config/manifests/metallb-native.yaml"
ansible.builtin.command: "kubectl create --kubeconfig /etc/kubernetes/admin.conf -f https://raw.githubusercontent.com/metallb/metallb/v0.14.3/config/manifests/metallb-native.yaml"
## Export join command
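
Both manifests above are applied fire-and-forget. If ordering ever matters (for example MetalLB's webhooks needing the CNI to be up), a hedged verification sketch using `kubectl wait` could follow them; the namespaces and labels below are the upstream defaults for these manifests, not something the playbook defines:

    - name: Wait for Calico node pods to be Ready
      ansible.builtin.command: >
        kubectl wait --kubeconfig /etc/kubernetes/admin.conf
        --namespace kube-system --selector k8s-app=calico-node
        --for=condition=Ready pods --timeout=300s
      changed_when: false

    - name: Wait for MetalLB pods to be Ready
      ansible.builtin.command: >
        kubectl wait --kubeconfig /etc/kubernetes/admin.conf
        --namespace metallb-system --selector app=metallb
        --for=condition=Ready pods --timeout=300s
      changed_when: false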

View File

@ -92,13 +92,13 @@
# Kubeshit repo
- name: Add Kubernetes GPG key
apt_key:
url: https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key
url: "https://pkgs.k8s.io/core:/stable:/v{{ target_kubernetes_version }}/deb/Release.key"
state: present
keyring: /etc/apt/keyrings/kubernetes-apt-keyring-1.28.gpg
keyring: "/etc/apt/keyrings/kubernetes-apt-keyring-{{ target_kubernetes_version }}.gpg"
- name: Add Kubernetes APT repository
apt_repository:
repo: deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring-1.28.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /
repo: "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring-{{ target_kubernetes_version }}.gpg] https://pkgs.k8s.io/core:/stable:/v{{ target_kubernetes_version }}/deb/ /"
state: present
@ -194,7 +194,7 @@
name: containerd
enabled: true
### Restart
### Restart containerd
- name: Enable kubelet
ansible.builtin.systemd:
name: kubelet
@ -220,3 +220,16 @@
ff02::2 ip6-allrouters
## ProxmoxVM related
- name: Install qemu-guest-agent
ansible.builtin.apt:
pkg:
- qemu-guest-agent
when: is_proxmox_vm
- name: Enable qemu-guest-agent
ansible.builtin.systemd:
name: qemu-guest-agent
enabled: true
# The QEMU guest agent option must also be enabled on the Proxmox VM itself; after installing, the VM needs to be fully shut down and started again from the Proxmox GUI.
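
A side note on the key handling above: `apt_key` builds on apt's legacy key management, which Debian has deprecated. A hedged alternative sketch with the same templated version, assuming an apt release that accepts ASCII-armored keys referenced via signed-by (Debian 12 / Ubuntu 22.04 do):

    - name: Download the Kubernetes signing key
      ansible.builtin.get_url:
        url: "https://pkgs.k8s.io/core:/stable:/v{{ target_kubernetes_version }}/deb/Release.key"
        dest: "/etc/apt/keyrings/kubernetes-apt-keyring-{{ target_kubernetes_version }}.asc"
        mode: "0644"

    - name: Add the Kubernetes APT repository against that key
      ansible.builtin.apt_repository:
        repo: "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring-{{ target_kubernetes_version }}.asc] https://pkgs.k8s.io/core:/stable:/v{{ target_kubernetes_version }}/deb/ /"
        state: present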

run.sh
View File

@ -1,7 +1,8 @@
#!/bin/bash
export ANSIBLE_HOST_KEY_CHECKING=False
ansible-playbook -i inventory.yaml Initial_Setup/armbian_initial_setup.yaml && sleep 25 && # Wait for reboot
ansible-playbook -i inventory.yaml Initial_Setup/armbian_initial_setup.yaml && printf "Giving the nodes some time (40s) to catch up with the reboot command ...\n" &&
sleep 40 && # Wait for reboot
ansible-playbook -i inventory.yaml ksetup/playbook.yaml
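
The fixed `sleep 40` works but guesses at the reboot time. A hedged alternative sketch: the second playbook could instead begin by waiting for the hosts to come back via `wait_for_connection` (this play is not in the repo; it would go at the top of ksetup/playbook.yaml or a similar spot):

    - hosts: all
      gather_facts: false
      tasks:
        - name: Wait for SSH to come back after the initial-setup reboot
          ansible.builtin.wait_for_connection:
            delay: 10
            timeout: 300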