Daniel Flanagan 2022-02-03 14:05:56 -06:00
parent eda32db757
commit 3e79f1355d
Signed by: lytedev
GPG key ID: 5B2020A0F9921EF4
11 changed files with 136 additions and 20 deletions

1
.gitignore vendored Normal file

@@ -0,0 +1 @@
k3s-cluster-config.yaml
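Ignoring `k3s-cluster-config.yaml` keeps the kubeconfig fetched in the README's kubectl step below, which embeds cluster credentials, out of version control.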

ansible/arch-linux/tasks/install-yay-bin.yml

@@ -1,4 +1,5 @@
 - name: install yay-bin
-  include: aur.yaml pkg_name=yay-bin
+  include_tasks: aur.yml
   vars:
     makepkg_nonroot_user: aur_builder
+    pkg_name: yay-bin
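The referenced `aur.yml` is not shown in this commit. A minimal sketch of what such a task file might do, assuming it clones from the AUR and builds as the unprivileged `makepkg_nonroot_user` (the task names and `creates` guard are illustrative, not from the repo):

```
# Hypothetical aur.yml: build an AUR package as the non-root builder user.
- name: clone {{ pkg_name }} from the AUR
  ansible.builtin.git:
    repo: "https://aur.archlinux.org/{{ pkg_name }}.git"
    dest: "/home/{{ makepkg_nonroot_user }}/{{ pkg_name }}"
  become: yes
  become_user: "{{ makepkg_nonroot_user }}"

- name: build and install {{ pkg_name }}
  ansible.builtin.command:
    cmd: makepkg -si --noconfirm
    chdir: "/home/{{ makepkg_nonroot_user }}/{{ pkg_name }}"
    # yay-bin installs /usr/bin/yay; skip the build if it already exists
    creates: "/usr/bin/{{ pkg_name | regex_replace('-bin$', '') }}"
  # assumes the builder user can run pacman via passwordless sudo
  # (presumably what create-aur-builder.yml sets up)
  become: yes
  become_user: "{{ makepkg_nonroot_user }}"
```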

ansible/arch-linux/tasks/main.yml

@@ -4,7 +4,10 @@
 - include_tasks: ./create-aur-builder.yml
   when: ansible_facts.os_family == 'Archlinux'
-- include_tasks: ./install-yay-bin.yaml
+- include_tasks: ./install-yay-bin.yml
+  when: ansible_facts.os_family == 'Archlinux'
+- include_tasks: ./upgrade-all-packages.yml
   when: ansible_facts.os_family == 'Archlinux'
 # TODO:
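Side note, not part of the commit: the repeated `when` condition could equivalently be grouped under a single `block`:

```
- block:
    - ansible.builtin.include_tasks: ./create-aur-builder.yml
    - ansible.builtin.include_tasks: ./install-yay-bin.yml
    - ansible.builtin.include_tasks: ./upgrade-all-packages.yml
  # the block-level condition applies to every task inside it
  when: ansible_facts.os_family == 'Archlinux'
```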

8
ansible/arch-linux/tasks/upgrade-all-packages.yml Normal file

@@ -0,0 +1,8 @@
- name: upgrade all packages
  community.general.pacman:
    # executable: yay
    force: yes
    state: latest
    update_cache: yes
    upgrade: yes
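With `upgrade: yes` and `update_cache: yes`, this task amounts to a full `pacman -Syu`. A hypothetical one-off playbook to run just this task file on its own (the relative path is an assumption about the role layout):

```
# Sketch: ad-hoc full upgrade of the Arch hosts, reusing the task file above.
- hosts: all
  become: true
  tasks:
    - ansible.builtin.include_tasks: arch-linux/tasks/upgrade-all-packages.yml
      when: ansible_facts.os_family == 'Archlinux'
```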

18
ansible/install-k3s.yml Normal file

@@ -0,0 +1,18 @@
- name: provision Arch Linux hosts
  hosts: all
  gather_facts: yes
  roles:
    - role: xanmanning.k3s
# - hosts: k3s_nodes
#   vars:
#     k3s_registration_address: loadbalancer # Typically a load balancer.
#     k3s_server:
#       datastore-endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable"
#   pre_tasks:
#     - name: Set each node to be a control node
#       ansible.builtin.set_fact:
#         k3s_control_node: true
#       when: inventory_hostname in ['node2', 'node3']
#   roles:
#     - role: xanmanning.k3s
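Once the role has converged, the cluster can be sanity-checked from the control node. A sketch (not in the commit) that assumes the standard `/usr/local/bin/k3s` binary location and the `k3s_control_node` host var from the inventory:

```
# Sketch: confirm nodes registered, using the kubectl bundled with k3s.
- name: verify k3s nodes registered
  hosts: all
  become: true
  tasks:
    - name: list nodes via the bundled kubectl
      ansible.builtin.command: /usr/local/bin/k3s kubectl get nodes
      changed_when: false
      # only control plane nodes have the admin kubeconfig
      when: k3s_control_node | default(false)
```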

32
ansible/inventory/hosts.yml Normal file

@@ -0,0 +1,32 @@
homek8s:
  children:
    controllers:
      hybrid:
        hosts:
          "10.0.0.87":
            k3s_control_node: true
            ansible_host: "10.0.0.87"
            ansible_user: root
            cluster-cidr: '10.42.0.0/16'
            # # flannel-backend: 'none'
            # node-label:
            #   - "human-name=datto3"
          "10.0.0.138":
            ansible_host: "10.0.0.138"
            ansible_user: root
            # k3s_control_node: false
            # with-node-id: true
            # node-label:
            #   - "human-name=chromebox"
    workers:
  vars:
    k3s_release_version: v1.23.3+k3s1
    k3s_build_cluster: true
    k3s_registration_address: 10.0.0.87
    ansible_python_interpreter: /usr/bin/python3.10
    k3s_server:
      advertise-address: "0.0.0.0"
    k3s_agent:
      node-ip: "{{ ansible_host }}"
      node-external-ip: "{{ ansible_host }}"
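The `workers` group is empty so far. A hypothetical entry for an agent-only node would mirror the controller hosts (the IP below is illustrative, not from the repo):

```
    workers:
      hosts:
        "10.0.0.99":            # example address only
          ansible_host: "10.0.0.99"
          ansible_user: root
          k3s_control_node: false
```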

39
ansible/nuke-cluster.yml Normal file

@@ -0,0 +1,39 @@
- name: deprovision k3s nodes
  hosts: all
  become: true
  gather_facts: yes
  any_errors_fatal: true
  tasks:
    - name: Kill k3s
      ansible.builtin.command: /usr/local/bin/k3s-killall.sh
    - name: Uninstall k3s
      ansible.builtin.command:
        cmd: /usr/local/bin/k3s-uninstall.sh
        removes: /usr/local/bin/k3s-uninstall.sh
    - name: Uninstall k3s agent
      ansible.builtin.command:
        cmd: /usr/local/bin/k3s-agent-uninstall.sh
        removes: /usr/local/bin/k3s-agent-uninstall.sh
    - name: Gather list of CNI files to delete
      ansible.builtin.find:
        paths: /etc/cni/net.d
        patterns: "*"
      register: files_to_delete
    - name: Delete CNI files
      ansible.builtin.file:
        path: "{{ item.path }}"
        state: absent
      loop: "{{ files_to_delete.files }}"
# - hosts: k3s_nodes
#   vars:
#     k3s_registration_address: loadbalancer # Typically a load balancer.
#     k3s_server:
#       datastore-endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable"
#   pre_tasks:
#     - name: Set each node to be a control node
#       ansible.builtin.set_fact:
#         k3s_control_node: true
#       when: inventory_hostname in ['node2', 'node3']
#   roles:
#     - role: xanmanning.k3s
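Like the other playbooks, this is presumably run from `./ansible` with `ansible-playbook -i inventory/hosts.yml ./nuke-cluster.yml`. The `removes` guard on the uninstall tasks makes reruns safe on already-cleaned hosts, though the unguarded `k3s-killall.sh` task will fail on hosts where k3s was never installed.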

ansible/provision.yml

@@ -1,9 +1,8 @@
----
 - name: provision Arch Linux hosts
   hosts: all
+  gather_facts: yes
   roles:
     - role: './arch-linux'
-  gather_facts: yes
   tasks:
     - name: print all available facts
       ansible.builtin.debug:

10
ansible/requirements.yml Normal file

@@ -0,0 +1,10 @@
collections:
  - name: community.general
    version: 4.4.0
  - name: community.sops
    version: 1.2.0
  - name: ansible.posix
    version: 1.3.0
roles:
  - src: xanmanning.k3s
    version: v3.1.0

README.md

@@ -1,19 +1,24 @@
 # Provision Machines
 - My nodes are Arch Linux machines on bare metal
-- Have `python3` installed for Ansible
-- Need to be `ssh`-able from a workstation
-- Using Ansible
-  - Setup [Ansible inventory][ainv] under the key `homek8s`
-    ```
-    homek8s:
-      children:
-        controllers:
-          hybrid:
-            hosts:
-              root@kubeworker1:
-        workers:
-    ```
-  - Test with `ansible homek8s -m ping` or groups like this `ansible homek8s:hybrid -m ping`
-
-[ainv]: https://docs.ansible.com/ansible/latest/user_guide/intro_getting_started.html#selecting-machines-from-inventory
+- Nodes must be ready to be controlled via Ansible
+  - Have `python3` installed for Ansible
+  - Need to be `ssh`-able from a workstation
+    - You can grab keys like so: `curl -L files.lyte.dev/key.pub >> ~/.ssh/authorized_keys`
+  - TODO: script this? maybe custom ISO+PXEBoot?
+- Setup Ansible on the controller (from `./ansible`)
+  - `ansible-galaxy install -r requirements.yml --force`
+- Verify Ansible can reach hosts (from `./ansible`)
+  - `ansible all -i inventory/hosts.yml --list-hosts`
+  - `ansible all -i inventory/hosts.yml -m ping`
+- Use Ansible to prepare hosts for k3s installation (from `./ansible`)
+  - `ansible-playbook -i inventory/hosts.yml ./provision.yml`
+- Use Ansible to install k3s as configured on all nodes (from `./ansible`)
+  - `ansible-playbook -i inventory/hosts.yml ./install-k3s.yml`
+  - You have to run this multiple times for the worker nodes to successfully connect to the control plane nodes
+- Setup your local kubectl to work with the new cluster
+  - `ansible -i ansible/inventory/hosts.yml $REMOTE_HOST -m fetch -a "src=/etc/rancher/k3s/k3s.yaml dest=./k3s-cluster-config.yaml flat=yes"`
+  - Copy the cluster information from the `./k3s-cluster-config.yaml` file into your existing `~/.kube/config` (or just copy it there if it doesn't exist)
+  - You will need to edit the host from `localhost`/`127.0.0.1` to the correct host
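For that last step, the fetched kubeconfig points at the loopback address; change the `server` line to a control plane node, e.g. (illustrative, using the control node from the inventory):

```
# excerpt of k3s-cluster-config.yaml after editing
clusters:
  - cluster:
      server: https://10.0.0.87:6443  # was https://127.0.0.1:6443
```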