Updates
This commit is contained in:
parent eda32db757
commit 3e79f1355d

11 changed files with 136 additions and 20 deletions

1 .gitignore vendored Normal file

@@ -0,0 +1 @@
k3s-cluster-config.yaml

@@ -1,4 +1,5 @@
 - name: install yay-bin
-  include: aur.yaml pkg_name=yay-bin
+  include_tasks: aur.yml
   vars:
     makepkg_nonroot_user: aur_builder
+    pkg_name: yay-bin

@@ -4,7 +4,10 @@
 - include_tasks: ./create-aur-builder.yml
   when: ansible_facts.os_family == 'Archlinux'
 
-- include_tasks: ./install-yay-bin.yaml
+- include_tasks: ./install-yay-bin.yml
   when: ansible_facts.os_family == 'Archlinux'
-
+- include_tasks: ./upgrade-all-packages.yml
+  when: ansible_facts.os_family == 'Archlinux'
+
 # TODO:
+

8 ansible/arch-linux/tasks/upgrade-all-packages.yml Normal file

@@ -0,0 +1,8 @@
- name: upgrade all packages
  community.general.pacman:
    # executable: yay
    force: yes
    state: latest
    update_cache: yes
    upgrade: yes

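The commented-out `executable` line hints at a possible follow-up: `community.general.pacman` can drive a pacman-compatible AUR helper instead of pacman itself via its `executable` option. A minimal sketch of that variant (not part of this commit, and assuming the `aur_builder` user created earlier may run pacman without a password prompt):

```
- name: upgrade all packages, including AUR packages, via yay
  community.general.pacman:
    executable: yay         # hand the transaction to the AUR helper
    update_cache: yes
    upgrade: yes
  become: yes
  become_user: aur_builder  # yay refuses to run as root
```
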
18 ansible/install-k3s.yml Normal file

@@ -0,0 +1,18 @@
- name: provision Arch Linux hosts
  hosts: all
  gather_facts: yes
  roles:
    - role: xanmanning.k3s

# - hosts: k3s_nodes
#   vars:
#     k3s_registration_address: loadbalancer # Typically a load balancer.
#     k3s_server:
#       datastore-endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable"
#   pre_tasks:
#     - name: Set each node to be a control node
#       ansible.builtin.set_fact:
#         k3s_control_node: true
#       when: inventory_hostname in ['node2', 'node3']
#   roles:
#     - role: xanmanning.k3s

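The commented-out second play sketches the role's HA pattern (an external datastore plus `k3s_control_node` set per host in `pre_tasks`); the inventory below takes the simpler route of a single control node at 10.0.0.87 instead.
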
32 ansible/inventory/hosts.yml Normal file

@@ -0,0 +1,32 @@
homek8s:
  children:
    controllers:
    hybrid:
      hosts:
        "10.0.0.87":
          k3s_control_node: true
          ansible_host: "10.0.0.87"
          ansible_user: root
          cluster-cidr: '10.42.0.0/16'
          # # flannel-backend: 'none'
          # node-label:
          #   - "human-name=datto3"
        "10.0.0.138":
          ansible_host: "10.0.0.138"
          ansible_user: root
          # k3s_control_node: false
          # with-node-id: true
          # node-label:
          #   - "human-name=chromebox"
    workers:

  vars:
    k3s_release_version: v1.23.3+k3s1
    k3s_build_cluster: true
    k3s_registration_address: 10.0.0.87
    ansible_python_interpreter: /usr/bin/python3.10
    k3s_server:
      advertise-address: "0.0.0.0"
    k3s_agent:
      node-ip: "{{ ansible_host }}"
      node-external-ip: "{{ ansible_host }}"

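The `workers:` group is left empty. A dedicated agent-only node would presumably be added under it with `k3s_control_node` left false, roughly like this sketch (the `10.0.0.200` address is hypothetical):

```
    workers:
      hosts:
        "10.0.0.200":                  # hypothetical agent-only node
          ansible_host: "10.0.0.200"
          ansible_user: root
          k3s_control_node: false      # join the cluster as an agent
```
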
39 ansible/nuke-cluster.yml Normal file

@@ -0,0 +1,39 @@
- name: deprovision k3s nodes
  hosts: all
  become: true
  gather_facts: yes
  any_errors_fatal: true
  tasks:
    - name: Kill k3s
      ansible.builtin.command: /usr/local/bin/k3s-killall.sh
    - name: Uninstall k3s
      ansible.builtin.command:
        cmd: /usr/local/bin/k3s-uninstall.sh
        removes: /usr/local/bin/k3s-uninstall.sh
    - name: Uninstall k3s agent
      ansible.builtin.command:
        cmd: /usr/local/bin/k3s-agent-uninstall.sh
        removes: /usr/local/bin/k3s-agent-uninstall.sh
    - name: Gather list of CNI files to delete
      find:
        paths: /etc/cni/net.d
        patterns: "*"
      register: files_to_delete
    - name: Delete CNI files
      ansible.builtin.file:
        path: "{{ item.path }}"
        state: absent
      loop: "{{ files_to_delete.files }}"

# - hosts: k3s_nodes
#   vars:
#     k3s_registration_address: loadbalancer # Typically a load balancer.
#     k3s_server:
#       datastore-endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable"
#   pre_tasks:
#     - name: Set each node to be a control node
#       ansible.builtin.set_fact:
#         k3s_control_node: true
#       when: inventory_hostname in ['node2', 'node3']
#   roles:
#     - role: xanmanning.k3s

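Running the teardown would presumably mirror the other playbook invocations documented in the readme below, e.g. `ansible-playbook -i inventory/hosts.yml ./nuke-cluster.yml` from the `./ansible` directory.
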
@@ -1,9 +1,8 @@
----
 - name: provision Arch Linux hosts
   hosts: all
-  gather_facts: yes
   roles:
     - role: './arch-linux'
+  gather_facts: yes
   tasks:
     - name: print all available facts
       ansible.builtin.debug:

10 ansible/requirements.yml Normal file

@@ -0,0 +1,10 @@
collections:
  - name: community.general
    version: 4.4.0
  - name: community.sops
    version: 1.2.0
  - name: ansible.posix
    version: 1.3.0
roles:
  - src: xanmanning.k3s
    version: v3.1.0

37 readme.md

@@ -1,19 +1,24 @@
 # Provision Machines
 
-- My nodes are Arch Linux machines on bare metal
-- Have `python3` installed for Ansible
-- Need to be `ssh`-able from a workstation
-- Using Ansible
-- Setup [Ansible inventory][ainv] under the key `homek8s`
-  ```
-  homek8s:
-    children:
-      controllers:
-      hybrid:
-        hosts:
-          root@kubeworker1:
-      workers:
-  ```
-- Test with `ansible homek8s -m ping` or groups like this `ansible homek8s:hybrid -m ping`
-
-[ainv]: https://docs.ansible.com/ansible/latest/user_guide/intro_getting_started.html#selecting-machines-from-inventory
+- Nodes must be ready to be controlled via Ansible
+  - Have `python3` installed for Ansible
+  - Need to be `ssh`-able from a workstation
+    - You can grab keys like so: `curl -L files.lyte.dev/key.pub >> ~/.ssh/authorized_keys`
+  - TODO: script this? maybe custom ISO+PXEBoot?
+- Setup Ansible on the controller (from `./ansible`)
+  - `ansible-galaxy install -r requirements.yml --force`
+- Verify Ansible can reach hosts (from `./ansible`)
+  - `ansible all -i inventory/hosts.yml --list-hosts`
+  - `ansible all -i inventory/hosts.yml -m ping`
+- Use Ansible to prepare hosts for k3s installation (from `./ansible`)
+  - `ansible-playbook -i inventory/hosts.yml ./provision.yml`
+- Use Ansible to install k3s as configured on all nodes (from `./ansible`)
+  - `ansible-playbook -i inventory/hosts.yml ./install-k3s.yml`
+  - You have to run this multiple times for the worker nodes to successfully
+    connect to the control plane nodes
+- Setup your local kubectl to work with the new cluster
+  - `ansible -i ansible/inventory/hosts.yml $REMOTE_HOST -m fetch -a "src=/etc/rancher/k3s/k3s.yaml dest=./k3s-cluster-config.yaml flat=yes"`
+  - Copy the cluster information from the `./k3s-cluster-config.yaml` file into
+    your existing `~/.kube/config` (or just copy it there if it doesn't exist)
+  - You will need to edit the host from `localhost`/`127.0.0.1` to the correct host
+
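For that last edit, the fetched `k3s-cluster-config.yaml` is an ordinary kubeconfig, so the change amounts to pointing the cluster's `server` field at a reachable control node (10.0.0.87 in the inventory above). A sketch of the relevant fragment, assuming the default names k3s writes into its kubeconfig:

```
clusters:
  - cluster:
      certificate-authority-data: <leave as fetched>
      # was: server: https://127.0.0.1:6443
      server: https://10.0.0.87:6443
    name: default
```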