diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b291ca4
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+k3s-cluster-config.yaml
diff --git a/ansible/arch-linux/tasks/aur.yaml b/ansible/arch-linux/tasks/aur.yml
similarity index 100%
rename from ansible/arch-linux/tasks/aur.yaml
rename to ansible/arch-linux/tasks/aur.yml
diff --git a/ansible/arch-linux/tasks/install-yay-bin.yaml b/ansible/arch-linux/tasks/install-yay-bin.yml
similarity index 59%
rename from ansible/arch-linux/tasks/install-yay-bin.yaml
rename to ansible/arch-linux/tasks/install-yay-bin.yml
index 93073de..590bdeb 100644
--- a/ansible/arch-linux/tasks/install-yay-bin.yaml
+++ b/ansible/arch-linux/tasks/install-yay-bin.yml
@@ -1,4 +1,5 @@
 - name: install yay-bin
-  include: aur.yaml pkg_name=yay-bin
+  include_tasks: aur.yml
   vars:
     makepkg_nonroot_user: aur_builder
+    pkg_name: yay-bin
diff --git a/ansible/arch-linux/tasks/main.yml b/ansible/arch-linux/tasks/main.yml
index 0e119e2..a70f8af 100644
--- a/ansible/arch-linux/tasks/main.yml
+++ b/ansible/arch-linux/tasks/main.yml
@@ -4,7 +4,10 @@
 - include_tasks: ./create-aur-builder.yml
   when: ansible_facts.os_family == 'Archlinux'
 
-- include_tasks: ./install-yay-bin.yaml
+- include_tasks: ./install-yay-bin.yml
+  when: ansible_facts.os_family == 'Archlinux'
+
+- include_tasks: ./upgrade-all-packages.yml
   when: ansible_facts.os_family == 'Archlinux'
 
 # TODO:
diff --git a/ansible/arch-linux/tasks/upgrade-all-packages.yml b/ansible/arch-linux/tasks/upgrade-all-packages.yml
new file mode 100644
index 0000000..da51fb6
--- /dev/null
+++ b/ansible/arch-linux/tasks/upgrade-all-packages.yml
@@ -0,0 +1,8 @@
+- name: upgrade all packages
+  community.general.pacman:
+    # executable: yay
+    force: yes
+    state: latest
+    update_cache: yes
+    upgrade: yes
+
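A note on `upgrade-all-packages.yml` above: the commented-out `executable: yay` hints at eventually upgrading AUR packages in the same pass. A minimal sketch of that variant — assuming yay is installed via `install-yay-bin.yml` and that the `aur_builder` user from `create-aur-builder.yml` can escalate to pacman without a password, since yay refuses to run as root; the `executable` option needs community.general >= 3.1.0, which the 4.4.0 pin in `requirements.yml` satisfies:

```yaml
# Hypothetical variant of upgrade-all-packages.yml that routes package
# operations through yay so AUR packages are upgraded alongside repo packages.
- name: upgrade all packages including AUR
  community.general.pacman:
    executable: yay  # requires community.general >= 3.1.0
    update_cache: yes
    upgrade: yes
  become: yes
  become_user: aur_builder  # yay must not be run as root
```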
diff --git a/ansible/install-k3s.yml b/ansible/install-k3s.yml
new file mode 100644
index 0000000..aca0139
--- /dev/null
+++ b/ansible/install-k3s.yml
@@ -0,0 +1,18 @@
+- name: install k3s on all nodes
+  hosts: all
+  gather_facts: yes
+  roles:
+    - role: xanmanning.k3s
+
+# - hosts: k3s_nodes
+#   vars:
+#     k3s_registration_address: loadbalancer # Typically a load balancer.
+#     k3s_server:
+#       datastore-endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable"
+#   pre_tasks:
+#     - name: Set each node to be a control node
+#       ansible.builtin.set_fact:
+#         k3s_control_node: true
+#       when: inventory_hostname in ['node2', 'node3']
+#   roles:
+#     - role: xanmanning.k3s
diff --git a/ansible/inventory/hosts.yml b/ansible/inventory/hosts.yml
new file mode 100644
index 0000000..0893121
--- /dev/null
+++ b/ansible/inventory/hosts.yml
@@ -0,0 +1,32 @@
+homek8s:
+  children:
+    controllers:
+      hybrid:
+        hosts:
+          "10.0.0.87":
+            k3s_control_node: true
+            ansible_host: "10.0.0.87"
+            ansible_user: root
+            cluster-cidr: '10.42.0.0/16'
+            # # flannel-backend: 'none'
+            # node-label:
+            #   - "human-name=datto3"
+          "10.0.0.138":
+            ansible_host: "10.0.0.138"
+            ansible_user: root
+            # k3s_control_node: false
+            # with-node-id: true
+            # node-label:
+            #   - "human-name=chromebox"
+    workers:
+
+  vars:
+    k3s_release_version: v1.23.3+k3s1
+    k3s_build_cluster: true
+    k3s_registration_address: 10.0.0.87
+    ansible_python_interpreter: /usr/bin/python3.10
+    k3s_server:
+      advertise-address: "0.0.0.0"
+    k3s_agent:
+      node-ip: "{{ ansible_host }}"
+      node-external-ip: "{{ ansible_host }}"
diff --git a/ansible/nuke-cluster.yml b/ansible/nuke-cluster.yml
new file mode 100644
index 0000000..0c9a4c2
--- /dev/null
+++ b/ansible/nuke-cluster.yml
@@ -0,0 +1,39 @@
+- name: deprovision k3s nodes
+  hosts: all
+  become: true
+  gather_facts: yes
+  any_errors_fatal: true
+  tasks:
+    - name: Kill k3s
+      ansible.builtin.command: /usr/local/bin/k3s-killall.sh
+    - name: Uninstall k3s
+      ansible.builtin.command:
+        cmd: /usr/local/bin/k3s-uninstall.sh
+        removes: /usr/local/bin/k3s-uninstall.sh
+    - name: Uninstall k3s agent
+      ansible.builtin.command:
+        cmd: /usr/local/bin/k3s-agent-uninstall.sh
+        removes: /usr/local/bin/k3s-agent-uninstall.sh
+    - name: Gather list of CNI files to delete
+      find:
+        paths: /etc/cni/net.d
+        patterns: "*"
+      register: files_to_delete
+    - name: Delete CNI files
+      ansible.builtin.file:
+        path: "{{ item.path }}"
+        state: absent
+      loop: "{{ files_to_delete.files }}"
+
+# - hosts: k3s_nodes
+#   vars:
+#     k3s_registration_address: loadbalancer # Typically a load balancer.
+#     k3s_server:
+#       datastore-endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable"
+#   pre_tasks:
+#     - name: Set each node to be a control node
+#       ansible.builtin.set_fact:
+#         k3s_control_node: true
+#       when: inventory_hostname in ['node2', 'node3']
+#   roles:
+#     - role: xanmanning.k3s
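One caveat in `nuke-cluster.yml`: the `Kill k3s` task invokes `k3s-killall.sh` unconditionally, so with `any_errors_fatal: true` the whole play aborts on any host where k3s was never installed. A sketch of the same task guarded with `removes`, mirroring the two uninstall tasks that follow it:

```yaml
# Sketch: only run the kill script where k3s actually installed it,
# using the same "removes" guard as the uninstall tasks.
- name: Kill k3s
  ansible.builtin.command:
    cmd: /usr/local/bin/k3s-killall.sh
    removes: /usr/local/bin/k3s-killall.sh
```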
diff --git a/ansible/provision.yaml b/ansible/provision.yml
similarity index 98%
rename from ansible/provision.yaml
rename to ansible/provision.yml
index 91fd41d..43988e5 100644
--- a/ansible/provision.yaml
+++ b/ansible/provision.yml
@@ -1,9 +1,8 @@
----
 - name: provision Arch Linux hosts
   hosts: all
+  gather_facts: yes
   roles:
     - role: './arch-linux'
-  gather_facts: yes
   tasks:
     - name: print all available facts
       ansible.builtin.debug:
diff --git a/ansible/requirements.yml b/ansible/requirements.yml
new file mode 100644
index 0000000..9dbc468
--- /dev/null
+++ b/ansible/requirements.yml
@@ -0,0 +1,10 @@
+collections:
+  - name: community.general
+    version: 4.4.0
+  - name: community.sops
+    version: 1.2.0
+  - name: ansible.posix
+    version: 1.3.0
+roles:
+  - src: xanmanning.k3s
+    version: v3.1.0
diff --git a/readme.md b/readme.md
index d84399b..ad203ec 100644
--- a/readme.md
+++ b/readme.md
@@ -1,19 +1,24 @@
 # Provision Machines
 - My nodes are Arch Linux machines on bare metal
-  - Have `python3` installed for Ansible
-  - Need to be `ssh`-able from a workstation
-- Using Ansible
-  - Setup [Ansible inventory][ainv] under the key `homek8s`
-    ```
-    homek8s:
-      children:
-        controllers:
-          hybrid:
-            hosts:
-              root@kubeworker1:
-        workers:
-    ```
-  - Test with `ansible homek8s -m ping` or groups like this `ansible homek8s:hybrid -m ping`
-
-[ainv]: https://docs.ansible.com/ansible/latest/user_guide/intro_getting_started.html#selecting-machines-from-inventory
+  - Nodes must be ready to be controlled via Ansible
+    - Have `python3` installed for Ansible
+    - Need to be `ssh`-able from a workstation
+      - You can grab keys like so: `curl -L files.lyte.dev/key.pub >> ~/.ssh/authorized_keys`
+    - TODO: script this? maybe custom ISO+PXEBoot?
+- Set up Ansible on the controller (from `./ansible`)
+  - `ansible-galaxy install -r requirements.yml --force`
+- Verify Ansible can reach hosts (from `./ansible`)
+  - `ansible all -i inventory/hosts.yml --list-hosts`
+  - `ansible all -i inventory/hosts.yml -m ping`
+- Use Ansible to prepare hosts for k3s installation (from `./ansible`)
+  - `ansible-playbook -i inventory/hosts.yml ./provision.yml`
+- Use Ansible to install k3s as configured on all nodes (from `./ansible`)
+  - `ansible-playbook -i inventory/hosts.yml ./install-k3s.yml`
+  - You may have to run this multiple times before the worker nodes successfully
+    connect to the control-plane nodes
+- Set up your local kubectl to work with the new cluster
+  - `ansible -i ansible/inventory/hosts.yml $REMOTE_HOST -m fetch -a "src=/etc/rancher/k3s/k3s.yaml dest=./k3s-cluster-config.yaml flat=yes"`
+  - Copy the cluster information from `./k3s-cluster-config.yaml` into your
+    existing `~/.kube/config` (or just copy the file there if none exists)
+  - You will need to change the server address from `localhost`/`127.0.0.1` to the correct host (see the sketch below)
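For that last readme step, the fetched `k3s-cluster-config.yaml` points its `server` field at the node's loopback address. A minimal sketch of the corrected entry, assuming the control node from `inventory/hosts.yml` (`10.0.0.87`) and k3s's default API port of 6443 — the cluster name `default` is what k3s writes and may need renaming when merged into an existing kubeconfig:

```yaml
# Relevant fragment of ~/.kube/config after merging k3s-cluster-config.yaml.
# Only the server line changes; the certificate data stays as fetched.
clusters:
  - cluster:
      certificate-authority-data: <as fetched, unchanged>
      server: https://10.0.0.87:6443  # was https://127.0.0.1:6443
    name: default
```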