Stage them all, let god sort it out

This commit is contained in:
Jim Nicholson 2023-12-20 21:54:54 -08:00
parent a0bcbefa61
commit 0100387087
21 changed files with 57 additions and 193 deletions

1
.gitignore vendored
View File

@@ -1,2 +1,3 @@
 .direnv
 .envrc
+requirements.txt

View File

@ -0,0 +1,11 @@
export SWAP_SIZE=0
alpine-setup
echo 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community' >> /etc/apk/repositories
apk update
apk add curl python3 sudo qemu-guest-agent
rc-service qemu-guest-agent start
rc-update add qemu-guest-agent
echo '%wheel ALL=(ALL:ALL) NOPASSWD: ALL' > /etc/sudoers.d/wheel
adduser admin wheel

View File

@@ -1,5 +1,5 @@
 [defaults]
 inventory = ./inventory.yaml
 host_key_checking = False
-remote_user = kube
+remote_user = admin
 roles_path = ./roles

View File

@ -0,0 +1,4 @@
#cloud-config
bootcmd:
- nmcli con mod "cloud-init eth0" ipv4.dns 10.0.96.30
- nmcli con up id "cloud-init eth0"

View File

@ -0,0 +1,7 @@
write-kubeconfig-mode: "0644"
tls-san:
- "{{ cp_fqdn }}"
node-label:
- "size=small"
cluster-init: true
node-external-ip:

View File

@@ -4,17 +4,19 @@
     - localhost
   gather_facts: no
   tasks:
     - name: Clone host image
       community.general.proxmox_kvm:
         api_host: "{{ proxmox_api_host }}"
-        api_user: "{{ lookup('env','PM_USER') }}"
-        api_password: "{{ lookup('env','PM_PASSWORD') }}"
+        api_user: "{{ proxmox_user }}"
+        api_password: "{{ proxmox_pw }}"
         autostart: true
         clone: "{{ cloudinit_img }}"
         name: "{{ item }}"
-        node: pve2
+        node: pve
         target: "{{ hostvars[item].node }}"
-        storage: disk-storage
+        storage: ceph_storage
         format: qcow2
         newid: "{{ hostvars[item].id }}"
         timeout: 500
@@ -27,11 +29,11 @@
         update: yes
         node: "{{ hostvars[item].node }}"
         api_host: "{{ proxmox_api_host }}"
-        api_user: "{{ lookup('env','PM_USER') }}"
-        api_password: "{{ lookup('env','PM_PASSWORD') }}"
+        api_user: "{{ proxmox_user }}"
+        api_password: "{{ proxmox_pw }}"
         memory: "{{ hostvars[item].mem }}"
-        ciuser: kube
-        cipassword: Call1_advent
+        ciuser: admin
+        cipassword: "{{ proxmox_pw }}"
         cores: 4
         sshkeys: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC0Lk7zShZhujYeAnqorxZZCUJbZWWzf74cyAXRCGFeqyUvzOHuc/M3s0dmEqWRQCnKKdRAcAeBuya8dNyXwlTbWGTMbrObesPb0rHgLLXUfPbDH1km9QpVufjpuhbUtBN0iSa/1n3vKeMvrQj3ekUvl6nRtcLPHY0H4RswOJSpEzpvHK8S4YxdSoBV0z9KVB3/nS45WsqY45pD75epEjgaEhxyiJkf2fy5VkEB0+ZRMWs4uv/emwXq1hparkh5618Qap5qTpxI0kG0gXjupYc9HYe3oqHtxXsqrN3G/wEX6bVsbxNUdU5WMlqT88TkbRcju7UI7UhNcBezXaeT/WlJZGGM2spEHpk7DBC8Td6t09vCDQzRU694p6/hMfOUS3aMSdcIIU9wOdqXaXgFW2ugUxjQV0L0EowCxX8wJpPmxECs+svf7cCPYZVyF+R4MKHHx2mW/GtHYceAkQvIMfjPg2ZlKNOWGAJHBjjnLlAdXWJf77+FH5q3QCQshEW4loe/7/cd3AUGplYtHKxBaGYJS8YRDDmAE/TBZsm3ICaGCIUtEEsIBrSPR+f6WFU5fMIOh82735FGMI1rO4rNkFJ3ZBgwsgurY1yKrtP8yrTKELF0noycWw6DYHrwShXDFKjIlut5w3L3tOdlL6gheTfZqhSwDNZXm/3H76BfOUpxEQ== jim@DESKTOP-PP2J3PP'
         vmid: "{{ hostvars[item].id }}"
@@ -40,7 +42,7 @@
         net:
           net0: 'virtio,bridge=vmbr0'
         ipconfig:
-          ipconfig0: "ip={{ hostvars[item].ip }}/24,gw=10.0.96.1"
+          ipconfig0: "ip={{ hostvars[item].ansible_host }}/24,gw=10.0.96.1"
       with_inventory_hostnames:
         - primary
         - control_plane
@@ -49,8 +51,8 @@
       community.general.proxmox_kvm:
         node: "{{ hostvars[item].node }}"
         api_host: "{{ proxmox_api_host }}"
-        api_user: "{{ lookup('env','PM_USER') }}"
-        api_password: "{{ lookup('env','PM_PASSWORD') }}"
+        api_user: "{{ proxmox_user }}"
+        api_password: "{{ proxmox_pw }}"
         update: yes
         vmid: "{{ hostvars[item].id }}"
         state: started
@@ -58,28 +60,3 @@
         - primary
         - control_plane
         - workers
-    - name: Update DNS for cluster nodes
-      uri:
-        method: PATCH
-        url: "http://{{ pdns_api }}:8081/api/v1/servers/localhost/zones/{{ pdns_dom }}"
-        body:
-          rrsets:
-            - name: "{{ item }}.{{ pdns_dom }}."
-              type: A
-              ttl: 86400
-              changetype: REPLACE
-              records:
-                - content: "{{ hostvars[item].ip }}"
-                  disabled: false
-        body_format: json
-        headers:
-          'X-API-Key': "{{ lookup('env','PDNS_API_PW') }}"
-        return_content: yes
-        status_code:
-          - 200
-          - 204
-      register: dns_result
-      with_inventory_hostnames:
-        - primary
-        - control_plane
-        - workers

View File

@ -0,0 +1,10 @@
---
- name: Create config directory
file:
path: /etc/rancher/k3s
state: directory
owner: root
group: root
mode: 0755
become: true

View File

@@ -8,6 +8,8 @@
     dest="{{ playbook_dir }}/config/primary_ip"
   become: false
   delegate_to: localhost
+- debug:
+    msg: "curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC='server --cluster-init --tls-san {{ cp_fqdn }} --write-kubeconfig-mode 644' sh -"
 - name: Run the installer
   shell:
     cmd: "curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC='server --cluster-init --tls-san {{ cp_fqdn }} --write-kubeconfig-mode 644' sh -"

View File

@@ -6,9 +6,11 @@
 - name: Get the primary ip
   set_fact:
     primary_ip: "{{ lookup('file',playbook_dir + '/config/primary_ip') }}"
+- debug:
+    msg: "curl -sfL https://get.k3s.io | K3S_URL=https://{{ primary_ip }}:6443 K3S_TOKEN={{ token }} sh -"
 - name: Run the installer
   shell:
-    cmd: "curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC='agent --server https://{{ primary_ip }}:6443 --token {{ token }}' sh -"
+    cmd: "curl -sfL https://get.k3s.io | K3S_URL=https://{{ primary_ip }}:6443 K3S_TOKEN={{ token }} sh -"
   args:
     creates: /var/lib/rancher/k3s/agent/k3scontroller.kubeconfig
   become: true

View File

@ -1,2 +0,0 @@
*
!.gitignore

View File

@ -1,33 +0,0 @@
---
- name: Deprovision cluster
hosts: localhost
gather_facts: no
tasks:
- community.general.proxmox_kvm:
api_host: "{{ proxmox_api_host }}"
api_user: "{{ lookup('env','PM_USER') }}"
api_password: "{{ lookup('env','PM_PASSWORD') }}"
name: "{{ item }}"
node: "{{ hostvars[item].node }}"
vmid: "{{ hostvars[item].id }}"
state: stopped
force: yes
timeout: 500
with_inventory_hostnames:
- primary
- control_plane
- workers
- community.general.proxmox_kvm:
api_host: "{{ proxmox_api_host }}"
api_user: "{{ lookup('env','PM_USER') }}"
api_password: "{{ lookup('env','PM_PASSWORD') }}"
name: "{{ item }}"
node: "{{ hostvars[item].node }}"
vmid: "{{ hostvars[item].id }}"
state: absent
force: yes
timeout: 500
with_inventory_hostnames:
- primary
- control_plane
- workers

View File

@ -1,44 +0,0 @@
all:
vars:
cp_fqdn: nc_cp.home.thejimnicholson.com
proxmox_api_host: pve.home.thejimnicholson.com
pdns_api: 10.0.96.30
pdns_dom: home.thejimnicholson.com
cloudinit_img: centos-8-cloudimg
children:
primary:
hosts:
nc001:
mem: 4096
node: pve
id: 3001
ip: "10.0.96.111"
control_plane:
hosts:
nc002:
node: pve2
mem: 4096
id: 3002
ip: "10.0.96.112"
nc003:
node: pve3
mem: 4096
id: 3003
ip: "10.0.96.113"
workers:
hosts:
nc004:
node: pve
mem: 8192
id: 3004
ip: "10.0.96.114"
nc005:
node: pve2
mem: 8192
id: 3005
ip: "10.0.96.115"
nc006:
node: pve3
mem: 8192
id: 3006
ip: "10.0.96.116"

View File

@ -1,47 +0,0 @@
- name: Prep cluster nodes
hosts:
- primary
- control_plane
- workers
tasks:
- name: Set hostname
ansible.builtin.hostname:
name: "{{ inventory_hostname }}"
become: true
- name: Set timezone
community.general.timezone:
name: America/Los_Angeles
become: true
- name: Disable swap
shell: swapoff -a
become: true
- name: Disable swap in fstab
replace:
path: /etc/fstab
regexp: '^([^#].*?\sswap\s+sw\s+.*)$'
replace: '# \1'
become: true
- name: Add GPG for elrepo
ansible.builtin.rpm_key:
state: present
key: https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
become: true
- name : Set up elrepo
dnf:
name: https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm
state: present
become: true
- name: Install kernel upgrade
dnf:
enablerepo: elrepo-kernel
name: kernel-ml
state: present
become: true
- name: Install iscsi drivers
dnf:
name: iscsi-initiator-utils
state: present
become: true
- name: Reboot servers
reboot:
become: true

View File

@ -1,29 +0,0 @@
---
- name: Create config directory
file:
path: /etc/rancher/k3s
state: directory
owner: root
group: root
mode: 0755
become: true
- name: Disable NetworkManager for container networks
copy:
src: flannel.conf
dest: /etc/NetworkManager/conf.d/flannel.conf
owner: root
group: root
mode: 0644
become: true
register: nm_update
- name: Restart NetworkManager
systemd:
name: NetworkManager
state: restarted
become: true
when: nm_update.changed
- name: Airgap optional tasks
block:
- debug:
msg: "Airgap tasks go here"
when: airgap_install

5
requirements.in Normal file
View File

@ -0,0 +1,5 @@
ansible
PyYaml
ansible-lint
proxmoxer
requests