home

Configuration for my NixOS systems.
Log | Files | Refs | LICENSE

k8s.infra.sh (3738B)


      1 #!/usr/bin/env bash
      2 # univ: update niv (and generate a nice commit)
      3 
      4 # TODO: Maybe rewrite this in Python..
      5 
      6 # TODO libguestfs-with-appliance
      7 # TODO create images with qemu-img and virt-format --format=qcow2 --filesystem=ext4 -a vdisk1.qcow2
      8 # TODO Create xml by hand instead of virt-install
      9 
     10 set -euo pipefail
     11 
     12 # export QEMU_URI=qemu+ssh://vincent@wakasu.home/system
     13 # virt-install --connect=${QEMU_URI} \
     14 #              --name="ocp4-bootstrap" --vcpus=4 --ram=8192 \
     15 #              --disk path=/var/lib/libvirt/images/ocp-bootstrap.qcow2,bus=virtio,size=120 \
     16 #              --boot menu=on --print-xml > ocp4-bootstrap.xml
     17 # virsh --connect=${QEMU_URI} \
     18     #       define --file ocp4-bootstrap.xml
     19 
     20 HOST=${HOST:-wakasu.home}
     21 QEMU_URI="qemu+ssh://${HOST}/system"
     22 RSYNC_COMMAND="rsync -avzHXShPse ssh --progress"
     23 VIRSH_COMMAND="virsh --connect=${QEMU_URI}"
     24 NODES=(
     25     k8sn1
     26     k8sn2
     27     k8sn3
     28 )
     29 
     30 build() {
     31     for n in ${NODES[@]}; do
     32         logs=$(mktemp)
     33         output=$(mktemp)
     34         echo "Build ${n} node (logs: ${logs})…"
     35         nixos-generate -I nixpkgs=channel:nixos-21.05 -f qcow -c ./systems/hosts/${n}.nix 2>${logs} 1>${output}
     36         echo "Resize ${n} image"
     37         qemu-img create -f qcow2 -o preallocation=metadata ${n}.qcow2 40G
     38         virt-resize --expand /dev/vda1 $(cat ${output} | tr -d '\n') ${n}.qcow2
     39         echo "Syncthing image to ${HOST}…"
     40         ${RSYNC_COMMAND} ${n}.qcow2 root@${HOST}:/var/lib/libvirt/images/${n}.qcow2
     41         echo "Remove ${n} (local) image"
     42         rm -f ${n}.qcow2
     43     done
     44 }
     45 
     46 delete() {
     47     for n in ${NODES[@]}; do
     48         echo "Delete ${n} node…"
     49         ${VIRSH_COMMAND} list | grep ${n} && {
     50             ${VIRSH_COMMAND} destroy ${n}
     51         } || {
     52             echo "skipping, not present…"
     53         }
     54         ${VIRSH_COMMAND} undefine ${n} --remove-all-storage || echo "Failed to erase.. might not exists"
     55     done
     56 }
     57 
     58 # Bootstrap the cluster, assuming images are built and synced
     59 bootstrap() {
     60     echo "Bootstrap k8s cluster on ${HOST}"
     61     k8sn1_mac="52:54:00:dd:a3:30"
     62     k8sn2_mac="52:54:00:dd:a3:31"
     63     k8sn3_mac="52:54:00:dd:a3:32"
     64     folder=$(mktemp -d)
     65     for n in ${NODES[@]}; do
     66         mac_addr=${n}_mac
     67         virt-install --connect=${QEMU_URI} \
     68                      --name="${n}" --vcpus=4 --ram=8192 \
     69                      --network bridge=br1,mac.address=${!mac_addr} \
     70                      --disk path=/var/lib/libvirt/images/${n}.qcow2,bus=virtio,size=10 \
     71                      --disk path=/var/lib/libvirt/images/${n}-data.qcow2,bus=virtio,size=40 \
     72                      --print-xml > ${folder}/${n}.xml
     73         echo "Node ${n} : ${folder}/${n}.xml"
     74         ${VIRSH_COMMAND} define --file ${folder}/${n}.xml
     75     done
     76     # Start the nodes
     77     for n in ${NODES[@]}; do
     78         ${VIRSH_COMMAND} start ${n}
     79     done
     80     # Wait for.. long time..
     81     # Not sure how to ensure k8s is running on the master
     82     token=$(ssh root@k8sn1.home cat /var/lib/kubernetes/secrets/apitoken.secret)
     83     echo $token | ssh root@k8sn2.home nixos-kubernetes-node-join
     84     echo $token | ssh root@k8sn3.home nixos-kubernetes-node-join
     85     mkdir -p $HOME/.kube
     86     # TODO: Copy cluster-admin configuration and sed the certs
     87     scp root@k8sn1.home:/etc/kubernetes/cluster-admin.kubeconfig $HOME/home.cluster-admin.config
     88 }
     89 
     90 status() {
     91     echo "TBD: display the status of the cluster"
     92 }
     93 
     94 main() {
     95     set +u
     96     ARG=$1
     97     set -u
     98     case ${ARG} in
     99         "build")
    100             build
    101             ;;
    102         "delete")
    103             delete
    104             ;;
    105         "bootstrap")
    106             bootstrap
    107             ;;
    108         "status")
    109             status
    110             ;;
    111         *)
    112             echo "No such subcommand"
    113             exit 1
    114             ;;
    115     esac
    116 }
    117 
    118 main $@