Fix dependency install on Debian 11 (#16683)
[zfs.git] / .github/workflows/scripts/qemu-5-setup.sh
#!/usr/bin/env bash

######################################################################
# 5) start test machines and load openzfs module
######################################################################

set -eu

# read our defined variables
source /var/tmp/env.txt

# wait for poweroff to succeed
PID=$(pidof /usr/bin/qemu-system-x86_64)
tail --pid=$PID -f /dev/null
sudo virsh undefine openzfs
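# the "openzfs" build VM is gone now; only its disk (zpool/openzfs) is reused below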

# default values per test vm:
VMs=2
CPU=2

# cpu pinning
CPUSET=("0,1" "2,3")
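# each VM gets pinned to its own pair of host cores (matching CPU=2 above)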

case "$OS" in
  freebsd*)
    # FreeBSD can't be optimized via ksmtuned
    RAM=6
    ;;
  *)
    # Linux can be optimized via ksmtuned
    RAM=8
    ;;
esac

# this can be different for each distro
echo "VMs=$VMs" >> $ENV

# create snapshot we can clone later
sudo zfs snapshot zpool/openzfs@now
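# every VM root disk below is a cheap copy-on-write clone of this snapshot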

# setup the testing vm's
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
for i in $(seq 1 $VMs); do

  echo "Creating disk for vm$i..."
  DISK="/dev/zvol/zpool/vm$i"
  FORMAT="raw"
  sudo zfs clone zpool/openzfs@now zpool/vm$i
  sudo zfs create -ps -b 64k -V 80g zpool/vm$i-2
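  # vm$i-2 is a sparse (-s) 80 GiB zvol that becomes the VM's second disk for testing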

  cat <<EOF > /tmp/user-data
#cloud-config

fqdn: vm$i

users:
- name: root
  shell: $BASH
- name: zfs
  sudo: ALL=(ALL) NOPASSWD:ALL
  shell: $BASH
  ssh_authorized_keys:
    - $PUBKEY

growpart:
  mode: auto
  devices: ['/']
  ignore_growroot_disabled: false
EOF
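
  # cloud-init creates the zfs user with passwordless sudo and our ssh key;
  # growpart grows the root partition to fill the disk on first boot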

  sudo virsh net-update default add ip-dhcp-host \
    "<host mac='52:54:00:83:79:0$i' ip='192.168.122.1$i'/>" --live --config

  sudo virt-install \
    --os-variant $OSv \
    --name "vm$i" \
    --cpu host-passthrough \
    --virt-type=kvm --hvm \
    --vcpus=$CPU,sockets=1 \
    --cpuset=${CPUSET[$((i-1))]} \
    --memory $((1024*RAM)) \
    --memballoon model=virtio \
    --graphics none \
    --cloud-init user-data=/tmp/user-data \
    --network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
    --disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --disk $DISK-2,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --import --noautoconsole >/dev/null
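  # --import skips the installer and boots the pre-installed clone directly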
done

# check the memory state from time to time
cat <<EOF > cronjob.sh
# $OS
exec 1>>/var/tmp/stats.txt
exec 2>&1
echo "*******************************************************"
date
uptime
free -m
df -h /mnt/tests
zfs list
EOF
sudo chmod +x cronjob.sh
sudo mv -f cronjob.sh /root/cronjob.sh
echo '*/5 * * * * /root/cronjob.sh' > crontab.txt
sudo crontab crontab.txt
rm crontab.txt
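# host-side stats now land in /var/tmp/stats.txt every 5 minutes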

# check if the machines are okay
echo "Waiting for vm's to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
for i in $(seq 1 $VMs); do
  while true; do
    ssh 2>/dev/null zfs@192.168.122.1$i "uname -a" && break
  done
done
echo "All $VMs VMs are up now."

# Save the VM's serial output (ttyS0) to /var/tmp/console.txt
# - ttyS0 on the VM corresponds to a local /dev/pty/N entry
# - use 'virsh ttyconsole' to lookup the /dev/pty/N entry
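# - nohup keeps the capture running after this script returns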
for i in $(seq 1 $VMs); do
  mkdir -p $RESPATH/vm$i
  read "pty" <<< $(sudo virsh ttyconsole vm$i)
  sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
done
echo "Console logging for ${VMs}x $OS started."