######################################################################
# 5) start test machines and load openzfs module
######################################################################

# read our defined variables
source /var/tmp/env.txt
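# (env.txt is produced by the earlier setup steps; variables used below
# but not defined in this section, e.g. $ENV, $OS, $NIC and $RESPATH,
# are expected to come from it)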

# wait for poweroff to succeed
PID=$(pidof /usr/bin/qemu-system-x86_64)
tail --pid=$PID -f /dev/null
sudo virsh undefine openzfs
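# ('tail --pid=$PID -f /dev/null' blocks until the given PID exits, so
# the build VM is only undefined after QEMU has really powered off)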

# default values per test vm:
# - FreeBSD can't be optimized via ksmtuned
# - Linux can be optimized via ksmtuned

# this can be different for each distro
echo "VMs=$VMs" >> $ENV

# create a snapshot we can clone later
sudo zfs snapshot zpool/openzfs@now
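# (clones of @now are copy-on-write, so each per-VM root disk below is
# created instantly and shares unmodified blocks with the snapshot)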

# set up the testing VMs
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
for i in $(seq 1 $VMs); do
45 echo "Creating disk for vm$i..."
46 DISK
="/dev/zvol/zpool/vm$i"
48 sudo zfs clone zpool
/openzfs@now zpool
/vm
$i
49 sudo zfs create
-ps -b 64k
-V 80g zpool
/vm
$i-2
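  # (-p creates missing parent datasets, -s makes the 80G volume sparse,
  # and -b 64k sets its volblocksize; zpool/vm$i-2 is the VM's second disk)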

  cat <<EOF > /tmp/user-data
#cloud-config
users:
- name: zfs
  sudo: ALL=(ALL) NOPASSWD:ALL
  ssh_authorized_keys:
    - $PUBKEY
growpart:
  ignore_growroot_disabled: false
EOF
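  # (cloud-init applies the user-data on first boot: the 'zfs' user gets
  # passwordless sudo and our public key, which the ssh checks below use)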

  sudo virsh net-update default add ip-dhcp-host \
    "<host mac='52:54:00:83:79:0$i' ip='192.168.122.1$i'/>" --live --config

  sudo virt-install \
    --name "vm$i" \
    --cpu host-passthrough \
    --virt-type=kvm --hvm \
    --vcpus=$CPU,sockets=1 \
    --cpuset=${CPUSET[$((i-1))]} \
    --memory $((1024*RAM)) \
    --memballoon model=virtio \
    --cloud-init user-data=/tmp/user-data \
    --network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
    --disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --disk $DISK-2,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --import --noautoconsole >/dev/null
done
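# (driver.discard=unmap passes the guests' TRIM requests through to the
# sparse zvols, so space freed inside a VM is returned to the pool)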

# check the memory state from time to time
cat <<EOF > cronjob.sh
#!/usr/bin/env bash
exec 1>>/var/tmp/stats.txt
echo "*******************************************************"
free -m
EOF
sudo chmod +x cronjob.sh
sudo mv -f cronjob.sh /root/cronjob.sh
echo '*/5 * * * * /root/cronjob.sh' > crontab.txt
sudo crontab crontab.txt
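# (cron runs /root/cronjob.sh every five minutes; the exec redirect in
# the script appends each run's output to /var/tmp/stats.txt)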

# check if the machines are okay
echo "Waiting for VMs to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
for i in $(seq 1 $VMs); do
  while true; do
    ssh 2>/dev/null zfs@192.168.122.1$i "uname -a" && break
  done
done
echo "All $VMs VMs are up now."

# Save each VM's serial output (ttyS0) to $RESPATH/vm$i/console.txt
# - ttyS0 on the VM corresponds to a local /dev/pts/N entry
# - use 'virsh ttyconsole' to look up the /dev/pts/N entry
for i in $(seq 1 $VMs); do
  mkdir -p $RESPATH/vm$i
  read "pty" <<< $(sudo virsh ttyconsole vm$i)
  sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
done
echo "Console logging for ${VMs}x $OS started."