# SPDX-License-Identifier: GPL-2.0-only
# Exit code the kselftest harness interprets as "test skipped".
export KSELFTESTS_SKIP=4
# pe_ok <dev>
# Check whether the EEH PE backing a PCI device is healthy.
# Arguments: $1 - sysfs PCI device name (DDDD:BB:DD.F)
# Returns:   0 if the PE is functional, 1 if the state file is gone or the
#            PE is isolated/recovering/inactive.
pe_ok() {
	local dev="$1"
	local path="/sys/bus/pci/devices/$dev/eeh_pe_state"

	# if a driver doesn't support the error handling callbacks then the
	# device is recovered by removing and re-probing it. This causes the
	# sysfs directory to disappear so read the PE state once and squash
	# any potential error messages
	local eeh_state="$(cat $path 2>/dev/null)"
	if [ -z "$eeh_state" ]; then
		return 1;
	fi

	local fw_state="$(echo $eeh_state | cut -d' ' -f1)"
	local sw_state="$(echo $eeh_state | cut -d' ' -f2)"

	# If EEH_PE_ISOLATED or EEH_PE_RECOVERING are set then the PE is in an
	# error state or being recovered. Either way, not ok.
	if [ "$((sw_state & 0x3))" -ne 0 ] ; then
		return 1
	fi

	# A functioning PE should have the EEH_STATE_MMIO_ACTIVE and
	# EEH_STATE_DMA_ACTIVE flags set. For some goddamn stupid reason
	# the platform backends set these when the PE is in reset. The
	# RECOVERING check above should stop any false positives though.
	if [ "$((fw_state & 0x18))" -ne "$((0x18))" ] ; then
		return 1
	fi

	return 0;
}
# eeh_supported
# Report whether the kernel's EEH subsystem exists and is enabled.
# Returns: 0 when /proc/powerpc/eeh exists and reports enabled, non-zero
#          otherwise (e.g. on non-powerpc systems).
eeh_supported() {
	test -e /proc/powerpc/eeh && \
	grep -q 'EEH Subsystem is enabled' /proc/powerpc/eeh
}
# eeh_test_prep
# Common test setup: skip (exit $KSELFTESTS_SKIP) unless EEH and the debugfs
# error-injection files are available, then raise the PE freeze limit so
# deliberate error injection doesn't permanently disable devices.
eeh_test_prep() {
	if ! eeh_supported ; then
		echo "EEH not supported on this system, skipping"
		exit $KSELFTESTS_SKIP;
	fi

	if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \
	   [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then
		log "debugfs EEH testing files are missing. Is debugfs mounted?"
		exit $KSELFTESTS_SKIP;
	fi

	# Bump the max freeze count to something absurd so we don't
	# trip over it while breaking things.
	echo 5000 > /sys/kernel/debug/powerpc/eeh_max_freezes
}
# eeh_can_break <dev>
# Decide whether it is safe to inject an EEH error into a device.
# Arguments: $1 - sysfs PCI device name (DDDD:BB:DD.F)
# Returns:   0 if the device is breakable, 1 if it should be skipped.
eeh_can_break() {
	# Bind the argument explicitly rather than relying on the caller's
	# $dev loop variable — callers pass the device name (e.g. a VF).
	local dev="$1"

	# skip bridges since we can't recover them (yet...)
	if [ -e "/sys/bus/pci/devices/$dev/pci_bus" ] ; then
		log "$dev, Skipped: bridge"
		return 1;
	fi

	# The ahci driver doesn't support error recovery. If the ahci device
	# happens to be hosting the root filesystem, and then we go and break
	# it the system will generally go down. We should probably fix that
	# at some point.
	if [ "ahci" = "$(basename $(realpath /sys/bus/pci/devices/$dev/driver))" ] ; then
		log "$dev, Skipped: ahci doesn't support recovery"
		return 1;
	fi

	# Don't inject errors into an already-frozen PE. This happens with
	# PEs that contain multiple PCI devices (e.g. multi-function cards)
	# and injecting new errors during the recovery process will probably
	# result in the recovery failing and the device being marked as
	# failed.
	if ! pe_ok $dev ; then
		log "$dev, Skipped: Bad initial PE state"
		return 1;
	fi

	return 0
}
# eeh_one_dev <dev>
# Inject an EEH error into one device and wait for it to recover.
# Arguments: $1 - sysfs PCI device name (DDDD:BB:DD.F)
# Returns:   0 if the device recovered, 1 on bad argument or recovery failure.
eeh_one_dev() {
	local dev="$1"

	# Using this function from the command line is sometimes useful for
	# testing so check that the argument is a well-formed sysfs device
	# name.
	if ! test -e /sys/bus/pci/devices/$dev/ ; then
		log "Error: '$dev' must be a sysfs device name (DDDD:BB:DD.F)"
		return 1;
	fi

	# Break it
	echo $dev >/sys/kernel/debug/powerpc/eeh_dev_break

	# Force an EEH device check. If the kernel has already
	# noticed the EEH (due to a driver poll or whatever), this
	# is a no-op.
	echo $dev >/sys/kernel/debug/powerpc/eeh_dev_check

	# Default to a 60s timeout when waiting for a device to recover. This
	# is an arbitrary default which can be overridden by setting the
	# EEH_MAX_WAIT environmental variable when required.

	# The current record holder for longest recovery time is:
	#  "Adaptec Series 8 12G SAS/PCIe 3" at 39 seconds
	max_wait=${EEH_MAX_WAIT:=60}

	for i in $(seq 0 ${max_wait}) ; do
		if pe_ok $dev ; then
			break;
		fi
		log "$dev, waited $i/${max_wait}"
		sleep 1
	done

	if ! pe_ok $dev ; then
		log "$dev, Failed to recover!"
		return 1;
	fi

	log "$dev, Recovered after $i seconds"
	return 0;
}
# eeh_has_driver <dev>
# Returns 0 if a driver is currently bound to the device, non-zero otherwise.
eeh_has_driver() {
	test -e /sys/bus/pci/devices/$1/driver;
	return $?
}
# eeh_can_recover <dev>
# Ask the kernel whether the device's driver supports EEH error recovery.
# Returns the status of the debugfs write: 0 if recovery is supported.
eeh_can_recover() {
	# we'll get an IO error if the device's current driver doesn't support
	# error recovery
	echo $1 > '/sys/kernel/debug/powerpc/eeh_dev_can_recover' 2>/dev/null

	return $?
}
# eeh_find_all_pfs
# Find all SR-IOV capable physical functions usable for VF tests.
# stdout:  space-separated list of PF device names
# Returns: 0 if any PFs were found, 1 otherwise (including when pseries
#          hypervisor SR-IOV support is missing).
eeh_find_all_pfs() {
	devices=""

	# SR-IOV on pseries requires hypervisor support, so check for that
	is_pseries=""
	if grep -q pSeries /proc/cpuinfo ; then
		if [ ! -f /proc/device-tree/rtas/ibm,open-sriov-allow-unfreeze ] ||
		   [ ! -f /proc/device-tree/rtas/ibm,open-sriov-map-pe-number ] ; then
			return 1;
		fi

		is_pseries="true"
	fi

	for dev in $(ls -1 /sys/bus/pci/devices/) ; do
		sysfs="/sys/bus/pci/devices/$dev"
		if [ ! -e "$sysfs/sriov_numvfs" ] ; then
			continue
		fi

		# skip unsupported PFs on pseries
		if [ -z "$is_pseries" ] &&
		   [ ! -f "$sysfs/of_node/ibm,is-open-sriov-pf" ] &&
		   [ ! -f "$sysfs/of_node/ibm,open-sriov-vf-bar-info" ] ; then
			continue;
		fi

		# no driver bound means no way to create VFs
		if ! eeh_has_driver $dev ; then
			continue
		fi

		devices="$devices $dev"
	done

	if [ -z "$devices" ] ; then
		return 1;
	fi

	echo $devices
	return 0;
}
# attempts to enable one VF on each PF so we can do VF specific tests.
# stdout: list of enabled VFs, one per line
# return code: 0 if vfs are found, 1 otherwise
eeh_enable_vfs() {
	pf_list="$(eeh_find_all_pfs)"

	vfs=0
	for dev in $pf_list ; do
		pf_sysfs="/sys/bus/pci/devices/$dev"

		# make sure we have a single VF
		echo 0 > "$pf_sysfs/sriov_numvfs"
		echo 1 > "$pf_sysfs/sriov_numvfs"
		if [ "$?" != 0 ] ; then
			# NB: log the PF we're working on ($dev); the old
			# message interpolated a never-set $pf variable.
			log "Unable to enable VFs on $dev, skipping"
			continue;
		fi

		vf="$(basename $(realpath "$pf_sysfs/virtfn0"))"
		if [ $? != 0 ] ; then
			log "unable to find enabled vf on $dev"
			echo 0 > "$pf_sysfs/sriov_numvfs"
			continue;
		fi

		if ! eeh_can_break $vf ; then
			# not usable for testing, so turn the VFs back off
			echo 0 > "$pf_sysfs/sriov_numvfs"
			continue;
		fi

		vfs="$((vfs + 1))"
		echo $vf
	done

	test "$vfs" != 0
	return $?
}
# eeh_disable_vfs
# Disable all VFs on every PF found by eeh_find_all_pfs.
# Returns: 0 on success, 1 if no PFs were found.
eeh_disable_vfs() {
	pf_list="$(eeh_find_all_pfs)"
	if [ -z "$pf_list" ] ; then
		return 1;
	fi

	for dev in $pf_list ; do
		echo 0 > "/sys/bus/pci/devices/$dev/sriov_numvfs"
	done

	return 0;
}