// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe AER software error injection support.
 *
 * Debugging PCIe AER code is quite difficult because it is hard to
 * trigger various real hardware errors. Software-based error
 * injection can fake almost all kinds of errors with the help of a
 * user space helper tool, aer-inject, which is available from:
 *   http://www.kernel.org/pub/linux/utils/pci/aer-inject/
 *
 * Copyright 2009 Intel Corporation.
 *	Huang Ying <ying.huang@intel.com>
 */

#define dev_fmt(fmt) "aer_inject: " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/stddef.h>
#include <linux/device.h>

#include "portdrv.h"	/* pcie_port_find_device(), get_service_data() */

/* Override the existing corrected and uncorrected error masks */
static bool aer_mask_override;
module_param(aer_mask_override, bool, 0);
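
/*
 * With aer_mask_override set, aer_inject() temporarily overrides the target
 * device's AER Correctable and Uncorrectable Error Mask registers before
 * recording an error and restores the original values afterwards, so that
 * injection works even for error bits the device currently masks.
 */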

struct aer_error_inj {
	u8 bus;
	u8 dev;
	u8 fn;
	u32 uncor_status;
	u32 cor_status;
	u32 header_log0;
	u32 header_log1;
	u32 header_log2;
	u32 header_log3;
	u32 domain;
};

struct aer_error {
	struct list_head list;
	u32 domain;
	unsigned int bus;
	unsigned int devfn;
	int pos_cap_err;

	u32 uncor_status;
	u32 cor_status;
	u32 header_log0;
	u32 header_log1;
	u32 header_log2;
	u32 header_log3;
	u32 root_status;
	u32 source_id;
};

struct pci_bus_ops {
	struct list_head list;
	struct pci_bus *bus;
	struct pci_ops *ops;
};

static LIST_HEAD(einjected);

static LIST_HEAD(pci_bus_ops_list);
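
/*
 * The injection request is delivered by writing a struct aer_error_inj to
 * the misc device registered by this module (typically /dev/aer_inject).
 * A minimal sketch of a user space caller, assuming the device node name
 * matches the misc device name:
 *
 *	struct aer_error_inj einj = {
 *		.domain = 0, .bus = 0x01, .dev = 0x00, .fn = 0,
 *		.cor_status = PCI_ERR_COR_RCVR,	// receiver error
 *	};
 *	int fd = open("/dev/aer_inject", O_WRONLY);
 *	write(fd, &einj, sizeof(einj));
 *	close(fd);
 *
 * The aer-inject helper tool mentioned in the header comment wraps this
 * interface.
 */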

/* Protect einjected and pci_bus_ops_list */
static DEFINE_SPINLOCK(inject_lock);

static void aer_error_init(struct aer_error *err, u32 domain,
			   unsigned int bus, unsigned int devfn,
			   int pos_cap_err)
{
	INIT_LIST_HEAD(&err->list);
	err->domain = domain;
	err->bus = bus;
	err->devfn = devfn;
	err->pos_cap_err = pos_cap_err;
}

/* inject_lock must be held before calling */
static struct aer_error *__find_aer_error(u32 domain, unsigned int bus,
					  unsigned int devfn)
{
	struct aer_error *err;

	list_for_each_entry(err, &einjected, list) {
		if (domain == err->domain &&
		    bus == err->bus &&
		    devfn == err->devfn)
			return err;
	}
	return NULL;
}

/* inject_lock must be held before calling */
static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
{
	int domain = pci_domain_nr(dev->bus);

	if (domain < 0)
		return NULL;
	return __find_aer_error(domain, dev->bus->number, dev->devfn);
}

/* inject_lock must be held before calling */
static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
{
	struct pci_bus_ops *bus_ops;

	list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
		if (bus_ops->bus == bus)
			return bus_ops->ops;
	}
	return NULL;
}

static struct pci_bus_ops *pci_bus_ops_pop(void)
{
	unsigned long flags;
	struct pci_bus_ops *bus_ops;

	spin_lock_irqsave(&inject_lock, flags);
	bus_ops = list_first_entry_or_null(&pci_bus_ops_list,
					   struct pci_bus_ops, list);
	if (bus_ops)
		list_del(&bus_ops->list);
	spin_unlock_irqrestore(&inject_lock, flags);

	return bus_ops;
}
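
/*
 * Map a config space offset inside the device's AER capability onto the
 * corresponding field of the simulated error record.  Returns NULL for
 * offsets that are not emulated; *prw1cs (if non-NULL) tells the caller
 * whether the register has write-1-to-clear-status semantics.
 */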
static u32 *find_pci_config_dword(struct aer_error *err, int where,
				  int *prw1cs)
{
	int rw1cs = 0;
	u32 *target = NULL;

	if (err->pos_cap_err == -1)
		return NULL;

	switch (where - err->pos_cap_err) {
	case PCI_ERR_UNCOR_STATUS:
		target = &err->uncor_status;
		rw1cs = 1;
		break;
	case PCI_ERR_COR_STATUS:
		target = &err->cor_status;
		rw1cs = 1;
		break;
	case PCI_ERR_HEADER_LOG:
		target = &err->header_log0;
		break;
	case PCI_ERR_HEADER_LOG + 4:
		target = &err->header_log1;
		break;
	case PCI_ERR_HEADER_LOG + 8:
		target = &err->header_log2;
		break;
	case PCI_ERR_HEADER_LOG + 12:
		target = &err->header_log3;
		break;
	case PCI_ERR_ROOT_STATUS:
		target = &err->root_status;
		rw1cs = 1;
		break;
	case PCI_ERR_ROOT_ERR_SRC:
		target = &err->source_id;
		break;
	}
	if (prw1cs)
		*prw1cs = rw1cs;
	return target;
}
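
/*
 * Pass a config access through to the bus's original pci_ops.  bus->ops is
 * temporarily switched back to the saved ops around the call so that the
 * access does not recurse into the injection hooks.
 */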
static int aer_inj_read(struct pci_bus *bus, unsigned int devfn, int where,
			int size, u32 *val)
{
	struct pci_ops *ops, *my_ops;
	int rv;

	ops = __find_pci_bus_ops(bus);
	if (!ops)
		return -1;

	my_ops = bus->ops;
	bus->ops = ops;
	rv = ops->read(bus, devfn, where, size, val);
	bus->ops = my_ops;

	return rv;
}

static int aer_inj_write(struct pci_bus *bus, unsigned int devfn, int where,
			 int size, u32 val)
{
	struct pci_ops *ops, *my_ops;
	int rv;

	ops = __find_pci_bus_ops(bus);
	if (!ops)
		return -1;

	my_ops = bus->ops;
	bus->ops = ops;
	rv = ops->write(bus, devfn, where, size, val);
	bus->ops = my_ops;

	return rv;
}
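
/*
 * Replacement config accessors.  Aligned dword reads and writes that land
 * in an injected device's AER capability are served from the simulated
 * error record (honoring write-1-to-clear semantics for the status
 * registers); everything else falls through to the original bus ops.
 */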
static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	u32 *sim;
	struct aer_error *err;
	unsigned long flags;
	int domain;
	int rv;

	spin_lock_irqsave(&inject_lock, flags);
	if (size != sizeof(u32))
		goto out;
	domain = pci_domain_nr(bus);
	if (domain < 0)
		goto out;
	err = __find_aer_error(domain, bus->number, devfn);
	if (!err)
		goto out;

	sim = find_pci_config_dword(err, where, NULL);
	if (sim) {
		*val = *sim;
		spin_unlock_irqrestore(&inject_lock, flags);
		return 0;
	}
out:
	rv = aer_inj_read(bus, devfn, where, size, val);
	spin_unlock_irqrestore(&inject_lock, flags);
	return rv;
}

static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	u32 *sim;
	struct aer_error *err;
	unsigned long flags;
	int rw1cs;
	int domain;
	int rv;

	spin_lock_irqsave(&inject_lock, flags);
	if (size != sizeof(u32))
		goto out;
	domain = pci_domain_nr(bus);
	if (domain < 0)
		goto out;
	err = __find_aer_error(domain, bus->number, devfn);
	if (!err)
		goto out;

	sim = find_pci_config_dword(err, where, &rw1cs);
	if (sim) {
		if (rw1cs)
			*sim ^= val;
		else
			*sim = val;
		spin_unlock_irqrestore(&inject_lock, flags);
		return 0;
	}
out:
	rv = aer_inj_write(bus, devfn, where, size, val);
	spin_unlock_irqrestore(&inject_lock, flags);
	return rv;
}

static struct pci_ops aer_inj_pci_ops = {
	.read = aer_inj_read_config,
	.write = aer_inj_write_config,
};
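
/*
 * Install aer_inj_pci_ops on a bus, remembering the original pci_ops in a
 * pci_bus_ops entry so they can be restored when the module is unloaded.
 * Installing a second time on the same bus is a no-op.
 */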
static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
			     struct pci_bus *bus,
			     struct pci_ops *ops)
{
	INIT_LIST_HEAD(&bus_ops->list);
	bus_ops->bus = bus;
	bus_ops->ops = ops;
}

static int pci_bus_set_aer_ops(struct pci_bus *bus)
{
	struct pci_ops *ops;
	struct pci_bus_ops *bus_ops;
	unsigned long flags;

	bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
	if (!bus_ops)
		return -ENOMEM;
	ops = pci_bus_set_ops(bus, &aer_inj_pci_ops);
	spin_lock_irqsave(&inject_lock, flags);
	if (ops == &aer_inj_pci_ops)
		goto out;
	pci_bus_ops_init(bus_ops, bus, ops);
	list_add(&bus_ops->list, &pci_bus_ops_list);
	bus_ops = NULL;
out:
	spin_unlock_irqrestore(&inject_lock, flags);
	kfree(bus_ops);
	return 0;
}
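
/*
 * Inject one error request: locate the target device and its Root Port,
 * record the injected status bits in simulated AER registers for both,
 * hook the config accessors on the affected buses, and finally trigger the
 * Root Port's AER service interrupt so the regular recovery path runs.
 */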
static int aer_inject(struct aer_error_inj *einj)
{
	struct aer_error *err, *rperr;
	struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
	struct pci_dev *dev, *rpdev;
	struct pcie_device *edev;
	struct device *device;
	unsigned long flags;
	unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
	int pos_cap_err, rp_pos_cap_err;
	u32 sever, cor_mask, uncor_mask, cor_mask_orig = 0, uncor_mask_orig = 0;
	int ret = 0;

	dev = pci_get_domain_bus_and_slot(einj->domain, einj->bus, devfn);
	if (!dev)
		return -ENODEV;
	rpdev = pcie_find_root_port(dev);
	if (!rpdev) {
		pci_err(dev, "Root port not found\n");
		ret = -ENODEV;
		goto out_put;
	}

	pos_cap_err = dev->aer_cap;
	if (!pos_cap_err) {
		pci_err(dev, "Device doesn't support AER\n");
		ret = -EPROTONOSUPPORT;
		goto out_put;
	}
	pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
	pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask);
	pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
			      &uncor_mask);

	rp_pos_cap_err = rpdev->aer_cap;
	if (!rp_pos_cap_err) {
		pci_err(rpdev, "Root port doesn't support AER\n");
		ret = -EPROTONOSUPPORT;
		goto out_put;
	}

	err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
	if (!err_alloc) {
		ret = -ENOMEM;
		goto out_put;
	}
	rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
	if (!rperr_alloc) {
		ret = -ENOMEM;
		goto out_put;
	}

	if (aer_mask_override) {
		cor_mask_orig = cor_mask;
		/* logical NOT: injecting any bit clears the whole mask */
		cor_mask &= !(einj->cor_status);
		pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
				       cor_mask);

		uncor_mask_orig = uncor_mask;
		uncor_mask &= !(einj->uncor_status);
		pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
				       uncor_mask);
	}

	spin_lock_irqsave(&inject_lock, flags);
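
	/*
	 * Record the injected bits in the simulated AER registers of the
	 * target device, allocating a new record if this is the first
	 * injection for it.
	 */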
	err = __find_aer_error_by_dev(dev);
	if (!err) {
		err = err_alloc;
		err_alloc = NULL;
		aer_error_init(err, einj->domain, einj->bus, devfn,
			       pos_cap_err);
		list_add(&err->list, &einjected);
	}
	err->uncor_status |= einj->uncor_status;
	err->cor_status |= einj->cor_status;
	err->header_log0 = einj->header_log0;
	err->header_log1 = einj->header_log1;
	err->header_log2 = einj->header_log2;
	err->header_log3 = einj->header_log3;

	if (!aer_mask_override && einj->cor_status &&
	    !(einj->cor_status & ~cor_mask)) {
		ret = -EINVAL;
		pci_warn(dev, "The correctable error(s) is masked by device\n");
		spin_unlock_irqrestore(&inject_lock, flags);
		goto out_put;
	}
	if (!aer_mask_override && einj->uncor_status &&
	    !(einj->uncor_status & ~uncor_mask)) {
		ret = -EINVAL;
		pci_warn(dev, "The uncorrectable error(s) is masked by device\n");
		spin_unlock_irqrestore(&inject_lock, flags);
		goto out_put;
	}
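
	/*
	 * Mirror what the Root Port's Root Error Status and Error Source
	 * Identification registers would report for this error, including
	 * the multiple-error and first-fatal bits.
	 */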
	rperr = __find_aer_error_by_dev(rpdev);
	if (!rperr) {
		rperr = rperr_alloc;
		rperr_alloc = NULL;
		aer_error_init(rperr, pci_domain_nr(rpdev->bus),
			       rpdev->bus->number, rpdev->devfn,
			       rp_pos_cap_err);
		list_add(&rperr->list, &einjected);
	}
	if (einj->cor_status) {
		if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
			rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
		else
			rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
		rperr->source_id &= 0xffff0000;
		rperr->source_id |= (einj->bus << 8) | devfn;
	}
	if (einj->uncor_status) {
		if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
			rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
		if (sever & einj->uncor_status) {
			rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
			if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
				rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
		} else
			rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
		rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
		rperr->source_id &= 0x0000ffff;
		rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
	}
	spin_unlock_irqrestore(&inject_lock, flags);

	if (aer_mask_override) {
		pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
				       cor_mask_orig);
		pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
				       uncor_mask_orig);
	}

	ret = pci_bus_set_aer_ops(dev->bus);
	if (ret)
		goto out_put;
	ret = pci_bus_set_aer_ops(rpdev->bus);
	if (ret)
		goto out_put;

	device = pcie_port_find_device(rpdev, PCIE_PORT_SERVICE_AER);
	if (device) {
		edev = to_pcie_device(device);
		if (!get_service_data(edev)) {
			pci_warn(edev->port, "AER service is not initialized\n");
			ret = -EPROTONOSUPPORT;
			goto out_put;
		}
		pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n",
			 einj->cor_status, einj->uncor_status, pci_name(dev));
		ret = irq_inject_interrupt(edev->irq);
	} else {
		pci_err(rpdev, "AER device not found\n");
		ret = -ENODEV;
	}
out_put:
	kfree(err_alloc);
	kfree(rperr_alloc);
	pci_dev_put(dev);
	return ret;
}
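
/*
 * User space writes a struct aer_error_inj to the misc device to request an
 * injection.  Shorter writes are accepted and zero-padded, presumably for
 * compatibility with callers that predate the domain field, which is why
 * the size check uses offsetof(struct aer_error_inj, domain).
 */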
static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
				size_t usize, loff_t *off)
{
	struct aer_error_inj einj;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (usize < offsetof(struct aer_error_inj, domain) ||
	    usize > sizeof(einj))
		return -EINVAL;

	memset(&einj, 0, sizeof(einj));
	if (copy_from_user(&einj, ubuf, usize))
		return -EFAULT;

	ret = aer_inject(&einj);
	return ret ? ret : usize;
}

static const struct file_operations aer_inject_fops = {
	.write = aer_inject_write,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice aer_inject_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "aer_inject",
	.fops = &aer_inject_fops,
};

static int __init aer_inject_init(void)
{
	return misc_register(&aer_inject_device);
}
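
/*
 * On unload, put every bus back on its original pci_ops and free all of the
 * simulated error records.
 */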
static void __exit aer_inject_exit(void)
{
	struct aer_error *err, *err_next;
	unsigned long flags;
	struct pci_bus_ops *bus_ops;

	misc_deregister(&aer_inject_device);

	while ((bus_ops = pci_bus_ops_pop())) {
		pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
		kfree(bus_ops);
	}

	spin_lock_irqsave(&inject_lock, flags);
	list_for_each_entry_safe(err, err_next, &einjected, list) {
		list_del(&err->list);
		kfree(err);
	}
	spin_unlock_irqrestore(&inject_lock, flags);
}

module_init(aer_inject_init);
module_exit(aer_inject_exit);

MODULE_DESCRIPTION("PCIe AER software error injector");
MODULE_LICENSE("GPL");