2 * PCIe AER software error injection support.
 * Debugging PCIe AER code is quite difficult because it is hard to
5 * trigger various real hardware errors. Software based error
6 * injection can fake almost all kinds of errors with the help of a
 * user space helper tool aer-inject, which can be obtained from:
8 * http://www.kernel.org/pub/linux/utils/pci/aer-inject/
10 * Copyright 2009 Intel Corporation.
11 * Huang Ying <ying.huang@intel.com>
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; version 2
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/miscdevice.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
26 #include <linux/uaccess.h>
27 #include <linux/stddef.h>
30 /* Override the existing corrected and uncorrected error masks */
31 static int aer_mask_override
;
32 module_param(aer_mask_override
, bool, 0);
/*
 * Request record describing one error to inject (filled in from user
 * space by aer_inject_write).
 * NOTE(review): this excerpt is truncated -- the struct bodies are
 * incomplete, and the two 'list' members below appear to belong to
 * other struct declarations (aer_error / pci_bus_ops) whose headers
 * were elided; confirm against the full file.
 */
struct aer_error_inj
{
	struct list_head list;
	struct list_head list;
/* All currently injected (simulated) errors. */
static LIST_HEAD(einjected);

/* One entry per hooked bus: remembers the bus's original pci_ops. */
static LIST_HEAD(pci_bus_ops_list);

/* Protect einjected and pci_bus_ops_list */
static DEFINE_SPINLOCK(inject_lock);
/*
 * Initialize an aer_error record for the function at domain/bus/devfn.
 * (Excerpt truncated: the tail of the parameter list and most field
 * assignments are elided.)
 */
static void aer_error_init(struct aer_error *err, u16 domain,
			   unsigned int bus, unsigned int devfn,
	INIT_LIST_HEAD(&err->list);
	/* Offset of the simulated AER extended capability; -1 is treated
	 * as "no capability" by find_pci_config_dword(). */
	err->pos_cap_err = pos_cap_err;
/* inject_lock must be held before calling */
/* Linear scan of einjected for a record matching domain/bus(/devfn). */
static struct aer_error *__find_aer_error(u16 domain, unsigned int bus,
	struct aer_error *err;

	list_for_each_entry(err, &einjected, list) {
		if (domain == err->domain &&
/* inject_lock must be held before calling */
/* Convenience wrapper: look up the injected error attached to @dev. */
static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
	int domain = pci_domain_nr(dev->bus);

	return __find_aer_error((u16)domain, dev->bus->number, dev->devfn);
/* inject_lock must be held before calling */
/*
 * Find the saved original pci_ops for @bus.  Presumably returns NULL
 * when @bus was never hooked -- the return paths are elided in this
 * excerpt; confirm against the full file.
 */
static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
	struct pci_bus_ops *bus_ops;

	list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
		if (bus_ops->bus == bus)
/*
 * Detach and return one entry from pci_bus_ops_list, or NULL when the
 * list is empty.  Called in a loop by aer_inject_exit() to restore the
 * original bus ops.
 */
static struct pci_bus_ops *pci_bus_ops_pop(void)
	struct pci_bus_ops *bus_ops = NULL;

	spin_lock_irqsave(&inject_lock, flags);
	if (list_empty(&pci_bus_ops_list))
		struct list_head *lh = pci_bus_ops_list.next;
		bus_ops = list_entry(lh, struct pci_bus_ops, list);
	spin_unlock_irqrestore(&inject_lock, flags);
/*
 * Map a config-space offset @where inside the simulated AER capability
 * to the backing u32 field of @err.  A device with no AER capability
 * (pos_cap_err == -1) takes the early bail-out path.
 * NOTE(review): the 'break' statements between cases and the return
 * paths are elided in this excerpt -- confirm each case breaks in the
 * full file.
 */
static u32 *find_pci_config_dword(struct aer_error *err, int where,
	if (err->pos_cap_err == -1)
	switch (where - err->pos_cap_err) {
	case PCI_ERR_UNCOR_STATUS:
		target = &err->uncor_status;
	case PCI_ERR_COR_STATUS:
		target = &err->cor_status;
	case PCI_ERR_HEADER_LOG:
		target = &err->header_log0;
	case PCI_ERR_HEADER_LOG+4:
		target = &err->header_log1;
	case PCI_ERR_HEADER_LOG+8:
		target = &err->header_log2;
	case PCI_ERR_HEADER_LOG+12:
		target = &err->header_log3;
	case PCI_ERR_ROOT_STATUS:
		target = &err->root_status;
	case PCI_ERR_ROOT_ERR_SRC:
		target = &err->source_id;
/*
 * Simulated config-space read.  Under inject_lock, dword reads that hit
 * the simulated AER registers are served from the matching aer_error
 * record; everything else is forwarded to the bus's original ->read via
 * the saved pci_ops.  (Excerpt truncated: the early-return/fallback
 * paths between these statements are elided.)
 */
static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
	struct aer_error *err;

	spin_lock_irqsave(&inject_lock, flags);
	/* Only full-dword accesses can hit the simulated registers. */
	if (size != sizeof(u32))
	domain = pci_domain_nr(bus);
	err = __find_aer_error((u16)domain, bus->number, devfn);
	sim = find_pci_config_dword(err, where, NULL);
	spin_unlock_irqrestore(&inject_lock, flags);
	ops = __find_pci_bus_ops(bus);
	spin_unlock_irqrestore(&inject_lock, flags);
	return ops->read(bus, devfn, where, size, val);
/*
 * Simulated config-space write, mirror of pci_read_aer(): dword writes
 * that hit simulated AER registers update the aer_error record (the
 * rw1cs flag distinguishes write-1-to-clear registers); everything else
 * is forwarded to the original ->write.
 * NOTE(review): unlike pci_read_aer this is not declared 'static' in
 * this excerpt -- likely a transcription loss; confirm in the full file.
 */
int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
	struct aer_error *err;

	spin_lock_irqsave(&inject_lock, flags);
	if (size != sizeof(u32))
	domain = pci_domain_nr(bus);
	err = __find_aer_error((u16)domain, bus->number, devfn);
	sim = find_pci_config_dword(err, where, &rw1cs);
	spin_unlock_irqrestore(&inject_lock, flags);
	ops = __find_pci_bus_ops(bus);
	spin_unlock_irqrestore(&inject_lock, flags);
	return ops->write(bus, devfn, where, size, val);
/* Replacement ops installed on hooked buses (see pci_bus_set_aer_ops). */
static struct pci_ops pci_ops_aer = {
	.read = pci_read_aer,
	.write = pci_write_aer,
/*
 * Record a bus and its original ops in @bus_ops so they can later be
 * restored.  (Excerpt truncated: remaining parameters and field
 * assignments are elided.)
 */
static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
	INIT_LIST_HEAD(&bus_ops->list);
/*
 * Hook @bus: swap its pci_ops for pci_ops_aer and remember the original
 * ops on pci_bus_ops_list.  If the bus already uses pci_ops_aer, the
 * (elided) branch presumably skips re-adding it.  (Excerpt truncated:
 * allocation-failure and cleanup paths are elided.)
 */
static int pci_bus_set_aer_ops(struct pci_bus *bus)
	struct pci_bus_ops *bus_ops;

	bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
	ops = pci_bus_set_ops(bus, &pci_ops_aer);
	spin_lock_irqsave(&inject_lock, flags);
	if (ops == &pci_ops_aer)
	pci_bus_ops_init(bus_ops, bus, ops);
	list_add(&bus_ops->list, &pci_bus_ops_list);
	spin_unlock_irqrestore(&inject_lock, flags);
/*
 * Walk upstream from @dev (via bus->self) until the PCIe root port
 * above it is found.
 * NOTE(review): uses the legacy dev->pcie_type field; newer kernels
 * replaced it with pci_pcie_type(dev) -- confirm target kernel version.
 */
static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
	if (!pci_is_pcie(dev))
	if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
	dev = dev->bus->self;
/*
 * device_for_each_child() callback: when @device is a PCIe port-service
 * device providing the AER service, store it through the
 * (struct pcie_device **) passed in @data.
 */
static int find_aer_device_iter(struct device *device, void *data)
	struct pcie_device **result = data;
	struct pcie_device *pcie_dev;

	if (device->bus == &pcie_port_bus_type) {
		pcie_dev = to_pcie_device(device);
		if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
/* Find the AER port-service device hanging off @dev, if any. */
static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
	return device_for_each_child(&dev->dev, result, find_aer_device_iter);
320 static int aer_inject(struct aer_error_inj
*einj
)
322 struct aer_error
*err
, *rperr
;
323 struct aer_error
*err_alloc
= NULL
, *rperr_alloc
= NULL
;
324 struct pci_dev
*dev
, *rpdev
;
325 struct pcie_device
*edev
;
327 unsigned int devfn
= PCI_DEVFN(einj
->dev
, einj
->fn
);
328 int pos_cap_err
, rp_pos_cap_err
;
329 u32 sever
, cor_mask
, uncor_mask
, cor_mask_orig
= 0, uncor_mask_orig
= 0;
332 dev
= pci_get_domain_bus_and_slot((int)einj
->domain
, einj
->bus
, devfn
);
335 rpdev
= pcie_find_root_port(dev
);
341 pos_cap_err
= pci_find_ext_capability(dev
, PCI_EXT_CAP_ID_ERR
);
346 pci_read_config_dword(dev
, pos_cap_err
+ PCI_ERR_UNCOR_SEVER
, &sever
);
347 pci_read_config_dword(dev
, pos_cap_err
+ PCI_ERR_COR_MASK
, &cor_mask
);
348 pci_read_config_dword(dev
, pos_cap_err
+ PCI_ERR_UNCOR_MASK
,
351 rp_pos_cap_err
= pci_find_ext_capability(rpdev
, PCI_EXT_CAP_ID_ERR
);
352 if (!rp_pos_cap_err
) {
357 err_alloc
= kzalloc(sizeof(struct aer_error
), GFP_KERNEL
);
362 rperr_alloc
= kzalloc(sizeof(struct aer_error
), GFP_KERNEL
);
368 if (aer_mask_override
) {
369 cor_mask_orig
= cor_mask
;
370 cor_mask
&= !(einj
->cor_status
);
371 pci_write_config_dword(dev
, pos_cap_err
+ PCI_ERR_COR_MASK
,
374 uncor_mask_orig
= uncor_mask
;
375 uncor_mask
&= !(einj
->uncor_status
);
376 pci_write_config_dword(dev
, pos_cap_err
+ PCI_ERR_UNCOR_MASK
,
380 spin_lock_irqsave(&inject_lock
, flags
);
382 err
= __find_aer_error_by_dev(dev
);
386 aer_error_init(err
, einj
->domain
, einj
->bus
, devfn
,
388 list_add(&err
->list
, &einjected
);
390 err
->uncor_status
|= einj
->uncor_status
;
391 err
->cor_status
|= einj
->cor_status
;
392 err
->header_log0
= einj
->header_log0
;
393 err
->header_log1
= einj
->header_log1
;
394 err
->header_log2
= einj
->header_log2
;
395 err
->header_log3
= einj
->header_log3
;
397 if (!aer_mask_override
&& einj
->cor_status
&&
398 !(einj
->cor_status
& ~cor_mask
)) {
400 printk(KERN_WARNING
"The correctable error(s) is masked "
402 spin_unlock_irqrestore(&inject_lock
, flags
);
405 if (!aer_mask_override
&& einj
->uncor_status
&&
406 !(einj
->uncor_status
& ~uncor_mask
)) {
408 printk(KERN_WARNING
"The uncorrectable error(s) is masked "
410 spin_unlock_irqrestore(&inject_lock
, flags
);
414 rperr
= __find_aer_error_by_dev(rpdev
);
418 aer_error_init(rperr
, pci_domain_nr(rpdev
->bus
),
419 rpdev
->bus
->number
, rpdev
->devfn
,
421 list_add(&rperr
->list
, &einjected
);
423 if (einj
->cor_status
) {
424 if (rperr
->root_status
& PCI_ERR_ROOT_COR_RCV
)
425 rperr
->root_status
|= PCI_ERR_ROOT_MULTI_COR_RCV
;
427 rperr
->root_status
|= PCI_ERR_ROOT_COR_RCV
;
428 rperr
->source_id
&= 0xffff0000;
429 rperr
->source_id
|= (einj
->bus
<< 8) | devfn
;
431 if (einj
->uncor_status
) {
432 if (rperr
->root_status
& PCI_ERR_ROOT_UNCOR_RCV
)
433 rperr
->root_status
|= PCI_ERR_ROOT_MULTI_UNCOR_RCV
;
434 if (sever
& einj
->uncor_status
) {
435 rperr
->root_status
|= PCI_ERR_ROOT_FATAL_RCV
;
436 if (!(rperr
->root_status
& PCI_ERR_ROOT_UNCOR_RCV
))
437 rperr
->root_status
|= PCI_ERR_ROOT_FIRST_FATAL
;
439 rperr
->root_status
|= PCI_ERR_ROOT_NONFATAL_RCV
;
440 rperr
->root_status
|= PCI_ERR_ROOT_UNCOR_RCV
;
441 rperr
->source_id
&= 0x0000ffff;
442 rperr
->source_id
|= ((einj
->bus
<< 8) | devfn
) << 16;
444 spin_unlock_irqrestore(&inject_lock
, flags
);
446 if (aer_mask_override
) {
447 pci_write_config_dword(dev
, pos_cap_err
+ PCI_ERR_COR_MASK
,
449 pci_write_config_dword(dev
, pos_cap_err
+ PCI_ERR_UNCOR_MASK
,
453 ret
= pci_bus_set_aer_ops(dev
->bus
);
456 ret
= pci_bus_set_aer_ops(rpdev
->bus
);
460 if (find_aer_device(rpdev
, &edev
)) {
461 if (!get_service_data(edev
)) {
462 printk(KERN_WARNING
"AER service is not initialized\n");
/*
 * write() handler for the aer_inject misc device: copy an aer_error_inj
 * record from user space (CAP_SYS_ADMIN only) and inject it.  Sizes
 * between offsetof(.., domain) and sizeof(einj) are accepted; the
 * memset zero-fills whatever the caller did not supply (presumably for
 * compatibility with older aer-inject tool binaries -- confirm).
 */
static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
				size_t usize, loff_t *off)
	struct aer_error_inj einj;

	if (!capable(CAP_SYS_ADMIN))
	if (usize < offsetof(struct aer_error_inj, domain) ||
	    usize > sizeof(einj))
	memset(&einj, 0, sizeof(einj));
	if (copy_from_user(&einj, ubuf, usize))
	ret = aer_inject(&einj);
	/* On success report the number of bytes consumed. */
	return ret ? ret : usize;
/* Write-only interface; seeking is a no-op. */
static const struct file_operations aer_inject_fops = {
	.write = aer_inject_write,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
/* Misc char device "aer_inject" with a dynamically assigned minor. */
static struct miscdevice aer_inject_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "aer_inject",
	.fops = &aer_inject_fops,
/* Module init: just register the misc device. */
static int __init aer_inject_init(void)
	return misc_register(&aer_inject_device);
/*
 * Module exit: unregister the device, restore the original pci_ops on
 * every hooked bus, then drop all injected-error records under
 * inject_lock.
 * NOTE(review): the kfree() calls for bus_ops and err appear to be in
 * the elided lines -- confirm nothing leaks in the full file.
 */
static void __exit aer_inject_exit(void)
	struct aer_error *err, *err_next;
	struct pci_bus_ops *bus_ops;

	misc_deregister(&aer_inject_device);
	while ((bus_ops = pci_bus_ops_pop())) {
		pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
	spin_lock_irqsave(&inject_lock, flags);
	list_for_each_entry_safe(err, err_next, &einjected, list) {
		list_del(&err->list);
	spin_unlock_irqrestore(&inject_lock, flags);
/* Standard module entry/exit hooks and metadata. */
module_init(aer_inject_init);
module_exit(aer_inject_exit);

MODULE_DESCRIPTION("PCIe AER software error injector");
MODULE_LICENSE("GPL");