/*
 * PCIe AER software error injection support.
 *
 * Debugging PCIe AER code is quite difficult because it is hard to
 * trigger various real hardware errors. Software-based error
 * injection can fake almost all kinds of errors with the help of a
 * user space helper tool aer-inject, which can be obtained from:
 *   http://www.kernel.org/pub/linux/utils/pci/aer-inject/
 *
 * Copyright 2009 Intel Corporation.
 *	Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
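
/*
 * Usage sketch (illustrative, not part of the original file): user space
 * injects an error by writing a struct aer_error_inj (defined below) to
 * the misc device registered by this module, normally /dev/aer_inject.
 * The aer-inject tool linked above does exactly that; a minimal
 * hand-rolled client could look roughly like this:
 *
 *	int fd = open("/dev/aer_inject", O_WRONLY);
 *	struct aer_error_inj einj = {
 *		.domain = 0, .bus = 1, .dev = 0, .fn = 0,
 *		.cor_status = PCI_ERR_COR_RCVR,
 *	};
 *	if (fd < 0 || write(fd, &einj, sizeof(einj)) < 0)
 *		perror("aer_inject");
 */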

#include <linux/module.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/stddef.h>
#include "aerdrv.h"

/* Error injection request, copied from user space */
struct aer_error_inj {
	u8 bus;
	u8 dev;
	u8 fn;
	u32 uncor_status;
	u32 cor_status;
	u32 header_log0;
	u32 header_log1;
	u32 header_log2;
	u32 header_log3;
	u16 domain;
};

/* Simulated AER register state of one device */
struct aer_error {
	struct list_head list;
	u16 domain;
	unsigned int bus;
	unsigned int devfn;
	int pos_cap_err;

	u32 uncor_status;
	u32 cor_status;
	u32 header_log0;
	u32 header_log1;
	u32 header_log2;
	u32 header_log3;
	u32 root_status;
	u32 source_id;
};

/* Original pci_ops of a bus, saved so they can be restored on exit */
struct pci_bus_ops {
	struct list_head list;
	struct pci_bus *bus;
	struct pci_ops *ops;
};

static LIST_HEAD(einjected);

static LIST_HEAD(pci_bus_ops_list);

/* Protect einjected and pci_bus_ops_list */
static DEFINE_SPINLOCK(inject_lock);

static void aer_error_init(struct aer_error *err, u16 domain,
			   unsigned int bus, unsigned int devfn,
			   int pos_cap_err)
{
	INIT_LIST_HEAD(&err->list);
	err->domain = domain;
	err->bus = bus;
	err->devfn = devfn;
	err->pos_cap_err = pos_cap_err;
}

/* inject_lock must be held before calling */
static struct aer_error *__find_aer_error(u16 domain, unsigned int bus,
					  unsigned int devfn)
{
	struct aer_error *err;

	list_for_each_entry(err, &einjected, list) {
		if (domain == err->domain &&
		    bus == err->bus &&
		    devfn == err->devfn)
			return err;
	}
	return NULL;
}

/* inject_lock must be held before calling */
static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
{
	int domain = pci_domain_nr(dev->bus);

	if (domain < 0)
		return NULL;
	return __find_aer_error((u16)domain, dev->bus->number, dev->devfn);
}

/* inject_lock must be held before calling */
static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
{
	struct pci_bus_ops *bus_ops;

	list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
		if (bus_ops->bus == bus)
			return bus_ops->ops;
	}
	return NULL;
}

static struct pci_bus_ops *pci_bus_ops_pop(void)
{
	unsigned long flags;
	struct pci_bus_ops *bus_ops = NULL;

	spin_lock_irqsave(&inject_lock, flags);
	if (list_empty(&pci_bus_ops_list))
		bus_ops = NULL;
	else {
		struct list_head *lh = pci_bus_ops_list.next;

		list_del(lh);
		bus_ops = list_entry(lh, struct pci_bus_ops, list);
	}
	spin_unlock_irqrestore(&inject_lock, flags);
	return bus_ops;
}

static u32 *find_pci_config_dword(struct aer_error *err, int where,
				  int *prw1cs)
{
	int rw1cs = 0;
	u32 *target = NULL;

	if (err->pos_cap_err == -1)
		return NULL;

	switch (where - err->pos_cap_err) {
	case PCI_ERR_UNCOR_STATUS:
		target = &err->uncor_status;
		rw1cs = 1;
		break;
	case PCI_ERR_COR_STATUS:
		target = &err->cor_status;
		rw1cs = 1;
		break;
	case PCI_ERR_HEADER_LOG:
		target = &err->header_log0;
		break;
	case PCI_ERR_HEADER_LOG+4:
		target = &err->header_log1;
		break;
	case PCI_ERR_HEADER_LOG+8:
		target = &err->header_log2;
		break;
	case PCI_ERR_HEADER_LOG+12:
		target = &err->header_log3;
		break;
	case PCI_ERR_ROOT_STATUS:
		target = &err->root_status;
		rw1cs = 1;
		break;
	case PCI_ERR_ROOT_COR_SRC:
		target = &err->source_id;
		break;
	}
	if (prw1cs)
		*prw1cs = rw1cs;
	return target;
}
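
/*
 * Note (added for clarity): once an error has been injected, the pci_ops
 * of the affected buses are replaced with pci_ops_aer below.  Dword-sized
 * config reads and writes that hit the simulated AER capability of an
 * injected device are served from the matching struct aer_error;
 * everything else falls through to the bus's original pci_ops saved in
 * pci_bus_ops_list.
 */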

static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
			int size, u32 *val)
{
	u32 *sim;
	struct aer_error *err;
	unsigned long flags;
	struct pci_ops *ops;
	int domain;

	spin_lock_irqsave(&inject_lock, flags);
	if (size != sizeof(u32))
		goto out;
	domain = pci_domain_nr(bus);
	if (domain < 0)
		goto out;
	err = __find_aer_error((u16)domain, bus->number, devfn);
	if (!err)
		goto out;

	sim = find_pci_config_dword(err, where, NULL);
	if (sim) {
		*val = *sim;
		spin_unlock_irqrestore(&inject_lock, flags);
		return 0;
	}
out:
	ops = __find_pci_bus_ops(bus);
	spin_unlock_irqrestore(&inject_lock, flags);
	return ops->read(bus, devfn, where, size, val);
}

int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
		  u32 val)
{
	u32 *sim;
	struct aer_error *err;
	unsigned long flags;
	int rw1cs;
	struct pci_ops *ops;
	int domain;

	spin_lock_irqsave(&inject_lock, flags);
	if (size != sizeof(u32))
		goto out;
	domain = pci_domain_nr(bus);
	if (domain < 0)
		goto out;
	err = __find_aer_error((u16)domain, bus->number, devfn);
	if (!err)
		goto out;

	sim = find_pci_config_dword(err, where, &rw1cs);
	if (sim) {
		/* RW1CS registers: writing the set bits back clears them */
		if (rw1cs)
			*sim ^= val;
		else
			*sim = val;
		spin_unlock_irqrestore(&inject_lock, flags);
		return 0;
	}
out:
	ops = __find_pci_bus_ops(bus);
	spin_unlock_irqrestore(&inject_lock, flags);
	return ops->write(bus, devfn, where, size, val);
}

static struct pci_ops pci_ops_aer = {
	.read = pci_read_aer,
	.write = pci_write_aer,
};
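
/*
 * Note (added for clarity): the original pci_ops of each bus touched by
 * an injection are stashed in pci_bus_ops_list by the helpers below, both
 * so non-simulated accesses can be forwarded and so aer_inject_exit() can
 * restore them when the module is unloaded.
 */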

static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
			     struct pci_bus *bus,
			     struct pci_ops *ops)
{
	INIT_LIST_HEAD(&bus_ops->list);
	bus_ops->bus = bus;
	bus_ops->ops = ops;
}

static int pci_bus_set_aer_ops(struct pci_bus *bus)
{
	struct pci_ops *ops;
	struct pci_bus_ops *bus_ops;
	unsigned long flags;

	bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
	if (!bus_ops)
		return -ENOMEM;
	ops = pci_bus_set_ops(bus, &pci_ops_aer);
	spin_lock_irqsave(&inject_lock, flags);
	if (ops == &pci_ops_aer)
		goto out;
	pci_bus_ops_init(bus_ops, bus, ops);
	list_add(&bus_ops->list, &pci_bus_ops_list);
	bus_ops = NULL;
out:
	spin_unlock_irqrestore(&inject_lock, flags);
	kfree(bus_ops);
	return 0;
}

static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
{
	while (1) {
		if (!pci_is_pcie(dev))
			break;
		if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
			return dev;
		if (!dev->bus->self)
			break;
		dev = dev->bus->self;
	}
	return NULL;
}

static int find_aer_device_iter(struct device *device, void *data)
{
	struct pcie_device **result = data;
	struct pcie_device *pcie_dev;

	if (device->bus == &pcie_port_bus_type) {
		pcie_dev = to_pcie_device(device);
		if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
			*result = pcie_dev;
			return 1;
		}
	}
	return 0;
}

static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
{
	return device_for_each_child(&dev->dev, result, find_aer_device_iter);
}
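
/*
 * Note (added for clarity): aer_inject() below builds the simulated error
 * state in two places: the target device's own AER status and header-log
 * registers, and the root error status/source registers of the root port
 * above it.  It then hooks the config accesses of both buses and, if the
 * root port is bound to the AER port service, hands the faked event to
 * that service so the normal AER handling path runs against the
 * simulated registers.
 */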

static int aer_inject(struct aer_error_inj *einj)
{
	struct aer_error *err, *rperr;
	struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
	struct pci_dev *dev, *rpdev;
	struct pcie_device *edev;
	unsigned long flags;
	unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
	int pos_cap_err, rp_pos_cap_err;
	u32 sever, cor_mask, uncor_mask;
	int ret = 0;

	dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
	if (!dev)
		return -ENODEV;
	rpdev = pcie_find_root_port(dev);
	if (!rpdev) {
		ret = -ENOTTY;
		goto out_put;
	}

	pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos_cap_err) {
		ret = -ENOTTY;
		goto out_put;
	}
	pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
	pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask);
	pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
			      &uncor_mask);

	rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
	if (!rp_pos_cap_err) {
		ret = -ENOTTY;
		goto out_put;
	}

	err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
	if (!err_alloc) {
		ret = -ENOMEM;
		goto out_put;
	}
	rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
	if (!rperr_alloc) {
		ret = -ENOMEM;
		goto out_put;
	}

	spin_lock_irqsave(&inject_lock, flags);

	err = __find_aer_error_by_dev(dev);
	if (!err) {
		err = err_alloc;
		err_alloc = NULL;
		aer_error_init(err, einj->domain, einj->bus, devfn,
			       pos_cap_err);
		list_add(&err->list, &einjected);
	}
	err->uncor_status |= einj->uncor_status;
	err->cor_status |= einj->cor_status;
	err->header_log0 = einj->header_log0;
	err->header_log1 = einj->header_log1;
	err->header_log2 = einj->header_log2;
	err->header_log3 = einj->header_log3;

	if (einj->cor_status && !(einj->cor_status & ~cor_mask)) {
		ret = -EINVAL;
		printk(KERN_WARNING "The correctable error(s) are masked "
				"by device\n");
		spin_unlock_irqrestore(&inject_lock, flags);
		goto out_put;
	}
	if (einj->uncor_status && !(einj->uncor_status & ~uncor_mask)) {
		ret = -EINVAL;
		printk(KERN_WARNING "The uncorrectable error(s) are masked "
				"by device\n");
		spin_unlock_irqrestore(&inject_lock, flags);
		goto out_put;
	}

	rperr = __find_aer_error_by_dev(rpdev);
	if (!rperr) {
		rperr = rperr_alloc;
		rperr_alloc = NULL;
		aer_error_init(rperr, pci_domain_nr(rpdev->bus),
			       rpdev->bus->number, rpdev->devfn,
			       rp_pos_cap_err);
		list_add(&rperr->list, &einjected);
	}
	if (einj->cor_status) {
		if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
			rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
		else
			rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
		rperr->source_id &= 0xffff0000;
		rperr->source_id |= (einj->bus << 8) | devfn;
	}
	if (einj->uncor_status) {
		if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
			rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
		if (sever & einj->uncor_status) {
			rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
			if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
				rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
		} else
			rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
		rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
		rperr->source_id &= 0x0000ffff;
		rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
	}
	spin_unlock_irqrestore(&inject_lock, flags);

	ret = pci_bus_set_aer_ops(dev->bus);
	if (ret)
		goto out_put;
	ret = pci_bus_set_aer_ops(rpdev->bus);
	if (ret)
		goto out_put;

	if (find_aer_device(rpdev, &edev)) {
		if (!get_service_data(edev)) {
			printk(KERN_WARNING
			       "AER service is not initialized\n");
			ret = -EINVAL;
			goto out_put;
		}
		aer_irq(-1, edev);
	} else
		ret = -EINVAL;
out_put:
	kfree(err_alloc);
	kfree(rperr_alloc);
	pci_dev_put(dev);
	return ret;
}

static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
				size_t usize, loff_t *off)
{
	struct aer_error_inj einj;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (usize < offsetof(struct aer_error_inj, domain) ||
	    usize > sizeof(einj))
		return -EINVAL;

	memset(&einj, 0, sizeof(einj));
	if (copy_from_user(&einj, ubuf, usize))
		return -EFAULT;

	ret = aer_inject(&einj);
	return ret ? ret : usize;
}
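
/*
 * Note (added for clarity): the write() handler above accepts a partial
 * struct aer_error_inj.  Any size from the fields up to (but not
 * including) "domain" through the full structure is allowed, so clients
 * built before the domain field was added keep working; omitted fields
 * are treated as zero.
 */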

static const struct file_operations aer_inject_fops = {
	.write = aer_inject_write,
	.owner = THIS_MODULE,
};

static struct miscdevice aer_inject_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "aer_inject",
	.fops = &aer_inject_fops,
};

static int __init aer_inject_init(void)
{
	return misc_register(&aer_inject_device);
}

static void __exit aer_inject_exit(void)
{
	struct aer_error *err, *err_next;
	unsigned long flags;
	struct pci_bus_ops *bus_ops;

	misc_deregister(&aer_inject_device);

	while ((bus_ops = pci_bus_ops_pop())) {
		pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
		kfree(bus_ops);
	}

	spin_lock_irqsave(&inject_lock, flags);
	list_for_each_entry_safe(err, err_next, &einjected, list) {
		list_del(&err->list);
		kfree(err);
	}
	spin_unlock_irqrestore(&inject_lock, flags);
}

module_init(aer_inject_init);
module_exit(aer_inject_exit);

MODULE_DESCRIPTION("PCIe AER software error injector");
MODULE_LICENSE("GPL");