/*
 * APEI Generic Hardware Error Source support
 *
 * Generic Hardware Error Source provides a way to report platform
 * hardware errors (such as those from a chipset). It works in the so
 * called "Firmware First" mode: hardware errors are reported to
 * firmware first, then forwarded to Linux by the firmware. This way,
 * the firmware can inspect non-standard hardware error registers or a
 * non-standard hardware link to produce more hardware error
 * information for Linux.
 *
 * For more information about Generic Hardware Error Source, please
 * refer to ACPI Specification version 4.0, section 17.3.2.6.
 *
 * Copyright 2010,2011 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/kdebug.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <linux/pci.h>
#include <linux/aer.h>

#include <acpi/ghes.h>
#include <asm/mce.h>
#include <asm/tlbflush.h>
#include <asm/nmi.h>

#include "apei-internal.h"
#define GHES_PFX	"GHES: "

#define GHES_ESTATUS_MAX_SIZE		65536
#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536

#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3

/* This is just an estimation for memory pool allocation */
#define GHES_ESTATUS_CACHE_AVG_SIZE	512

#define GHES_ESTATUS_CACHES_SIZE	4

#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
/* Prevent too many caches from being allocated because of RCU */
#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)

#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
	((struct acpi_generic_status *)				\
	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))

#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_node) + (estatus_len))
#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
	((struct acpi_generic_status *)				\
	 ((struct ghes_estatus_node *)(estatus_node) + 1))
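/*
 * Setting "ghes.disable=1" on the kernel command line keeps this
 * driver from claiming any error sources.
 */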
bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);

static int ghes_panic_timeout	__read_mostly = 30;
/*
 * All error sources notified with SCI share one notifier function, so
 * they need to be linked and checked one by one. The same applies to
 * the NMI-notified sources below.
 *
 * RCU is used for these lists, so ghes_list_mutex is only used for
 * list changing, not for traversing.
 */
static LIST_HEAD(ghes_sci);
static LIST_HEAD(ghes_nmi);
static DEFINE_MUTEX(ghes_list_mutex);
/*
 * An NMI may be triggered on any CPU, so ghes_nmi_lock is used for
 * mutual exclusion in the NMI handler.
 */
static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
/*
 * Because the memory area used to transfer hardware error information
 * from BIOS to Linux can be determined only in an NMI, IRQ or timer
 * handler, and a general ioremap can not be used in atomic context, a
 * special version of atomic ioremap is implemented here.
 */
/*
 * Two virtual pages are used, one for NMI context, the other for
 * IRQ/PROCESS context.
 */
#define GHES_IOREMAP_PAGES		2
#define GHES_IOREMAP_NMI_PAGE(base)	(base)
#define GHES_IOREMAP_IRQ_PAGE(base)	((base) + PAGE_SIZE)
/* virtual memory area for atomic ioremap */
static struct vm_struct *ghes_ioremap_area;
/*
 * These two spinlocks are used to prevent the atomic ioremap virtual
 * memory areas from being mapped simultaneously.
 */
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
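/*
 * Giving each context its own scratch page and lock means an NMI that
 * interrupts an IRQ/process-context copy still gets a free mapping
 * and never has to take the IRQ-side lock.
 */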
/*
 * printk is not safe in NMI context. So in the NMI handler, we
 * allocate the required memory from the lock-less memory allocator
 * (ghes_estatus_pool), save the estatus into it, put it onto the
 * lock-less list (ghes_estatus_llist), then delay the printk into IRQ
 * context via irq_work (ghes_proc_irq_work).
 * ghes_estatus_pool_size_request records the pool size required by
 * all NMI error sources.
 */
static struct gen_pool *ghes_estatus_pool;
static unsigned long ghes_estatus_pool_size_request;
static struct llist_head ghes_estatus_llist;
static struct irq_work ghes_proc_irq_work;
struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;
static int ghes_ioremap_init(void)
{
	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
					  VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!ghes_ioremap_area) {
		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
		return -ENOMEM;
	}

	return 0;
}
static void ghes_ioremap_exit(void)
{
	free_vm_area(ghes_ioremap_area);
}
static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{
	unsigned long vaddr;

	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);

	return (void __iomem *)vaddr;
}
static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
{
	unsigned long vaddr;

	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);

	return (void __iomem *)vaddr;
}
static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	__flush_tlb_one(vaddr);
}
static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	__flush_tlb_one(vaddr);
}
static int ghes_estatus_pool_init(void)
{
	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!ghes_estatus_pool)
		return -ENOMEM;

	return 0;
}
static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
					      struct gen_pool_chunk *chunk,
					      void *data)
{
	free_page(chunk->start_addr);
}
static void ghes_estatus_pool_exit(void)
{
	gen_pool_for_each_chunk(ghes_estatus_pool,
				ghes_estatus_pool_free_chunk_page, NULL);
	gen_pool_destroy(ghes_estatus_pool);
}
static int ghes_estatus_pool_expand(unsigned long len)
{
	unsigned long i, pages, size, addr;
	int ret;

	ghes_estatus_pool_size_request += PAGE_ALIGN(len);
	size = gen_pool_size(ghes_estatus_pool);
	if (size >= ghes_estatus_pool_size_request)
		return 0;
	pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
	for (i = 0; i < pages; i++) {
		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			return -ENOMEM;
		ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
		if (ret)
			return ret;
	}

	return 0;
}
static void ghes_estatus_pool_shrink(unsigned long len)
{
	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
}
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
	struct ghes *ghes;
	unsigned int error_block_length;
	int rc;

	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
	if (!ghes)
		return ERR_PTR(-ENOMEM);
	ghes->generic = generic;
	rc = apei_map_generic_address(&generic->error_status_address);
	if (rc)
		goto err_free;
	error_block_length = generic->error_block_length;
	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
		pr_warning(FW_WARN GHES_PFX
			   "Error status block length is too long: %u for "
			   "generic hardware error source: %d.\n",
			   error_block_length, generic->header.source_id);
		error_block_length = GHES_ESTATUS_MAX_SIZE;
	}
	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
	if (!ghes->estatus) {
		rc = -ENOMEM;
		goto err_unmap;
	}

	return ghes;

err_unmap:
	apei_unmap_generic_address(&generic->error_status_address);
err_free:
	kfree(ghes);
	return ERR_PTR(rc);
}
static void ghes_fini(struct ghes *ghes)
{
	kfree(ghes->estatus);
	apei_unmap_generic_address(&ghes->generic->error_status_address);
}
static inline int ghes_severity(int severity)
{
	switch (severity) {
	case CPER_SEV_INFORMATIONAL:
		return GHES_SEV_NO;
	case CPER_SEV_CORRECTED:
		return GHES_SEV_CORRECTED;
	case CPER_SEV_RECOVERABLE:
		return GHES_SEV_RECOVERABLE;
	case CPER_SEV_FATAL:
		return GHES_SEV_PANIC;
	default:
		/* Unknown, go panic */
		return GHES_SEV_PANIC;
	}
}
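/*
 * Copy between a kernel buffer and physical memory at most one page
 * per iteration: only a single scratch page is mapped for the current
 * context (NMI vs. IRQ/process), so larger transfers are split at
 * page boundaries.
 */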
static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
				  int from_phys)
{
	void __iomem *vaddr;
	unsigned long flags = 0;
	int in_nmi = in_nmi();
	u64 offset;
	u32 trunk;

	while (len > 0) {
		offset = paddr - (paddr & PAGE_MASK);
		if (in_nmi) {
			raw_spin_lock(&ghes_ioremap_lock_nmi);
			vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
		} else {
			spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
			vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
		}
		trunk = PAGE_SIZE - offset;
		trunk = min(trunk, len);
		if (from_phys)
			memcpy_fromio(buffer, vaddr + offset, trunk);
		else
			memcpy_toio(vaddr + offset, buffer, trunk);
		len -= trunk;
		paddr += trunk;
		buffer += trunk;
		if (in_nmi) {
			ghes_iounmap_nmi(vaddr);
			raw_spin_unlock(&ghes_ioremap_lock_nmi);
		} else {
			ghes_iounmap_irq(vaddr);
			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
		}
	}
}
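/*
 * Read the error status block in two steps: the fixed-size header
 * first, to learn the full record length, then the rest, validating
 * the advertised length against the block size and the CPER section
 * structure before trusting the data.
 */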
static int ghes_read_estatus(struct ghes *ghes, int silent)
{
	struct acpi_hest_generic *g = ghes->generic;
	u64 buf_paddr;
	u32 len;
	int rc;

	rc = apei_read(&buf_paddr, &g->error_status_address);
	if (rc) {
		if (!silent && printk_ratelimit())
			pr_warning(FW_WARN GHES_PFX
"Failed to read error status block address for hardware error source: %d.\n",
				   g->header.source_id);
		return -EIO;
	}
	if (!buf_paddr)
		return -ENOENT;

	ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
			      sizeof(*ghes->estatus), 1);
	if (!ghes->estatus->block_status)
		return -ENOENT;

	ghes->buffer_paddr = buf_paddr;
	ghes->flags |= GHES_TO_CLEAR;

	rc = -EIO;
	len = cper_estatus_len(ghes->estatus);
	if (len < sizeof(*ghes->estatus))
		goto err_read_block;
	if (len > ghes->generic->error_block_length)
		goto err_read_block;
	if (cper_estatus_check_header(ghes->estatus))
		goto err_read_block;
	ghes_copy_tofrom_phys(ghes->estatus + 1,
			      buf_paddr + sizeof(*ghes->estatus),
			      len - sizeof(*ghes->estatus), 1);
	if (cper_estatus_check(ghes->estatus))
		goto err_read_block;
	rc = 0;

err_read_block:
	if (rc && !silent && printk_ratelimit())
		pr_warning(FW_WARN GHES_PFX
			   "Failed to read error status block!\n");
	return rc;
}
static void ghes_clear_estatus(struct ghes *ghes)
{
	ghes->estatus->block_status = 0;
	if (!(ghes->flags & GHES_TO_CLEAR))
		return;
	ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
			      sizeof(ghes->estatus->block_status), 0);
	ghes->flags &= ~GHES_TO_CLEAR;
}
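/*
 * Feed CPER memory-error sections into the memory-failure machinery:
 * pages that exceeded a corrected-error threshold are soft-offlined,
 * while recoverable uncorrected errors trigger normal hard offlining.
 */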
static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev)
{
#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
	unsigned long pfn;
	int flags = -1;
	int sec_sev = ghes_severity(gdata->error_severity);
	struct cper_sec_mem_err *mem_err;
	mem_err = (struct cper_sec_mem_err *)(gdata + 1);

	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
		return;

	pfn = mem_err->physical_addr >> PAGE_SHIFT;
	if (!pfn_valid(pfn)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Invalid address in generic error data: %#llx\n",
				    mem_err->physical_addr);
		return;
	}

	/* iff following two events can be handled properly by now */
	if (sec_sev == GHES_SEV_CORRECTED &&
	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
		flags = MF_SOFT_OFFLINE;
	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
		flags = 0;

	if (flags != -1)
		memory_failure_queue(pfn, 0, flags);
#endif
}
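/*
 * Walk every section of an error status block and dispatch by section
 * type: platform memory errors go to EDAC, the x86 MCE log and the
 * memory-failure code; PCIe errors are queued for AER recovery.
 */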
static void ghes_do_proc(struct ghes *ghes,
			 const struct acpi_generic_status *estatus)
{
	int sev, sec_sev;
	struct acpi_generic_data *gdata;

	sev = ghes_severity(estatus->error_severity);
	apei_estatus_for_each_section(estatus, gdata) {
		sec_sev = ghes_severity(gdata->error_severity);
		if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				 CPER_SEC_PLATFORM_MEM)) {
			struct cper_sec_mem_err *mem_err;
			mem_err = (struct cper_sec_mem_err *)(gdata+1);
			ghes_edac_report_mem_error(ghes, sev, mem_err);
#ifdef CONFIG_X86_MCE
			apei_mce_report_mem_error(sev, mem_err);
#endif
			ghes_handle_memory_failure(gdata, sev);
		}
#ifdef CONFIG_ACPI_APEI_PCIEAER
		else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				      CPER_SEC_PCIE)) {
			struct cper_sec_pcie *pcie_err;
			pcie_err = (struct cper_sec_pcie *)(gdata+1);
			if (sev == GHES_SEV_RECOVERABLE &&
			    sec_sev == GHES_SEV_RECOVERABLE &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
				unsigned int devfn;
				int aer_severity;

				devfn = PCI_DEVFN(pcie_err->device_id.device,
						  pcie_err->device_id.function);
				aer_severity = cper_severity_to_aer(sev);

				/*
				 * If firmware reset the component to contain
				 * the error, we must reinitialize it before
				 * use, so treat it as a fatal AER error.
				 */
				if (gdata->flags & CPER_SEC_RESET)
					aer_severity = AER_FATAL;

				aer_recover_queue(pcie_err->device_id.segment,
						  pcie_err->device_id.bus,
						  devfn, aer_severity,
						  (struct aer_capability_regs *)
						  pcie_err->aer_info);
			}
		}
#endif
	}
}
static void __ghes_print_estatus(const char *pfx,
				 const struct acpi_hest_generic *generic,
				 const struct acpi_generic_status *estatus)
{
	static atomic_t seqno;
	unsigned int curr_seqno;
	char pfx_seq[64];

	if (pfx == NULL) {
		if (ghes_severity(estatus->error_severity) <=
		    GHES_SEV_CORRECTED)
			pfx = KERN_WARNING;
		else
			pfx = KERN_ERR;
	}
	curr_seqno = atomic_inc_return(&seqno);
	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx_seq, generic->header.source_id);
	cper_estatus_print(pfx_seq, estatus);
}
static int ghes_print_estatus(const char *pfx,
			      const struct acpi_hest_generic *generic,
			      const struct acpi_generic_status *estatus)
{
	/* Not more than 2 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
	struct ratelimit_state *ratelimit;

	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
		ratelimit = &ratelimit_corrected;
	else
		ratelimit = &ratelimit_uncorrected;
	if (__ratelimit(ratelimit)) {
		__ghes_print_estatus(pfx, generic, estatus);
		return 1;
	}

	return 0;
}
/*
 * GHES error status reporting throttle, to report more kinds of
 * errors instead of just the most frequently occurring ones.
 */
static int ghes_estatus_cached(struct acpi_generic_status *estatus)
{
	u32 len;
	int i, cached = 0;
	unsigned long long now;
	struct ghes_estatus_cache *cache;
	struct acpi_generic_status *cache_estatus;

	len = cper_estatus_len(estatus);
	rcu_read_lock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL)
			continue;
		if (len != cache->estatus_len)
			continue;
		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
		if (memcmp(estatus, cache_estatus, len))
			continue;
		atomic_inc(&cache->count);
		now = sched_clock();
		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
			cached = 1;
		break;
	}
	rcu_read_unlock();

	return cached;
}
static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
	struct acpi_hest_generic *generic,
	struct acpi_generic_status *estatus)
{
	int alloced;
	u32 len, cache_len;
	struct ghes_estatus_cache *cache;
	struct acpi_generic_status *cache_estatus;

	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	len = cper_estatus_len(estatus);
	cache_len = GHES_ESTATUS_CACHE_LEN(len);
	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
	if (!cache) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
	memcpy(cache_estatus, estatus, len);
	cache->estatus_len = len;
	atomic_set(&cache->count, 0);
	cache->generic = generic;
	cache->time_in = sched_clock();

	return cache;
}
static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
{
	u32 len;

	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
	len = GHES_ESTATUS_CACHE_LEN(len);
	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
	atomic_dec(&ghes_estatus_cache_alloced);
}
static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
{
	struct ghes_estatus_cache *cache;

	cache = container_of(head, struct ghes_estatus_cache, rcu);
	ghes_estatus_cache_free(cache);
}
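/*
 * Pick a victim slot for the new record: an empty slot if there is
 * one, else an expired entry, else the entry with the longest average
 * interval between hits (duration / (count + 1)), i.e. the least
 * useful one. The slot is swapped with cmpxchg so that concurrent
 * adders cannot both free the same old entry.
 */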
static void ghes_estatus_cache_add(
	struct acpi_hest_generic *generic,
	struct acpi_generic_status *estatus)
{
	int i, slot = -1, count;
	unsigned long long now, duration, period, max_period = 0;
	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;

	new_cache = ghes_estatus_cache_alloc(generic, estatus);
	if (new_cache == NULL)
		return;
	rcu_read_lock();
	now = sched_clock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL) {
			slot = i;
			slot_cache = NULL;
			break;
		}
		duration = now - cache->time_in;
		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
			slot = i;
			slot_cache = cache;
			break;
		}
		count = atomic_read(&cache->count);
		period = duration;
		do_div(period, (count + 1));
		if (period > max_period) {
			max_period = period;
			slot = i;
			slot_cache = cache;
		}
	}
	/* new_cache must be put into array after its contents are written */
	smp_wmb();
	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
				  slot_cache, new_cache) == slot_cache) {
		if (slot_cache)
			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
	} else
		ghes_estatus_cache_free(new_cache);
	rcu_read_unlock();
}
static int ghes_proc(struct ghes *ghes)
{
	int rc;

	rc = ghes_read_estatus(ghes, 0);
	if (rc)
		goto out;
	if (!ghes_estatus_cached(ghes->estatus)) {
		if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
			ghes_estatus_cache_add(ghes->generic, ghes->estatus);
	}
	ghes_do_proc(ghes, ghes->estatus);
out:
	ghes_clear_estatus(ghes);

	return rc;
}
static void ghes_add_timer(struct ghes *ghes)
{
	struct acpi_hest_generic *g = ghes->generic;
	unsigned long expire;

	if (!g->notify.poll_interval) {
		pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
			   g->header.source_id);
		return;
	}
	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
	ghes->timer.expires = round_jiffies_relative(expire);
	add_timer(&ghes->timer);
}
static void ghes_poll_func(unsigned long data)
{
	struct ghes *ghes = (void *)data;

	ghes_proc(ghes);
	if (!(ghes->flags & GHES_EXITING))
		ghes_add_timer(ghes);
}
static irqreturn_t ghes_irq_func(int irq, void *data)
{
	struct ghes *ghes = data;
	int rc;

	rc = ghes_proc(ghes);
	if (rc)
		return IRQ_NONE;

	return IRQ_HANDLED;
}
static int ghes_notify_sci(struct notifier_block *this,
			   unsigned long event, void *data)
{
	struct ghes *ghes;
	int ret = NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(ghes, &ghes_sci, list) {
		if (!ghes_proc(ghes))
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}
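/*
 * llist_add() pushes new nodes at the head, so entries come off the
 * lock-less list newest-first; reverse the chain in place to recover
 * the order in which the errors arrived.
 */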
static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
{
	struct llist_node *next, *tail = NULL;

	while (llnode) {
		next = llnode->next;
		llnode->next = tail;
		tail = llnode;
		llnode = next;
	}

	return tail;
}
static void ghes_proc_in_irq(struct irq_work *irq_work)
{
	struct llist_node *llnode, *next;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_generic_status *estatus;
	u32 len, node_len;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * Because the time order of estatus in list is reversed,
	 * revert it back to proper order.
	 */
	llnode = llist_nodes_reverse(llnode);
	while (llnode) {
		next = llnode->next;
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = cper_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		ghes_do_proc(estatus_node->ghes, estatus);
		if (!ghes_estatus_cached(estatus)) {
			generic = estatus_node->generic;
			if (ghes_print_estatus(NULL, generic, estatus))
				ghes_estatus_cache_add(generic, estatus);
		}
		gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
			      node_len);
		llnode = next;
	}
}
static void ghes_print_queued_estatus(void)
{
	struct llist_node *llnode;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_generic_status *estatus;
	u32 len, node_len;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * Because the time order of estatus in list is reversed,
	 * revert it back to proper order.
	 */
	llnode = llist_nodes_reverse(llnode);
	while (llnode) {
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = cper_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		generic = estatus_node->generic;
		ghes_print_estatus(NULL, generic, estatus);
		llnode = llnode->next;
	}
}
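/*
 * NMI handler: scan all NMI-notified sources, panic immediately on a
 * fatal error, and otherwise queue the saved records onto the
 * lock-less list for printing and handling from IRQ context, since
 * printk and most recovery paths are not NMI-safe. The queueing path
 * needs an NMI-safe cmpxchg, hence the
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG guards.
 */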
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
	struct ghes *ghes, *ghes_global = NULL;
	int sev, sev_global = -1;
	int ret = NMI_DONE;

	raw_spin_lock(&ghes_nmi_lock);
	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
		if (ghes_read_estatus(ghes, 1)) {
			ghes_clear_estatus(ghes);
			continue;
		}
		sev = ghes_severity(ghes->estatus->error_severity);
		if (sev > sev_global) {
			sev_global = sev;
			ghes_global = ghes;
		}
		ret = NMI_HANDLED;
	}

	if (ret == NMI_DONE)
		goto out;

	if (sev_global >= GHES_SEV_PANIC) {
		oops_begin();
		ghes_print_queued_estatus();
		__ghes_print_estatus(KERN_EMERG, ghes_global->generic,
				     ghes_global->estatus);
		/* reboot to log the error! */
		if (panic_timeout == 0)
			panic_timeout = ghes_panic_timeout;
		panic("Fatal hardware error!");
	}

	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
		u32 len, node_len;
		struct ghes_estatus_node *estatus_node;
		struct acpi_generic_status *estatus;
#endif
		if (!(ghes->flags & GHES_TO_CLEAR))
			continue;
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
		if (ghes_estatus_cached(ghes->estatus))
			goto next;
		/* Save estatus for further processing in IRQ context */
		len = cper_estatus_len(ghes->estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
						      node_len);
		if (estatus_node) {
			estatus_node->ghes = ghes;
			estatus_node->generic = ghes->generic;
			estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
			memcpy(estatus, ghes->estatus, len);
			llist_add(&estatus_node->llnode, &ghes_estatus_llist);
		}
next:
#endif
		ghes_clear_estatus(ghes);
	}
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	irq_work_queue(&ghes_proc_irq_work);
#endif

out:
	raw_spin_unlock(&ghes_nmi_lock);

	return ret;
}
static struct notifier_block ghes_notifier_sci = {
	.notifier_call = ghes_notify_sci,
};
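/*
 * Worst-case number of bytes needed to buffer this source's records
 * while they sit on the NMI queue: one full error block per
 * preallocated record, clamped to sane bounds.
 */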
static unsigned long ghes_esource_prealloc_size(
	const struct acpi_hest_generic *generic)
{
	unsigned long block_length, prealloc_records, prealloc_size;

	block_length = min_t(unsigned long, generic->error_block_length,
			     GHES_ESTATUS_MAX_SIZE);
	prealloc_records = max_t(unsigned long,
				 generic->records_to_preallocate, 1);
	prealloc_size = min_t(unsigned long, block_length * prealloc_records,
			      GHES_ESOURCE_PREALLOC_MAX_SIZE);

	return prealloc_size;
}
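/*
 * One platform device is created for each Generic Error Source in the
 * HEST; probe validates the source and wires up whichever notification
 * method (polling, external IRQ, SCI or NMI) the firmware asked for.
 */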
static int ghes_probe(struct platform_device *ghes_dev)
{
	struct acpi_hest_generic *generic;
	struct ghes *ghes = NULL;
	unsigned long len;
	int rc = -EINVAL;

	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
	if (!generic->enabled)
		return -ENODEV;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
	case ACPI_HEST_NOTIFY_EXTERNAL:
	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_NMI:
		break;
	case ACPI_HEST_NOTIFY_LOCAL:
		pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
			   generic->header.source_id);
		goto err;
	default:
		pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
			   generic->notify.type, generic->header.source_id);
		goto err;
	}

	rc = -EIO;
	if (generic->error_block_length <
	    sizeof(struct acpi_generic_status)) {
		pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
			   generic->error_block_length,
			   generic->header.source_id);
		goto err;
	}
	ghes = ghes_new(generic);
	if (IS_ERR(ghes)) {
		rc = PTR_ERR(ghes);
		ghes = NULL;
		goto err;
	}

	rc = ghes_edac_register(ghes, &ghes_dev->dev);
	if (rc < 0)
		goto err;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		ghes->timer.function = ghes_poll_func;
		ghes->timer.data = (unsigned long)ghes;
		init_timer_deferrable(&ghes->timer);
		ghes_add_timer(ghes);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		/* External interrupt vector is GSI */
		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
		if (rc) {
			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err_edac_unreg;
		}
		rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
		if (rc) {
			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err_edac_unreg;
		}
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_sci))
			register_acpi_hed_notifier(&ghes_notifier_sci);
		list_add_rcu(&ghes->list, &ghes_sci);
		mutex_unlock(&ghes_list_mutex);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		len = ghes_esource_prealloc_size(generic);
		ghes_estatus_pool_expand(len);
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_nmi))
			register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0,
					     "ghes");
		list_add_rcu(&ghes->list, &ghes_nmi);
		mutex_unlock(&ghes_list_mutex);
		break;
	default:
		BUG();
	}
	platform_set_drvdata(ghes_dev, ghes);

	return 0;
err_edac_unreg:
	ghes_edac_unregister(ghes);
err:
	if (ghes) {
		ghes_fini(ghes);
		kfree(ghes);
	}

	return rc;
}
static int ghes_remove(struct platform_device *ghes_dev)
{
	struct ghes *ghes;
	struct acpi_hest_generic *generic;
	unsigned long len;

	ghes = platform_get_drvdata(ghes_dev);
	generic = ghes->generic;

	ghes->flags |= GHES_EXITING;
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		del_timer_sync(&ghes->timer);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		free_irq(ghes->irq, ghes);
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_sci))
			unregister_acpi_hed_notifier(&ghes_notifier_sci);
		mutex_unlock(&ghes_list_mutex);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_nmi))
			unregister_nmi_handler(NMI_LOCAL, "ghes");
		mutex_unlock(&ghes_list_mutex);
		/*
		 * To synchronize with the NMI handler, ghes can only be
		 * freed after the NMI handler finishes.
		 */
		synchronize_rcu();
		len = ghes_esource_prealloc_size(generic);
		ghes_estatus_pool_shrink(len);
		break;
	default:
		BUG();
		break;
	}

	ghes_fini(ghes);
	ghes_edac_unregister(ghes);
	kfree(ghes);

	platform_set_drvdata(ghes_dev, NULL);

	return 0;
}
static struct platform_driver ghes_platform_driver = {
	.driver		= {
		.name	= "GHES",
		.owner	= THIS_MODULE,
	},
	.probe		= ghes_probe,
	.remove		= ghes_remove,
};
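/*
 * Module init: refuse to load when ACPI, HEST or GHES itself is
 * disabled, set up the atomic ioremap area and the NMI-safe memory
 * pool before registering the platform driver, then tell the firmware
 * via _OSC that the OS wants APEI firmware-first mode.
 */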
static int __init ghes_init(void)
{
	int rc;

	if (acpi_disabled)
		return -ENODEV;

	if (hest_disable) {
		pr_info(GHES_PFX "HEST is not enabled!\n");
		return -EINVAL;
	}

	if (ghes_disable) {
		pr_info(GHES_PFX "GHES is not enabled!\n");
		return -EINVAL;
	}

	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);

	rc = ghes_ioremap_init();
	if (rc)
		goto err;

	rc = ghes_estatus_pool_init();
	if (rc)
		goto err_ioremap_exit;

	rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
				      GHES_ESTATUS_CACHE_ALLOCED_MAX);
	if (rc)
		goto err_pool_exit;

	rc = platform_driver_register(&ghes_platform_driver);
	if (rc)
		goto err_pool_exit;

	rc = apei_osc_setup();
	if (rc == 0 && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
	else if (rc == 0 && !osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
	else if (rc && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
	else
		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");

	return 0;
err_pool_exit:
	ghes_estatus_pool_exit();
err_ioremap_exit:
	ghes_ioremap_exit();
err:
	return rc;
}
static void __exit ghes_exit(void)
{
	platform_driver_unregister(&ghes_platform_driver);
	ghes_estatus_pool_exit();
	ghes_ioremap_exit();
}
module_init(ghes_init);
module_exit(ghes_exit);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:GHES");