/*
 * APEI Generic Hardware Error Source support
 *
 * Generic Hardware Error Source provides a way to report platform
 * hardware errors (such as those from chipsets). It works in the
 * so-called "Firmware First" mode: hardware errors are reported to
 * firmware first, and firmware then reports them to Linux. This way,
 * firmware can inspect non-standard hardware error registers or
 * non-standard hardware links to produce richer hardware error
 * information for Linux.
 *
 * For more information about Generic Hardware Error Source, please
 * refer to the ACPI Specification version 4.0, section 17.3.2.6.
 *
 * Copyright 2010,2011 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/kdebug.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/nmi.h>

#include <acpi/ghes.h>
#include <acpi/apei.h>
#include <asm/tlbflush.h>

#include "apei-internal.h"
#define GHES_PFX	"GHES: "

#define GHES_ESTATUS_MAX_SIZE		65536
#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536

#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3

/* This is just an estimation for memory pool allocation */
#define GHES_ESTATUS_CACHE_AVG_SIZE	512

#define GHES_ESTATUS_CACHES_SIZE	4

#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
/* Prevent too many caches from being allocated because of RCU */
#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)
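/*
 * With the defaults above, at most GHES_ESTATUS_CACHES_SIZE * 3 / 2 = 6
 * cache entries may be allocated at once; together with the 512-byte
 * average estatus size, this is why ghes_init() pre-expands the estatus
 * pool by 512 * 6 = 3072 bytes for the cache.
 */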
#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))

#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_node) + (estatus_len))
#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_node *)(estatus_node) + 1))
/*
 * This driver isn't really modular, however for the time being,
 * continuing to use module_param is the easiest way to remain
 * compatible with existing boot arg use cases.
 */
bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);
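/* For example, booting with "ghes.disable=1" turns this driver off. */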
/*
 * All error sources notified with SCI share one notifier function,
 * so they need to be linked and checked one by one. This applies
 * to NMI too.
 *
 * RCU is used for these lists, so ghes_list_mutex is only used for
 * list changing, not for traversing.
 */
static LIST_HEAD(ghes_sci);
static DEFINE_MUTEX(ghes_list_mutex);
/*
 * The memory area used to transfer hardware error information from
 * BIOS to Linux can be determined only in an NMI, IRQ or timer
 * handler, but the regular ioremap can not be used in atomic context,
 * so a special atomic version of ioremap is implemented here.
 */

/*
 * Two virtual pages are used, one for IRQ/PROCESS context, the other for
 * NMI context (optionally).
 */
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#define GHES_IOREMAP_PAGES	2
#else
#define GHES_IOREMAP_PAGES	1
#endif
#define GHES_IOREMAP_IRQ_PAGE(base)	(base)
#define GHES_IOREMAP_NMI_PAGE(base)	((base) + PAGE_SIZE)
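/*
 * Layout of ghes_ioremap_area: the IRQ/process-context mapping always
 * uses the first virtual page, and (when NMI support is built in) the
 * NMI-context mapping uses the second. Each context remaps only its own
 * fixed page under its own lock, so the two never collide.
 */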
/* virtual memory area for atomic ioremap */
static struct vm_struct *ghes_ioremap_area;
/*
 * These two spinlocks are used to prevent the atomic ioremap virtual
 * memory area from being mapped simultaneously.
 */
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);

static struct gen_pool *ghes_estatus_pool;
static unsigned long ghes_estatus_pool_size_request;

static struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;
static int ghes_ioremap_init(void)
{
	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
					  VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!ghes_ioremap_area) {
		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
		return -ENOMEM;
	}

	return 0;
}

static void ghes_ioremap_exit(void)
{
	free_vm_area(ghes_ioremap_area);
}
static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{
	unsigned long vaddr;

	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);

	return (void __iomem *)vaddr;
}
static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
{
	unsigned long vaddr, paddr;
	pgprot_t prot;

	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);

	paddr = pfn << PAGE_SHIFT;
	prot = arch_apei_get_mem_attribute(paddr);

	ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);

	return (void __iomem *)vaddr;
}
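/*
 * Note: unmap_kernel_range_noflush() below clears the page table entry
 * but does not flush the TLB; since only a single, known page is
 * unmapped, flushing just that one entry with arch_apei_flush_tlb_one()
 * is sufficient and safe even in NMI context.
 */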
static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	arch_apei_flush_tlb_one(vaddr);
}
static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	arch_apei_flush_tlb_one(vaddr);
}
static int ghes_estatus_pool_init(void)
{
	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!ghes_estatus_pool)
		return -ENOMEM;
	return 0;
}
static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
					      struct gen_pool_chunk *chunk,
					      void *data)
{
	free_page(chunk->start_addr);
}

static void ghes_estatus_pool_exit(void)
{
	gen_pool_for_each_chunk(ghes_estatus_pool,
				ghes_estatus_pool_free_chunk_page, NULL);
	gen_pool_destroy(ghes_estatus_pool);
}
static int ghes_estatus_pool_expand(unsigned long len)
{
	unsigned long i, pages, size, addr;
	int ret;

	ghes_estatus_pool_size_request += PAGE_ALIGN(len);
	size = gen_pool_size(ghes_estatus_pool);
	if (size >= ghes_estatus_pool_size_request)
		return 0;
	pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
	for (i = 0; i < pages; i++) {
		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			return -ENOMEM;
		ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
		if (ret)
			return ret;
	}

	return 0;
}
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
	struct ghes *ghes;
	unsigned int error_block_length;
	int rc;

	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
	if (!ghes)
		return ERR_PTR(-ENOMEM);
	ghes->generic = generic;
	rc = apei_map_generic_address(&generic->error_status_address);
	if (rc)
		goto err_free;
	error_block_length = generic->error_block_length;
	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
		pr_warning(FW_WARN GHES_PFX
			   "Error status block length is too long: %u for "
			   "generic hardware error source: %d.\n",
			   error_block_length, generic->header.source_id);
		error_block_length = GHES_ESTATUS_MAX_SIZE;
	}
	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
	if (!ghes->estatus) {
		rc = -ENOMEM;
		goto err_unmap;
	}

	return ghes;

err_unmap:
	apei_unmap_generic_address(&generic->error_status_address);
err_free:
	kfree(ghes);
	return ERR_PTR(rc);
}
static void ghes_fini(struct ghes *ghes)
{
	kfree(ghes->estatus);
	apei_unmap_generic_address(&ghes->generic->error_status_address);
}
static inline int ghes_severity(int severity)
{
	switch (severity) {
	case CPER_SEV_INFORMATIONAL:
		return GHES_SEV_NO;
	case CPER_SEV_CORRECTED:
		return GHES_SEV_CORRECTED;
	case CPER_SEV_RECOVERABLE:
		return GHES_SEV_RECOVERABLE;
	case CPER_SEV_FATAL:
		return GHES_SEV_PANIC;
	default:
		/* Unknown, go panic */
		return GHES_SEV_PANIC;
	}
}
static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
				  int from_phys)
{
	void __iomem *vaddr;
	unsigned long flags = 0;
	int in_nmi = in_nmi();
	u64 offset;
	u32 trunk;

	while (len > 0) {
		offset = paddr - (paddr & PAGE_MASK);
		if (in_nmi) {
			raw_spin_lock(&ghes_ioremap_lock_nmi);
			vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
		} else {
			spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
			vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
		}
		trunk = PAGE_SIZE - offset;
		trunk = min(trunk, len);
		if (from_phys)
			memcpy_fromio(buffer, vaddr + offset, trunk);
		else
			memcpy_toio(vaddr + offset, buffer, trunk);
		len -= trunk;
		paddr += trunk;
		buffer += trunk;
		if (in_nmi) {
			ghes_iounmap_nmi(vaddr);
			raw_spin_unlock(&ghes_ioremap_lock_nmi);
		} else {
			ghes_iounmap_irq(vaddr);
			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
		}
	}
}
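/*
 * Reading is done in two steps: first only the fixed-size status block
 * header is copied in, which is enough to obtain and sanity-check the
 * total record length; only then is the remainder of the record copied
 * in behind the header.
 */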
static int ghes_read_estatus(struct ghes *ghes, int silent)
{
	struct acpi_hest_generic *g = ghes->generic;
	u64 buf_paddr;
	u32 len;
	int rc;

	rc = apei_read(&buf_paddr, &g->error_status_address);
	if (rc) {
		if (!silent && printk_ratelimit())
			pr_warning(FW_WARN GHES_PFX
	"Failed to read error status block address for hardware error source: %d.\n",
				   g->header.source_id);
		return -EIO;
	}
	if (!buf_paddr)
		return -ENOENT;

	ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
			      sizeof(*ghes->estatus), 1);
	if (!ghes->estatus->block_status)
		return -ENOENT;

	ghes->buffer_paddr = buf_paddr;
	ghes->flags |= GHES_TO_CLEAR;

	rc = -EIO;
	len = cper_estatus_len(ghes->estatus);
	if (len < sizeof(*ghes->estatus))
		goto err_read_block;
	if (len > ghes->generic->error_block_length)
		goto err_read_block;
	if (cper_estatus_check_header(ghes->estatus))
		goto err_read_block;
	ghes_copy_tofrom_phys(ghes->estatus + 1,
			      buf_paddr + sizeof(*ghes->estatus),
			      len - sizeof(*ghes->estatus), 1);
	if (cper_estatus_check(ghes->estatus))
		goto err_read_block;
	rc = 0;

err_read_block:
	if (rc && !silent && printk_ratelimit())
		pr_warning(FW_WARN GHES_PFX
			   "Failed to read error status block!\n");
	return rc;
}
static void ghes_clear_estatus(struct ghes *ghes)
{
	ghes->estatus->block_status = 0;
	if (!(ghes->flags & GHES_TO_CLEAR))
		return;
	ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
			      sizeof(ghes->estatus->block_status), 0);
	ghes->flags &= ~GHES_TO_CLEAR;
}
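/*
 * Two classes of memory error are acted upon below: a corrected error
 * whose threshold was exceeded gets its page soft-offlined (the data is
 * still intact), while a recoverable error goes through the full
 * memory_failure handling.  Anything else is left alone.
 */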
static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
{
#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
	unsigned long pfn;
	int flags = -1;
	int sec_sev = ghes_severity(gdata->error_severity);
	struct cper_sec_mem_err *mem_err;
	mem_err = (struct cper_sec_mem_err *)(gdata + 1);

	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
		return;

	pfn = mem_err->physical_addr >> PAGE_SHIFT;
	if (!pfn_valid(pfn)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Invalid address in generic error data: %#llx\n",
				    mem_err->physical_addr);
		return;
	}

	/* Act iff the following two kinds of events can be handled properly by now */
	if (sec_sev == GHES_SEV_CORRECTED &&
	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
		flags = MF_SOFT_OFFLINE;
	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
		flags = 0;

	if (flags != -1)
		memory_failure_queue(pfn, 0, flags);
#endif
}
static void ghes_do_proc(struct ghes *ghes,
			 const struct acpi_hest_generic_status *estatus)
{
	int sev, sec_sev;
	struct acpi_hest_generic_data *gdata;

	sev = ghes_severity(estatus->error_severity);
	apei_estatus_for_each_section(estatus, gdata) {
		sec_sev = ghes_severity(gdata->error_severity);
		if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				 CPER_SEC_PLATFORM_MEM)) {
			struct cper_sec_mem_err *mem_err;
			mem_err = (struct cper_sec_mem_err *)(gdata + 1);
			ghes_edac_report_mem_error(ghes, sev, mem_err);

			arch_apei_report_mem_error(sev, mem_err);
			ghes_handle_memory_failure(gdata, sev);
		}
#ifdef CONFIG_ACPI_APEI_PCIEAER
		else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				      CPER_SEC_PCIE)) {
			struct cper_sec_pcie *pcie_err;
			pcie_err = (struct cper_sec_pcie *)(gdata + 1);
			if (sev == GHES_SEV_RECOVERABLE &&
			    sec_sev == GHES_SEV_RECOVERABLE &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
				unsigned int devfn;
				int aer_severity;

				devfn = PCI_DEVFN(pcie_err->device_id.device,
						  pcie_err->device_id.function);
				aer_severity = cper_severity_to_aer(sev);

				/*
				 * If firmware reset the component to contain
				 * the error, we must reinitialize it before
				 * use, so treat it as a fatal AER error.
				 */
				if (gdata->flags & CPER_SEC_RESET)
					aer_severity = AER_FATAL;

				aer_recover_queue(pcie_err->device_id.segment,
						  pcie_err->device_id.bus,
						  devfn, aer_severity,
						  (struct aer_capability_regs *)
						  pcie_err->aer_info);
			}
		}
#endif
	}
}
static void __ghes_print_estatus(const char *pfx,
				 const struct acpi_hest_generic *generic,
				 const struct acpi_hest_generic_status *estatus)
{
	static atomic_t seqno;
	unsigned int curr_seqno;
	char pfx_seq[64];

	if (pfx == NULL) {
		if (ghes_severity(estatus->error_severity) <=
		    GHES_SEV_CORRECTED)
			pfx = KERN_WARNING;
		else
			pfx = KERN_ERR;
	}
	curr_seqno = atomic_inc_return(&seqno);
	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx_seq, generic->header.source_id);
	cper_estatus_print(pfx_seq, estatus);
}
static int ghes_print_estatus(const char *pfx,
			      const struct acpi_hest_generic *generic,
			      const struct acpi_hest_generic_status *estatus)
{
	/* Not more than 2 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
	struct ratelimit_state *ratelimit;

	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
		ratelimit = &ratelimit_corrected;
	else
		ratelimit = &ratelimit_uncorrected;
	if (__ratelimit(ratelimit)) {
		__ghes_print_estatus(pfx, generic, estatus);
		return 1;
	}
	return 0;
}
/*
 * GHES error status reporting throttle, to report more kinds of
 * errors instead of just the most frequently occurring ones.
 */
static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
{
	u32 len;
	int i, cached = 0;
	unsigned long long now;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	len = cper_estatus_len(estatus);
	rcu_read_lock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL)
			continue;
		if (len != cache->estatus_len)
			continue;
		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
		if (memcmp(estatus, cache_estatus, len))
			continue;
		atomic_inc(&cache->count);
		now = sched_clock();
		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
			cached = 1;
		break;
	}
	rcu_read_unlock();
	return cached;
}
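/*
 * In effect, a record that byte-for-byte matches a cached one reported
 * less than GHES_ESTATUS_IN_CACHE_MAX_NSEC (10s) ago is treated as
 * cached and its logging is suppressed, while the hit is still counted
 * for the slot-replacement policy in ghes_estatus_cache_add().
 */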
static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int alloced;
	u32 len, cache_len;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	len = cper_estatus_len(estatus);
	cache_len = GHES_ESTATUS_CACHE_LEN(len);
	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
	if (!cache) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
	memcpy(cache_estatus, estatus, len);
	cache->estatus_len = len;
	atomic_set(&cache->count, 0);
	cache->generic = generic;
	cache->time_in = sched_clock();
	return cache;
}
static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
{
	u32 len;

	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
	len = GHES_ESTATUS_CACHE_LEN(len);
	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
	atomic_dec(&ghes_estatus_cache_alloced);
}

static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
{
	struct ghes_estatus_cache *cache;

	cache = container_of(head, struct ghes_estatus_cache, rcu);
	ghes_estatus_cache_free(cache);
}
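/*
 * Slot selection policy for the cache below: prefer an empty slot, then
 * an expired one; otherwise evict the entry with the largest mean
 * period between hits (duration / (count + 1)), i.e. the least
 * frequently reported error.
 */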
static void ghes_estatus_cache_add(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int i, slot = -1, count;
	unsigned long long now, duration, period, max_period = 0;
	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;

	new_cache = ghes_estatus_cache_alloc(generic, estatus);
	if (new_cache == NULL)
		return;
	rcu_read_lock();
	now = sched_clock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL) {
			slot = i;
			slot_cache = NULL;
			break;
		}
		duration = now - cache->time_in;
		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
			slot = i;
			slot_cache = cache;
			break;
		}
		count = atomic_read(&cache->count);
		period = duration;
		do_div(period, (count + 1));
		if (period > max_period) {
			max_period = period;
			slot = i;
			slot_cache = cache;
		}
	}
	/* new_cache must be put into array after its contents are written */
	smp_wmb();
	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
				  slot_cache, new_cache) == slot_cache) {
		if (slot_cache)
			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
	} else
		ghes_estatus_cache_free(new_cache);
	rcu_read_unlock();
}
static int ghes_proc(struct ghes *ghes)
{
	int rc;

	rc = ghes_read_estatus(ghes, 0);
	if (rc)
		goto out;
	if (!ghes_estatus_cached(ghes->estatus)) {
		if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
			ghes_estatus_cache_add(ghes->generic, ghes->estatus);
	}
	ghes_do_proc(ghes, ghes->estatus);
out:
	ghes_clear_estatus(ghes);
	return rc;
}
static void ghes_add_timer(struct ghes *ghes)
{
	struct acpi_hest_generic *g = ghes->generic;
	unsigned long expire;

	if (!g->notify.poll_interval) {
		pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
			   g->header.source_id);
		return;
	}
	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
	ghes->timer.expires = round_jiffies_relative(expire);
	add_timer(&ghes->timer);
}
static void ghes_poll_func(unsigned long data)
{
	struct ghes *ghes = (void *)data;

	ghes_proc(ghes);
	if (!(ghes->flags & GHES_EXITING))
		ghes_add_timer(ghes);
}
static irqreturn_t ghes_irq_func(int irq, void *data)
{
	struct ghes *ghes = data;
	int rc;

	rc = ghes_proc(ghes);
	if (rc)
		return IRQ_NONE;

	return IRQ_HANDLED;
}
static int ghes_notify_sci(struct notifier_block *this,
			   unsigned long event, void *data)
{
	struct ghes *ghes;
	int ret = NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(ghes, &ghes_sci, list) {
		if (!ghes_proc(ghes))
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block ghes_notifier_sci = {
	.notifier_call = ghes_notify_sci,
};
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
/*
 * printk is not safe in NMI context.  So in the NMI handler, we
 * allocate the required memory from the lock-less memory allocator
 * (ghes_estatus_pool), save the estatus into it, put it on a lock-less
 * list (ghes_estatus_llist), then delay printk into IRQ context via
 * irq_work (ghes_proc_irq_work).  ghes_estatus_pool_size_request
 * records the pool size required by all NMI error sources.
 */
static struct llist_head ghes_estatus_llist;
static struct irq_work ghes_proc_irq_work;

/*
 * NMI may be triggered on any CPU, so ghes_in_nmi is used for
 * having only one concurrent reader.
 */
static atomic_t ghes_in_nmi = ATOMIC_INIT(0);

static LIST_HEAD(ghes_nmi);
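/*
 * Seconds before the forced reboot on a fatal error; used in
 * __ghes_panic() below as the fallback for panic_timeout when no
 * timeout was configured.
 */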
static int ghes_panic_timeout __read_mostly = 30;
static void ghes_proc_in_irq(struct irq_work *irq_work)
{
	struct llist_node *llnode, *next;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;
	u32 len, node_len;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * Because the time order of estatus in list is reversed,
	 * revert it back to proper order.
	 */
	llnode = llist_reverse_order(llnode);
	while (llnode) {
		next = llnode->next;
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = cper_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		ghes_do_proc(estatus_node->ghes, estatus);
		if (!ghes_estatus_cached(estatus)) {
			generic = estatus_node->generic;
			if (ghes_print_estatus(NULL, generic, estatus))
				ghes_estatus_cache_add(generic, estatus);
		}
		gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
			      node_len);
		llnode = next;
	}
}
static void ghes_print_queued_estatus(void)
{
	struct llist_node *llnode;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;
	u32 len, node_len;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * Because the time order of estatus in list is reversed,
	 * revert it back to proper order.
	 */
	llnode = llist_reverse_order(llnode);
	while (llnode) {
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = cper_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		generic = estatus_node->generic;
		ghes_print_estatus(NULL, generic, estatus);
		llnode = llnode->next;
	}
}
/* Save estatus for further processing in IRQ context */
static void __process_error(struct ghes *ghes)
{
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	u32 len, node_len;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic_status *estatus;

	if (ghes_estatus_cached(ghes->estatus))
		return;

	len = cper_estatus_len(ghes->estatus);
	node_len = GHES_ESTATUS_NODE_LEN(len);

	estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
	if (!estatus_node)
		return;

	estatus_node->ghes = ghes;
	estatus_node->generic = ghes->generic;
	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
	memcpy(estatus, ghes->estatus, len);
	llist_add(&estatus_node->llnode, &ghes_estatus_llist);
#endif
}
static void __ghes_panic(struct ghes *ghes)
{
	ghes_print_queued_estatus();
	__ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus);

	/* reboot to log the error! */
	if (panic_timeout == 0)
		panic_timeout = ghes_panic_timeout;
	panic("Fatal hardware error!");
}
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
	struct ghes *ghes;
	int sev, ret = NMI_DONE;

	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
		return ret;

	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
		if (ghes_read_estatus(ghes, 1)) {
			ghes_clear_estatus(ghes);
			continue;
		}

		sev = ghes_severity(ghes->estatus->error_severity);
		if (sev >= GHES_SEV_PANIC)
			__ghes_panic(ghes);

		if (!(ghes->flags & GHES_TO_CLEAR))
			continue;

		__process_error(ghes);
		ghes_clear_estatus(ghes);

		ret = NMI_HANDLED;
	}

#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	irq_work_queue(&ghes_proc_irq_work);
#endif
	atomic_dec(&ghes_in_nmi);
	return ret;
}
static unsigned long ghes_esource_prealloc_size(
	const struct acpi_hest_generic *generic)
{
	unsigned long block_length, prealloc_records, prealloc_size;

	block_length = min_t(unsigned long, generic->error_block_length,
			     GHES_ESTATUS_MAX_SIZE);
	prealloc_records = max_t(unsigned long,
				 generic->records_to_preallocate, 1);
	prealloc_size = min_t(unsigned long, block_length * prealloc_records,
			      GHES_ESOURCE_PREALLOC_MAX_SIZE);

	return prealloc_size;
}
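/*
 * For example, an NMI error source with a 4KiB error block and
 * records_to_preallocate == 2 reserves 8KiB of pool space; the result
 * is always clamped to GHES_ESOURCE_PREALLOC_MAX_SIZE (64KiB).
 */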
static void ghes_estatus_pool_shrink(unsigned long len)
{
	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
}
static void ghes_nmi_add(struct ghes *ghes)
{
	unsigned long len;

	len = ghes_esource_prealloc_size(ghes->generic);
	ghes_estatus_pool_expand(len);
	mutex_lock(&ghes_list_mutex);
	if (list_empty(&ghes_nmi))
		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
	list_add_rcu(&ghes->list, &ghes_nmi);
	mutex_unlock(&ghes_list_mutex);
}

static void ghes_nmi_remove(struct ghes *ghes)
{
	unsigned long len;

	mutex_lock(&ghes_list_mutex);
	list_del_rcu(&ghes->list);
	if (list_empty(&ghes_nmi))
		unregister_nmi_handler(NMI_LOCAL, "ghes");
	mutex_unlock(&ghes_list_mutex);
	/*
	 * To synchronize with the NMI handler, ghes can only be
	 * freed after the NMI handler finishes.
	 */
	synchronize_rcu();
	len = ghes_esource_prealloc_size(ghes->generic);
	ghes_estatus_pool_shrink(len);
}
static void ghes_nmi_init_cxt(void)
{
	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
}
#else /* CONFIG_HAVE_ACPI_APEI_NMI */
static inline void ghes_nmi_add(struct ghes *ghes)
{
	pr_err(GHES_PFX "ID: %d, trying to add NMI notification which is not supported!\n",
	       ghes->generic->header.source_id);
	BUG();
}

static inline void ghes_nmi_remove(struct ghes *ghes)
{
	pr_err(GHES_PFX "ID: %d, trying to remove NMI notification which is not supported!\n",
	       ghes->generic->header.source_id);
	BUG();
}

static inline void ghes_nmi_init_cxt(void)
{
}
#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
static int ghes_probe(struct platform_device *ghes_dev)
{
	struct acpi_hest_generic *generic;
	struct ghes *ghes = NULL;
	int rc = -EINVAL;

	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
	if (!generic->enabled)
		return -ENODEV;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
	case ACPI_HEST_NOTIFY_EXTERNAL:
	case ACPI_HEST_NOTIFY_SCI:
		break;
	case ACPI_HEST_NOTIFY_NMI:
		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
			pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
				generic->header.source_id);
			goto err;
		}
		break;
	case ACPI_HEST_NOTIFY_LOCAL:
		pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
			   generic->header.source_id);
		goto err;
	default:
		pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
			   generic->notify.type, generic->header.source_id);
		goto err;
	}

	rc = -EIO;
	if (generic->error_block_length <
	    sizeof(struct acpi_hest_generic_status)) {
		pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
			   generic->error_block_length,
			   generic->header.source_id);
		goto err;
	}
	ghes = ghes_new(generic);
	if (IS_ERR(ghes)) {
		rc = PTR_ERR(ghes);
		ghes = NULL;
		goto err;
	}

	rc = ghes_edac_register(ghes, &ghes_dev->dev);
	if (rc < 0)
		goto err;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		ghes->timer.function = ghes_poll_func;
		ghes->timer.data = (unsigned long)ghes;
		init_timer_deferrable(&ghes->timer);
		ghes_add_timer(ghes);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		/* External interrupt vector is GSI */
		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
		if (rc) {
			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err_edac_unreg;
		}
		rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
		if (rc) {
			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err_edac_unreg;
		}
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_sci))
			register_acpi_hed_notifier(&ghes_notifier_sci);
		list_add_rcu(&ghes->list, &ghes_sci);
		mutex_unlock(&ghes_list_mutex);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		ghes_nmi_add(ghes);
		break;
	default:
		BUG();
	}
	platform_set_drvdata(ghes_dev, ghes);

	return 0;
err_edac_unreg:
	ghes_edac_unregister(ghes);
err:
	if (ghes) {
		ghes_fini(ghes);
		kfree(ghes);
	}
	return rc;
}
static int ghes_remove(struct platform_device *ghes_dev)
{
	struct ghes *ghes;
	struct acpi_hest_generic *generic;

	ghes = platform_get_drvdata(ghes_dev);
	generic = ghes->generic;

	ghes->flags |= GHES_EXITING;
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		del_timer_sync(&ghes->timer);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		free_irq(ghes->irq, ghes);
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_sci))
			unregister_acpi_hed_notifier(&ghes_notifier_sci);
		mutex_unlock(&ghes_list_mutex);
		synchronize_rcu();
		break;
	case ACPI_HEST_NOTIFY_NMI:
		ghes_nmi_remove(ghes);
		break;
	default:
		BUG();
		break;
	}

	ghes_fini(ghes);

	ghes_edac_unregister(ghes);

	kfree(ghes);

	platform_set_drvdata(ghes_dev, NULL);

	return 0;
}
static struct platform_driver ghes_platform_driver = {
	.driver		= {
		.name	= "GHES",
	},
	.probe		= ghes_probe,
	.remove		= ghes_remove,
};
static int __init ghes_init(void)
{
	int rc;

	if (acpi_disabled)
		return -ENODEV;

	if (hest_disable) {
		pr_info(GHES_PFX "HEST is not enabled!\n");
		return -EINVAL;
	}

	if (ghes_disable) {
		pr_info(GHES_PFX "GHES is not enabled!\n");
		return -EINVAL;
	}

	ghes_nmi_init_cxt();

	rc = ghes_ioremap_init();
	if (rc)
		goto err;

	rc = ghes_estatus_pool_init();
	if (rc)
		goto err_ioremap_exit;

	rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
				      GHES_ESTATUS_CACHE_ALLOCED_MAX);
	if (rc)
		goto err_pool_exit;

	rc = platform_driver_register(&ghes_platform_driver);
	if (rc)
		goto err_pool_exit;

	rc = apei_osc_setup();
	if (rc == 0 && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
	else if (rc == 0 && !osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
	else if (rc && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
	else
		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");

	return 0;
err_pool_exit:
	ghes_estatus_pool_exit();
err_ioremap_exit:
	ghes_ioremap_exit();
err:
	return rc;
}
device_initcall(ghes_init);