/*
 * APEI Generic Hardware Error Source support
 *
 * Generic Hardware Error Source provides a way to report platform
 * hardware errors (such as those from the chipset). It works in so
 * called "Firmware First" mode: hardware errors are reported to
 * firmware first, then forwarded to Linux by the firmware. This way,
 * the firmware can check some non-standard hardware error registers
 * or a non-standard hardware link to produce more hardware error
 * information for Linux.
 *
 * For more information about Generic Hardware Error Source, please
 * refer to ACPI Specification version 4.0, section 17.3.2.6
 *
 * Copyright 2010,2011 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/kdebug.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <acpi/apei.h>
#include <acpi/atomicio.h>
#include <acpi/hed.h>
#include <asm/mce.h>
#include <asm/tlbflush.h>

#include "apei-internal.h"
#define GHES_PFX	"GHES: "

#define GHES_ESTATUS_MAX_SIZE		65536
#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536

#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3

/* This is just an estimation for memory pool allocation */
#define GHES_ESTATUS_CACHE_AVG_SIZE	512

#define GHES_ESTATUS_CACHES_SIZE	4

#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
/* Prevent too many caches from being allocated because of RCU */
#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)

#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))

#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_node) + (estatus_len))
#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_node *)(estatus_node) + 1))
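/*
 * Layout note for the two FROM_* helpers above: the bookkeeping
 * struct and the error status block live in one allocation, with the
 * raw estatus data placed immediately after the struct, so "+ 1" on
 * the struct pointer yields the payload address:
 *
 *   +---------------------------+---------------------------------+
 *   | struct ghes_estatus_cache | struct acpi_hest_generic_status |
 *   | (or ghes_estatus_node)    | + CPER sections (estatus_len)   |
 *   +---------------------------+---------------------------------+
 */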
/*
 * One struct ghes is created for each generic hardware error source.
 * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
 * handler.
 *
 * estatus: memory buffer for error status block, allocated during
 * HEST parsing.
 */
#define GHES_TO_CLEAR		0x0001
#define GHES_EXITING		0x0002

struct ghes {
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;
	u64 buffer_paddr;
	unsigned long flags;
	union {
		struct list_head list;
		struct timer_list timer;
		unsigned int irq;
	};
};
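/*
 * The anonymous union above is safe because each error source uses
 * exactly one notification style: list links the source onto
 * ghes_sci/ghes_nmi, timer drives ACPI_HEST_NOTIFY_POLLED sources,
 * and irq holds the mapped vector of ACPI_HEST_NOTIFY_EXTERNAL
 * sources.
 */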
struct ghes_estatus_node {
	struct llist_node llnode;
	struct acpi_hest_generic *generic;
};

struct ghes_estatus_cache {
	u32 estatus_len;
	atomic_t count;
	struct acpi_hest_generic *generic;
	unsigned long long time_in;
	struct rcu_head rcu;
};

bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);
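/*
 * Usage note (inferred from the parameter name above, not spelled
 * out in this file): with module_param_named(), GHES can be turned
 * off from the kernel command line, e.g. "ghes.disable=1" for a
 * built-in kernel.
 */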
static int ghes_panic_timeout	__read_mostly = 30;
/*
 * All error sources notified with SCI share one notifier function,
 * so they need to be linked and checked one by one.  The same
 * applies to NMI.
 *
 * RCU is used for these lists, so ghes_list_mutex is only used for
 * list changing, not for traversing.
 */
static LIST_HEAD(ghes_sci);
static LIST_HEAD(ghes_nmi);
static DEFINE_MUTEX(ghes_list_mutex);

/*
 * NMI may be triggered on any CPU, so ghes_nmi_lock is used to
 * serialize NMI handling on different CPUs.
 */
static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
/*
 * Because the memory area used to transfer hardware error information
 * from BIOS to Linux can be determined only in an NMI, IRQ or timer
 * handler, and a general ioremap cannot be used in atomic context, a
 * special version of atomic ioremap is implemented for that purpose.
 */

/*
 * Two virtual pages are used, one for NMI context, the other for
 * IRQ/PROCESS context.
 */
#define GHES_IOREMAP_PAGES		2
#define GHES_IOREMAP_NMI_PAGE(base)	(base)
#define GHES_IOREMAP_IRQ_PAGE(base)	((base) + PAGE_SIZE)

/* virtual memory area for atomic ioremap */
static struct vm_struct *ghes_ioremap_area;
/*
 * These two spinlocks are used to prevent the atomic ioremap virtual
 * memory areas from being mapped simultaneously.
 */
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
/*
 * printk is not safe in NMI context.  So in the NMI handler, we
 * allocate the required memory from the lock-less memory allocator
 * (ghes_estatus_pool), save the estatus into it, put it onto the
 * lock-less list (ghes_estatus_llist), then delay the printk into
 * IRQ context via irq_work (ghes_proc_irq_work).
 * ghes_estatus_pool_size_request records the pool size required by
 * all NMI error sources.
 */
static struct gen_pool *ghes_estatus_pool;
static unsigned long ghes_estatus_pool_size_request;
static struct llist_head ghes_estatus_llist;
static struct irq_work ghes_proc_irq_work;

struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;
static int ghes_ioremap_init(void)
{
	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
		VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!ghes_ioremap_area) {
		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
		return -ENOMEM;
	}

	return 0;
}

static void ghes_ioremap_exit(void)
{
	free_vm_area(ghes_ioremap_area);
}
static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{
	unsigned long vaddr;

	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);

	return (void __iomem *)vaddr;
}

static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
{
	unsigned long vaddr;

	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);

	return (void __iomem *)vaddr;
}

static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	__flush_tlb_one(vaddr);
}

static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	__flush_tlb_one(vaddr);
}
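/*
 * Note on the two iounmap helpers above: each context always maps at
 * its own fixed virtual page and unmaps it under the matching lock,
 * so only the local TLB entry for that single page needs flushing.
 * That is why unmap_kernel_range_noflush() plus one __flush_tlb_one()
 * is enough here, instead of a full TLB flush that would not be safe
 * in atomic context.
 */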
static int ghes_estatus_pool_init(void)
{
	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!ghes_estatus_pool)
		return -ENOMEM;
	return 0;
}

static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
					      struct gen_pool_chunk *chunk,
					      void *data)
{
	free_page(chunk->start_addr);
}

static void ghes_estatus_pool_exit(void)
{
	gen_pool_for_each_chunk(ghes_estatus_pool,
				ghes_estatus_pool_free_chunk_page, NULL);
	gen_pool_destroy(ghes_estatus_pool);
}
static int ghes_estatus_pool_expand(unsigned long len)
{
	unsigned long i, pages, size, addr;
	int ret;

	ghes_estatus_pool_size_request += PAGE_ALIGN(len);
	size = gen_pool_size(ghes_estatus_pool);
	if (size >= ghes_estatus_pool_size_request)
		return 0;
	pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
	for (i = 0; i < pages; i++) {
		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			return -ENOMEM;
		ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
		if (ret)
			return ret;
	}

	return 0;
}

static void ghes_estatus_pool_shrink(unsigned long len)
{
	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
}
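/*
 * Note (based on the gen_pool API of this kernel, which cannot remove
 * chunks from a live pool): shrinking only lowers the recorded size
 * request; pages already added to ghes_estatus_pool are returned to
 * the system only in ghes_estatus_pool_exit().
 */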
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
	struct ghes *ghes;
	unsigned int error_block_length;
	int rc;

	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
	if (!ghes)
		return ERR_PTR(-ENOMEM);
	ghes->generic = generic;
	rc = acpi_pre_map_gar(&generic->error_status_address);
	if (rc)
		goto err_free;
	error_block_length = generic->error_block_length;
	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
		pr_warning(FW_WARN GHES_PFX
			   "Error status block length is too long: %u for "
			   "generic hardware error source: %d.\n",
			   error_block_length, generic->header.source_id);
		error_block_length = GHES_ESTATUS_MAX_SIZE;
	}
	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
	if (!ghes->estatus) {
		rc = -ENOMEM;
		goto err_unmap;
	}

	return ghes;

err_unmap:
	acpi_post_unmap_gar(&generic->error_status_address);
err_free:
	kfree(ghes);
	return ERR_PTR(rc);
}

static void ghes_fini(struct ghes *ghes)
{
	kfree(ghes->estatus);
	acpi_post_unmap_gar(&ghes->generic->error_status_address);
}
enum {
	GHES_SEV_NO = 0x0,
	GHES_SEV_CORRECTED = 0x1,
	GHES_SEV_RECOVERABLE = 0x2,
	GHES_SEV_PANIC = 0x3,
};

static inline int ghes_severity(int severity)
{
	switch (severity) {
	case CPER_SEV_INFORMATIONAL:
		return GHES_SEV_NO;
	case CPER_SEV_CORRECTED:
		return GHES_SEV_CORRECTED;
	case CPER_SEV_RECOVERABLE:
		return GHES_SEV_RECOVERABLE;
	case CPER_SEV_FATAL:
		return GHES_SEV_PANIC;
	default:
		/* Unknown, go panic */
		return GHES_SEV_PANIC;
	}
}
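/*
 * Copy between a kernel buffer and physical memory in page-sized (or
 * smaller) chunks: each iteration maps one physical page through the
 * context-appropriate atomic ioremap slot, copies the part of the
 * buffer that falls within that page, then unmaps it again.  "trunk"
 * is the chunk length, capped by both the remaining length and the
 * distance to the end of the current page.
 */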
static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
				  int from_phys)
{
	void __iomem *vaddr;
	unsigned long flags = 0;
	int in_nmi = in_nmi();
	u64 offset;
	u32 trunk;

	while (len > 0) {
		offset = paddr - (paddr & PAGE_MASK);
		if (in_nmi) {
			raw_spin_lock(&ghes_ioremap_lock_nmi);
			vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
		} else {
			spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
			vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
		}
		trunk = PAGE_SIZE - offset;
		trunk = min(trunk, len);
		if (from_phys)
			memcpy_fromio(buffer, vaddr + offset, trunk);
		else
			memcpy_toio(vaddr + offset, buffer, trunk);
		len -= trunk;
		paddr += trunk;
		buffer += trunk;
		if (in_nmi) {
			ghes_iounmap_nmi(vaddr);
			raw_spin_unlock(&ghes_ioremap_lock_nmi);
		} else {
			ghes_iounmap_irq(vaddr);
			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
		}
	}
}
static int ghes_read_estatus(struct ghes *ghes, int silent)
{
	struct acpi_hest_generic *g = ghes->generic;
	u64 buf_paddr;
	u32 len;
	int rc;

	rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
	if (rc) {
		if (!silent && printk_ratelimit())
			pr_warning(FW_WARN GHES_PFX
				   "Failed to read error status block address for hardware error source: %d.\n",
				   g->header.source_id);
		return -EIO;
	}
	if (!buf_paddr)
		return -ENOENT;

	ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
			      sizeof(*ghes->estatus), 1);
	if (!ghes->estatus->block_status)
		return -ENOENT;

	ghes->buffer_paddr = buf_paddr;
	ghes->flags |= GHES_TO_CLEAR;

	rc = -EIO;
	len = apei_estatus_len(ghes->estatus);
	if (len < sizeof(*ghes->estatus))
		goto err_read_block;
	if (len > ghes->generic->error_block_length)
		goto err_read_block;
	if (apei_estatus_check_header(ghes->estatus))
		goto err_read_block;
	ghes_copy_tofrom_phys(ghes->estatus + 1,
			      buf_paddr + sizeof(*ghes->estatus),
			      len - sizeof(*ghes->estatus), 1);
	if (apei_estatus_check(ghes->estatus))
		goto err_read_block;
	rc = 0;

err_read_block:
	if (rc && !silent && printk_ratelimit())
		pr_warning(FW_WARN GHES_PFX
			   "Failed to read error status block!\n");
	return rc;
}
static void ghes_clear_estatus(struct ghes *ghes)
{
	ghes->estatus->block_status = 0;
	if (!(ghes->flags & GHES_TO_CLEAR))
		return;
	ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
			      sizeof(ghes->estatus->block_status), 0);
	ghes->flags &= ~GHES_TO_CLEAR;
}
static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
{
	int sev, sec_sev;
	struct acpi_hest_generic_data *gdata;

	sev = ghes_severity(estatus->error_severity);
	apei_estatus_for_each_section(estatus, gdata) {
		sec_sev = ghes_severity(gdata->error_severity);
		if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				 CPER_SEC_PLATFORM_MEM)) {
			struct cper_sec_mem_err *mem_err;
			mem_err = (struct cper_sec_mem_err *)(gdata + 1);
#ifdef CONFIG_X86_MCE
			apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
						  mem_err);
#endif
#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
			if (sev == GHES_SEV_RECOVERABLE &&
			    sec_sev == GHES_SEV_RECOVERABLE &&
			    mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
				unsigned long pfn;
				pfn = mem_err->physical_addr >> PAGE_SHIFT;
				memory_failure_queue(pfn, 0, 0);
			}
#endif
		}
	}
}
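/*
 * Only CPER platform memory error sections are acted upon above:
 * they are forwarded to the x86 MCE log and, for recoverable errors
 * carrying a valid physical address, queued for memory_failure()
 * based recovery.  Sections of other types are still printed by the
 * estatus printing path but trigger no extra handling here.
 */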
static void __ghes_print_estatus(const char *pfx,
				 const struct acpi_hest_generic *generic,
				 const struct acpi_hest_generic_status *estatus)
{
	if (pfx == NULL) {
		if (ghes_severity(estatus->error_severity) <=
		    GHES_SEV_CORRECTED)
			pfx = KERN_WARNING HW_ERR;
		else
			pfx = KERN_ERR HW_ERR;
	}
	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx, generic->header.source_id);
	apei_estatus_print(pfx, estatus);
}
static int ghes_print_estatus(const char *pfx,
			      const struct acpi_hest_generic *generic,
			      const struct acpi_hest_generic_status *estatus)
{
	/* Not more than 2 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
	struct ratelimit_state *ratelimit;

	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
		ratelimit = &ratelimit_corrected;
	else
		ratelimit = &ratelimit_uncorrected;
	if (__ratelimit(ratelimit)) {
		__ghes_print_estatus(pfx, generic, estatus);
		return 1;
	}

	return 0;
}
/*
 * GHES error status reporting throttle, to report more kinds of
 * errors, instead of just the most frequently occurring ones.
 */
static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
{
	u32 len;
	int i, cached = 0;
	unsigned long long now;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	len = apei_estatus_len(estatus);
	rcu_read_lock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL)
			continue;
		if (len != cache->estatus_len)
			continue;
		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
		if (memcmp(estatus, cache_estatus, len))
			continue;
		atomic_inc(&cache->count);
		now = sched_clock();
		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
			cached = 1;
		break;
	}
	rcu_read_unlock();
	return cached;
}
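/*
 * The throttle above matches byte-for-byte: an estatus is considered
 * cached (and hence not printed again) only if an identical error
 * status block was seen within GHES_ESTATUS_IN_CACHE_MAX_NSEC (10 s).
 * The hit counter is bumped even for expired entries, so the eviction
 * heuristic in ghes_estatus_cache_add() can favour keeping frequently
 * seen errors cached.
 */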
static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int alloced;
	u32 len, cache_len;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	len = apei_estatus_len(estatus);
	cache_len = GHES_ESTATUS_CACHE_LEN(len);
	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
	if (!cache) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
	memcpy(cache_estatus, estatus, len);
	cache->estatus_len = len;
	atomic_set(&cache->count, 0);
	cache->generic = generic;
	cache->time_in = sched_clock();

	return cache;
}
static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
{
	u32 len;

	len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
	len = GHES_ESTATUS_CACHE_LEN(len);
	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
	atomic_dec(&ghes_estatus_cache_alloced);
}

static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
{
	struct ghes_estatus_cache *cache;

	cache = container_of(head, struct ghes_estatus_cache, rcu);
	ghes_estatus_cache_free(cache);
}
static void ghes_estatus_cache_add(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int i, slot = -1, count;
	unsigned long long now, duration, period, max_period = 0;
	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;

	new_cache = ghes_estatus_cache_alloc(generic, estatus);
	if (new_cache == NULL)
		return;
	rcu_read_lock();
	now = sched_clock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL) {
			slot = i;
			slot_cache = NULL;
			break;
		}
		duration = now - cache->time_in;
		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
			slot = i;
			slot_cache = cache;
			break;
		}
		count = atomic_read(&cache->count);
		period = duration;
		do_div(period, (count + 1));
		if (period > max_period) {
			max_period = period;
			slot = i;
			slot_cache = cache;
		}
	}
	/* new_cache must be put into array after its contents are written */
	smp_wmb();
	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
				  slot_cache, new_cache) == slot_cache) {
		if (slot_cache)
			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
	} else
		ghes_estatus_cache_free(new_cache);
	rcu_read_unlock();
}
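/*
 * Slot selection above, in words: take an empty slot or an expired
 * entry if one exists; otherwise evict the entry with the longest
 * average period between hits, duration / (count + 1), i.e. the least
 * frequently seen error.  Illustrative numbers: an entry cached 8 s
 * ago with 15 hits has a period of 0.5 s, while one cached 2 s ago
 * with no hits has a period of 2 s, so the latter is evicted first.
 */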
static int ghes_proc(struct ghes *ghes)
{
	int rc;

	rc = ghes_read_estatus(ghes, 0);
	if (rc)
		goto out;
	if (!ghes_estatus_cached(ghes->estatus)) {
		if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
			ghes_estatus_cache_add(ghes->generic, ghes->estatus);
	}
	ghes_do_proc(ghes->estatus);
out:
	ghes_clear_estatus(ghes);

	return rc;
}
static void ghes_add_timer(struct ghes *ghes)
{
	struct acpi_hest_generic *g = ghes->generic;
	unsigned long expire;

	if (!g->notify.poll_interval) {
		pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
			   g->header.source_id);
		return;
	}
	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
	ghes->timer.expires = round_jiffies_relative(expire);
	add_timer(&ghes->timer);
}
static void ghes_poll_func(unsigned long data)
{
	struct ghes *ghes = (void *)data;

	ghes_proc(ghes);
	if (!(ghes->flags & GHES_EXITING))
		ghes_add_timer(ghes);
}

static irqreturn_t ghes_irq_func(int irq, void *data)
{
	struct ghes *ghes = data;
	int rc;

	rc = ghes_proc(ghes);
	if (rc)
		return IRQ_NONE;

	return IRQ_HANDLED;
}
static int ghes_notify_sci(struct notifier_block *this,
			   unsigned long event, void *data)
{
	struct ghes *ghes;
	int ret = NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(ghes, &ghes_sci, list) {
		if (!ghes_proc(ghes))
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}
static void ghes_proc_in_irq(struct irq_work *irq_work)
{
	struct llist_node *llnode, *next, *tail = NULL;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;
	u32 len, node_len;

	/*
	 * Because the time order of estatus in the list is reversed,
	 * revert it back to the proper order.
	 */
	llnode = llist_del_all(&ghes_estatus_llist);
	while (llnode) {
		next = llnode->next;
		llnode->next = tail;
		tail = llnode;
		llnode = next;
	}
	llnode = tail;
	while (llnode) {
		next = llnode->next;
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = apei_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		ghes_do_proc(estatus);
		if (!ghes_estatus_cached(estatus)) {
			generic = estatus_node->generic;
			if (ghes_print_estatus(NULL, generic, estatus))
				ghes_estatus_cache_add(generic, estatus);
		}
		gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
			      node_len);
		llnode = next;
	}
}
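/*
 * ghes_proc_in_irq() is the IRQ-context half of the NMI pipeline:
 * the NMI handler only copies each estatus into a pool-backed node
 * and queues this irq_work, while printing, caching and recovery all
 * happen here, where printk() and memory_failure_queue() are safe to
 * call.
 */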
static int ghes_notify_nmi(struct notifier_block *this,
			   unsigned long cmd, void *data)
{
	struct ghes *ghes, *ghes_global = NULL;
	int sev, sev_global = -1;
	int ret = NOTIFY_DONE;

	if (cmd != DIE_NMI)
		return ret;

	raw_spin_lock(&ghes_nmi_lock);
	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
		if (ghes_read_estatus(ghes, 1)) {
			ghes_clear_estatus(ghes);
			continue;
		}
		sev = ghes_severity(ghes->estatus->error_severity);
		if (sev > sev_global) {
			sev_global = sev;
			ghes_global = ghes;
		}
		ret = NOTIFY_STOP;
	}

	if (ret == NOTIFY_DONE)
		goto out;

	if (sev_global >= GHES_SEV_PANIC) {
		oops_begin();
		__ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
				     ghes_global->estatus);
		/* reboot to log the error! */
		if (panic_timeout == 0)
			panic_timeout = ghes_panic_timeout;
		panic("Fatal hardware error!");
	}

	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
		u32 len, node_len;
		struct ghes_estatus_node *estatus_node;
		struct acpi_hest_generic_status *estatus;
#endif
		if (!(ghes->flags & GHES_TO_CLEAR))
			continue;
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
		if (ghes_estatus_cached(ghes->estatus))
			goto next;
		/* Save estatus for further processing in IRQ context */
		len = apei_estatus_len(ghes->estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
						      node_len);
		if (estatus_node) {
			estatus_node->generic = ghes->generic;
			estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
			memcpy(estatus, ghes->estatus, len);
			llist_add(&estatus_node->llnode, &ghes_estatus_llist);
		}
next:
#endif
		ghes_clear_estatus(ghes);
	}
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	irq_work_queue(&ghes_proc_irq_work);
#endif

out:
	raw_spin_unlock(&ghes_nmi_lock);
	return ret;
}
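/*
 * Shape of the NMI handler above: a first pass reads every NMI
 * notified source and tracks the worst severity; panic-level errors
 * are logged and escalate to panic() immediately, with panic_timeout
 * set so the machine reboots and the error survives in the log.  A
 * second pass stashes the remaining, non-cached records for IRQ
 * context processing; that requires an NMI-safe cmpxchg for the
 * lock-less list and so depends on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 */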
static struct notifier_block ghes_notifier_sci = {
	.notifier_call = ghes_notify_sci,
};

static struct notifier_block ghes_notifier_nmi = {
	.notifier_call = ghes_notify_nmi,
};
static unsigned long ghes_esource_prealloc_size(
	const struct acpi_hest_generic *generic)
{
	unsigned long block_length, prealloc_records, prealloc_size;

	block_length = min_t(unsigned long, generic->error_block_length,
			     GHES_ESTATUS_MAX_SIZE);
	prealloc_records = max_t(unsigned long,
				 generic->records_to_preallocate, 1);
	prealloc_size = min_t(unsigned long, block_length * prealloc_records,
			      GHES_ESOURCE_PREALLOC_MAX_SIZE);

	return prealloc_size;
}
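/*
 * Worked example (illustrative numbers only): for a source with
 * error_block_length = 4096 and records_to_preallocate = 8, the
 * preallocation request is min(4096 * 8, 65536) = 32768 bytes, i.e.
 * eight pages added to ghes_estatus_pool when such a source is
 * probed with NMI notification.
 */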
static int __devinit ghes_probe(struct platform_device *ghes_dev)
{
	struct acpi_hest_generic *generic;
	struct ghes *ghes = NULL;
	unsigned long len;
	int rc = -EINVAL;

	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
	if (!generic->enabled)
		return -ENODEV;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
	case ACPI_HEST_NOTIFY_EXTERNAL:
	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_NMI:
		break;
	case ACPI_HEST_NOTIFY_LOCAL:
		pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
			   generic->header.source_id);
		goto err;
	default:
		pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
			   generic->notify.type, generic->header.source_id);
		goto err;
	}

	rc = -EIO;
	if (generic->error_block_length <
	    sizeof(struct acpi_hest_generic_status)) {
		pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
			   generic->error_block_length,
			   generic->header.source_id);
		goto err;
	}
	ghes = ghes_new(generic);
	if (IS_ERR(ghes)) {
		rc = PTR_ERR(ghes);
		ghes = NULL;
		goto err;
	}
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		ghes->timer.function = ghes_poll_func;
		ghes->timer.data = (unsigned long)ghes;
		init_timer_deferrable(&ghes->timer);
		ghes_add_timer(ghes);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		/* External interrupt vector is GSI */
		if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err;
		}
		if (request_irq(ghes->irq, ghes_irq_func,
				0, "GHES IRQ", ghes)) {
			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err;
		}
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_sci))
			register_acpi_hed_notifier(&ghes_notifier_sci);
		list_add_rcu(&ghes->list, &ghes_sci);
		mutex_unlock(&ghes_list_mutex);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		len = ghes_esource_prealloc_size(generic);
		ghes_estatus_pool_expand(len);
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_nmi))
			register_die_notifier(&ghes_notifier_nmi);
		list_add_rcu(&ghes->list, &ghes_nmi);
		mutex_unlock(&ghes_list_mutex);
		break;
	default:
		BUG();
	}
	platform_set_drvdata(ghes_dev, ghes);

	return 0;
err:
	if (ghes) {
		ghes_fini(ghes);
		kfree(ghes);
	}
	return rc;
}
static int __devexit ghes_remove(struct platform_device *ghes_dev)
{
	struct ghes *ghes;
	struct acpi_hest_generic *generic;
	unsigned long len;

	ghes = platform_get_drvdata(ghes_dev);
	generic = ghes->generic;

	ghes->flags |= GHES_EXITING;
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		del_timer_sync(&ghes->timer);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		free_irq(ghes->irq, ghes);
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_sci))
			unregister_acpi_hed_notifier(&ghes_notifier_sci);
		mutex_unlock(&ghes_list_mutex);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_nmi))
			unregister_die_notifier(&ghes_notifier_nmi);
		mutex_unlock(&ghes_list_mutex);
		/*
		 * To synchronize with the NMI handler, ghes can only be
		 * freed after the NMI handler finishes.
		 */
		synchronize_rcu();
		len = ghes_esource_prealloc_size(generic);
		ghes_estatus_pool_shrink(len);
		break;
	default:
		BUG();
		break;
	}

	ghes_fini(ghes);
	kfree(ghes);

	platform_set_drvdata(ghes_dev, NULL);

	return 0;
}
static struct platform_driver ghes_platform_driver = {
	.driver		= {
		.name	= "GHES",
		.owner	= THIS_MODULE,
	},
	.probe		= ghes_probe,
	.remove		= ghes_remove,
};
static int __init ghes_init(void)
{
	int rc;

	if (acpi_disabled)
		return -ENODEV;

	if (hest_disable) {
		pr_info(GHES_PFX "HEST is not enabled!\n");
		return -EINVAL;
	}

	if (ghes_disable) {
		pr_info(GHES_PFX "GHES is not enabled!\n");
		return -EINVAL;
	}

	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);

	rc = ghes_ioremap_init();
	if (rc)
		goto err;

	rc = ghes_estatus_pool_init();
	if (rc)
		goto err_ioremap_exit;

	rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
				      GHES_ESTATUS_CACHE_ALLOCED_MAX);
	if (rc)
		goto err_pool_exit;

	rc = platform_driver_register(&ghes_platform_driver);
	if (rc)
		goto err_pool_exit;

	rc = apei_osc_setup();
	if (rc == 0 && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
	else if (rc == 0 && !osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
	else if (rc && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
	else
		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");

	return 0;
err_pool_exit:
	ghes_estatus_pool_exit();
err_ioremap_exit:
	ghes_ioremap_exit();
err:
	return rc;
}
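/*
 * The four messages above reflect the two independent ways firmware
 * first mode can be granted: the APEI bit in the standard _OSC
 * capabilities exchange (osc_sb_apei_support_acked) and the WHEA
 * _OSC evaluated by apei_osc_setup().  Either one is sufficient;
 * only when both fail is firmware first mode reported as disabled.
 */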
static void __exit ghes_exit(void)
{
	platform_driver_unregister(&ghes_platform_driver);
	ghes_estatus_pool_exit();
	ghes_ioremap_exit();
}

module_init(ghes_init);
module_exit(ghes_exit);

MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:GHES");