// SPDX-License-Identifier: GPL-2.0-only
/*
 * GHES/EDAC Linux driver
 *
 * Copyright (c) 2013 by Mauro Carvalho Chehab
 *
 * Red Hat Inc. https://www.redhat.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <acpi/ghes.h>
#include <linux/edac.h>
#include <linux/dmi.h>
#include "edac_module.h"
#include <ras/ras_event.h>
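/*
 * Driver flow, in brief: at registration time the system's DIMMs are
 * enumerated once from SMBIOS (DMI type 17) and attached to a single
 * logical memory controller; at error time the GHES core hands us a
 * CPER memory error record, which is decoded into an EDAC error
 * descriptor and reported against the matching DIMM.
 */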
struct ghes_pvt {
	struct mem_ctl_info *mci;

	/* Buffers for the error handling routine */
	char other_detail[400];
	char msg[80];
};
static refcount_t ghes_refcount = REFCOUNT_INIT(0);

/*
 * Access to ghes_pvt must be protected by ghes_lock. The spinlock
 * also provides the necessary (implicit) memory barrier for the SMP
 * case to make the pointer visible on another CPU.
 */
static struct ghes_pvt *ghes_pvt;
/*
 * This driver's representation of the system hardware, as collected
 * from DMI.
 */
static struct ghes_hw_desc {
	int num_dimms;
	struct dimm_info *dimms;
} ghes_hw;
/* GHES registration mutex */
static DEFINE_MUTEX(ghes_reg_mutex);

/*
 * Sync with other, potentially concurrent callers of
 * ghes_edac_report_mem_error(). We don't know what the
 * "inventive" firmware would do.
 */
static DEFINE_SPINLOCK(ghes_lock);

/* "ghes_edac.force_load=1" skips the platform check */
static bool __read_mostly force_load;
module_param(force_load, bool, 0);

static bool system_scanned;
/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
	u8 type;
	u8 length;
	u16 handle;
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 total_width;
	u16 data_width;
	u16 size;
	u8 form_factor;
	u8 device_set;
	u8 device_locator;
	u8 bank_locator;
	u8 memory_type;
	u16 type_detail;
	u16 speed;
	u8 manufacturer;
	u8 serial_number;
	u8 asset_tag;
	u8 part_number;
	u8 attributes;
	u32 extended_size;
	u16 conf_mem_clk_speed;
} __attribute__((__packed__));
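/* Find the dimm_info whose SMBIOS type 17 handle matches @handle. */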
static struct dimm_info *find_dimm_by_handle(struct mem_ctl_info *mci, u16 handle)
{
	struct dimm_info *dimm;

	mci_for_each_dimm(mci, dimm) {
		if (dimm->smbios_handle == handle)
			return dimm;
	}

	return NULL;
}
static void dimm_setup_label(struct dimm_info *dimm, u16 handle)
{
	const char *bank = NULL, *device = NULL;

	dmi_memdev_name(handle, &bank, &device);

	/* both strings must be non-zero */
	if (bank && *bank && device && *device)
		snprintf(dimm->label, sizeof(dimm->label), "%s %s", bank, device);
}
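/*
 * Fill one dimm_info from a DMI type 17 entry: size, memory type,
 * ECC mode (inferred from the width fields), grain and label.
 */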
static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry)
{
	u16 rdr_mask = BIT(7) | BIT(13);

	if (entry->size == 0xffff) {
		pr_info("Can't get DIMM%i size\n", dimm->idx);
		dimm->nr_pages = MiB_TO_PAGES(32);	/* Unknown */
	} else if (entry->size == 0x7fff) {
		dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
	} else {
		if (entry->size & BIT(15))
			dimm->nr_pages = MiB_TO_PAGES((entry->size & 0x7fff) << 10);
		else
			dimm->nr_pages = MiB_TO_PAGES(entry->size);
	}
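	/*
	 * The decoding above follows the SMBIOS 'size' encoding: 0xffff
	 * means the size is unknown, 0x7fff redirects to the 32-bit
	 * 'extended_size' field, and otherwise bit 15 flags KB rather
	 * than MB units for the low 15 bits.
	 */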
	switch (entry->memory_type) {
	case 0x12:
		if (entry->type_detail & BIT(13))
			dimm->mtype = MEM_RDDR;
		else
			dimm->mtype = MEM_DDR;
		break;
	case 0x13:
		if (entry->type_detail & BIT(13))
			dimm->mtype = MEM_RDDR2;
		else
			dimm->mtype = MEM_DDR2;
		break;
	case 0x14:
		dimm->mtype = MEM_FB_DDR2;
		break;
	case 0x18:
		if (entry->type_detail & BIT(12))
			dimm->mtype = MEM_NVDIMM;
		else if (entry->type_detail & BIT(13))
			dimm->mtype = MEM_RDDR3;
		else
			dimm->mtype = MEM_DDR3;
		break;
	case 0x1a:
		if (entry->type_detail & BIT(12))
			dimm->mtype = MEM_NVDIMM;
		else if (entry->type_detail & BIT(13))
			dimm->mtype = MEM_RDDR4;
		else
			dimm->mtype = MEM_DDR4;
		break;
	default:
		if (entry->type_detail & BIT(6))
			dimm->mtype = MEM_RMBS;
		else if ((entry->type_detail & rdr_mask) == rdr_mask)
			dimm->mtype = MEM_RDR;
		else if (entry->type_detail & BIT(7))
			dimm->mtype = MEM_SDR;
		else if (entry->type_detail & BIT(9))
			dimm->mtype = MEM_EDO;
		else
			dimm->mtype = MEM_UNKNOWN;
	}
	/*
	 * Actually, we can only detect if the memory has bits for
	 * checksum or not
	 */
	if (entry->total_width == entry->data_width)
		dimm->edac_mode = EDAC_NONE;
	else
		dimm->edac_mode = EDAC_SECDED;

	dimm->dtype = DEV_UNKNOWN;
	dimm->grain = 128;		/* Likely, worst case */

	dimm_setup_label(dimm, entry->handle);

	if (dimm->nr_pages) {
		edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
			 dimm->idx, edac_mem_types[dimm->mtype],
			 PAGES_TO_MiB(dimm->nr_pages),
			 (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
		edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
			 entry->memory_type, entry->type_detail,
			 entry->total_width, entry->data_width);
	}

	dimm->smbios_handle = entry->handle;
}
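/*
 * dmi_walk() callback: called once per DMI entry; grows the dimms
 * array in chunks of 16 and fills one element per memory device.
 */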
static void enumerate_dimms(const struct dmi_header *dh, void *arg)
{
	struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
	struct ghes_hw_desc *hw = (struct ghes_hw_desc *)arg;
	struct dimm_info *d;

	if (dh->type != DMI_ENTRY_MEM_DEVICE)
		return;

	/* Enlarge the array with additional 16 */
	if (!hw->num_dimms || !(hw->num_dimms % 16)) {
		struct dimm_info *new;

		new = krealloc_array(hw->dimms, hw->num_dimms + 16,
				     sizeof(struct dimm_info), GFP_KERNEL);
		if (!new) {
			WARN_ON_ONCE(1);
			return;
		}

		hw->dimms = new;
	}

	d = &hw->dimms[hw->num_dimms];
	d->idx = hw->num_dimms;

	assign_dmi_dimm_info(d, entry);

	hw->num_dimms++;
}
static void ghes_scan_system(void)
{
	if (system_scanned)
		return;

	dmi_walk(enumerate_dimms, &ghes_hw);

	system_scanned = true;
}
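/*
 * Decode one CPER memory error record into the MC's pre-allocated
 * EDAC error descriptor and hand it to the EDAC core. Runs under
 * ghes_lock, in IRQ context (GHES defers error processing from NMI).
 */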
void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
{
	struct edac_raw_error_desc *e;
	struct mem_ctl_info *mci;
	struct ghes_pvt *pvt;
	unsigned long flags;
	char *p;

	/*
	 * We can do the locking below because GHES defers error processing
	 * from NMI to IRQ context. Whenever that changes, we'd at least
	 * know.
	 */
	if (WARN_ON_ONCE(in_nmi()))
		return;

	spin_lock_irqsave(&ghes_lock, flags);

	pvt = ghes_pvt;
	if (!pvt)
		goto unlock;

	mci = pvt->mci;
	e = &mci->error_desc;

	/* Cleans the error report buffer */
	memset(e, 0, sizeof (*e));
	e->error_count = 1;
	e->grain = 1;
	e->msg = pvt->msg;
	e->other_detail = pvt->other_detail;
	e->top_layer = -1;
	e->mid_layer = -1;
	e->low_layer = -1;
	*pvt->other_detail = '\0';
	*pvt->msg = '\0';

	switch (sev) {
	case GHES_SEV_CORRECTED:
		e->type = HW_EVENT_ERR_CORRECTED;
		break;
	case GHES_SEV_RECOVERABLE:
		e->type = HW_EVENT_ERR_UNCORRECTED;
		break;
	case GHES_SEV_PANIC:
		e->type = HW_EVENT_ERR_FATAL;
		break;
	default:
	case GHES_SEV_NO:
		e->type = HW_EVENT_ERR_INFO;
	}
	edac_dbg(1, "error validation_bits: 0x%08llx\n",
		 (long long)mem_err->validation_bits);

	/* Error type, mapped on e->msg */
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
		p = pvt->msg;
		switch (mem_err->error_type) {
		case 0:
			p += sprintf(p, "Unknown");
			break;
		case 1:
			p += sprintf(p, "No error");
			break;
		case 2:
			p += sprintf(p, "Single-bit ECC");
			break;
		case 3:
			p += sprintf(p, "Multi-bit ECC");
			break;
		case 4:
			p += sprintf(p, "Single-symbol ChipKill ECC");
			break;
		case 5:
			p += sprintf(p, "Multi-symbol ChipKill ECC");
			break;
		case 6:
			p += sprintf(p, "Master abort");
			break;
		case 7:
			p += sprintf(p, "Target abort");
			break;
		case 8:
			p += sprintf(p, "Parity Error");
			break;
		case 9:
			p += sprintf(p, "Watchdog timeout");
			break;
		case 10:
			p += sprintf(p, "Invalid address");
			break;
		case 11:
			p += sprintf(p, "Mirror Broken");
			break;
		case 12:
			p += sprintf(p, "Memory Sparing");
			break;
		case 13:
			p += sprintf(p, "Scrub corrected error");
			break;
		case 14:
			p += sprintf(p, "Scrub uncorrected error");
			break;
		case 15:
			p += sprintf(p, "Physical Memory Map-out event");
			break;
		default:
			p += sprintf(p, "reserved error (%d)",
				     mem_err->error_type);
		}
	} else {
		strcpy(pvt->msg, "unknown error");
	}
	/* Error address */
	if (mem_err->validation_bits & CPER_MEM_VALID_PA) {
		e->page_frame_number = PHYS_PFN(mem_err->physical_addr);
		e->offset_in_page = offset_in_page(mem_err->physical_addr);
	}

	/* Error grain */
	if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
		e->grain = ~mem_err->physical_addr_mask + 1;
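	/*
	 * ~physical_addr_mask + 1 converts the mask of valid address bits
	 * into the size of the reported granule, e.g. a mask covering a
	 * 64-byte cacheline yields a grain of 64.
	 */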
	/* Memory error location, mapped on e->location */
	p = e->location;
	if (mem_err->validation_bits & CPER_MEM_VALID_NODE)
		p += sprintf(p, "node:%d ", mem_err->node);
	if (mem_err->validation_bits & CPER_MEM_VALID_CARD)
		p += sprintf(p, "card:%d ", mem_err->card);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
		p += sprintf(p, "module:%d ", mem_err->module);
	if (mem_err->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
		p += sprintf(p, "rank:%d ", mem_err->rank);
	if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
		p += sprintf(p, "bank:%d ", mem_err->bank);
	if (mem_err->validation_bits & CPER_MEM_VALID_BANK_GROUP)
		p += sprintf(p, "bank_group:%d ",
			     mem_err->bank >> CPER_MEM_BANK_GROUP_SHIFT);
	if (mem_err->validation_bits & CPER_MEM_VALID_BANK_ADDRESS)
		p += sprintf(p, "bank_address:%d ",
			     mem_err->bank & CPER_MEM_BANK_ADDRESS_MASK);
	if (mem_err->validation_bits & (CPER_MEM_VALID_ROW | CPER_MEM_VALID_ROW_EXT)) {
		u32 row = mem_err->row;

		row |= cper_get_mem_extension(mem_err->validation_bits, mem_err->extended);
		p += sprintf(p, "row:%d ", row);
	}
	if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN)
		p += sprintf(p, "col:%d ", mem_err->column);
	if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
		p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
		const char *bank = NULL, *device = NULL;
		struct dimm_info *dimm;

		dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
		if (bank != NULL && device != NULL)
			p += sprintf(p, "DIMM location:%s %s ", bank, device);
		else
			p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
				     mem_err->mem_dev_handle);

		dimm = find_dimm_by_handle(mci, mem_err->mem_dev_handle);
		if (dimm) {
			e->top_layer = dimm->idx;
			strcpy(e->label, dimm->label);
		}
	}
	if (mem_err->validation_bits & CPER_MEM_VALID_CHIP_ID)
		p += sprintf(p, "chipID: %d ",
			     mem_err->extended >> CPER_MEM_CHIP_ID_SHIFT);
	if (p > e->location)
		*(p - 1) = '\0';

	if (!*e->label)
		strcpy(e->label, "unknown memory");
	/* All other fields are mapped on e->other_detail */
	p = pvt->other_detail;
	p += snprintf(p, sizeof(pvt->other_detail),
		      "APEI location: %s ", e->location);
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
		u64 status = mem_err->error_status;

		p += sprintf(p, "status(0x%016llx): ", (long long)status);
		switch ((status >> 8) & 0xff) {
		case 1:
			p += sprintf(p, "Error detected internal to the component ");
			break;
		case 16:
			p += sprintf(p, "Error detected in the bus ");
			break;
		case 4:
			p += sprintf(p, "Storage error in DRAM memory ");
			break;
		case 5:
			p += sprintf(p, "Storage error in TLB ");
			break;
		case 6:
			p += sprintf(p, "Storage error in cache ");
			break;
		case 7:
			p += sprintf(p, "Error in one or more functional units ");
			break;
		case 8:
			p += sprintf(p, "component failed self test ");
			break;
		case 9:
			p += sprintf(p, "Overflow or undervalue of internal queue ");
			break;
		case 17:
			p += sprintf(p, "Virtual address not found on IO-TLB or IO-PDIR ");
			break;
		case 18:
			p += sprintf(p, "Improper access error ");
			break;
		case 19:
			p += sprintf(p, "Access to a memory address which is not mapped to any component ");
			break;
		case 20:
			p += sprintf(p, "Loss of Lockstep ");
			break;
		case 21:
			p += sprintf(p, "Response not associated with a request ");
			break;
		case 22:
			p += sprintf(p, "Bus parity error - must also set the A, C, or D Bits ");
			break;
		case 23:
			p += sprintf(p, "Detection of a PATH_ERROR ");
			break;
		case 25:
			p += sprintf(p, "Bus operation timeout ");
			break;
		case 26:
			p += sprintf(p, "A read was issued to data that has been poisoned ");
			break;
		default:
			p += sprintf(p, "reserved ");
			break;
		}
	}
	if (mem_err->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
		p += sprintf(p, "requestorID: 0x%016llx ",
			     (long long)mem_err->requestor_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
		p += sprintf(p, "responderID: 0x%016llx ",
			     (long long)mem_err->responder_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_TARGET_ID)
		p += sprintf(p, "targetID: 0x%016llx ",
			     (long long)mem_err->target_id);
	if (p > pvt->other_detail)
		*(p - 1) = '\0';

	edac_raw_mc_handle_error(e);

unlock:
	spin_unlock_irqrestore(&ghes_lock, flags);
}
/*
 * Known systems that are safe to enable this module.
 */
static struct acpi_platform_list plat_list[] = {
	{"HPE   ", "Server  ", 0, ACPI_SIG_FADT, all_versions},
	{ } /* End */
};
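/*
 * Register a single EDAC memory controller on behalf of GHES. Only the
 * first caller actually registers; later callers just bump ghes_refcount.
 */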
int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
	bool fake = false;
	struct mem_ctl_info *mci;
	struct ghes_pvt *pvt;
	struct edac_mc_layer layers[1];
	unsigned long flags;
	int idx = -1;
	int rc = 0;

	if (IS_ENABLED(CONFIG_X86)) {
		/* Check if safe to enable on this system */
		idx = acpi_match_platform_list(plat_list);
		if (!force_load && idx < 0)
			return -ENODEV;
	} else {
		force_load = true;
		idx = 0;
	}

	/* finish another registration/unregistration instance first */
	mutex_lock(&ghes_reg_mutex);

	/*
	 * We have only one logical memory controller to which all DIMMs belong.
	 */
	if (refcount_inc_not_zero(&ghes_refcount))
		goto unlock;

	ghes_scan_system();

	/* Check if we've got a bogus BIOS */
	if (!ghes_hw.num_dimms) {
		fake = true;
		ghes_hw.num_dimms = 1;
	}

	layers[0].type = EDAC_MC_LAYER_ALL_MEM;
	layers[0].size = ghes_hw.num_dimms;
	layers[0].is_virt_csrow = true;
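	/*
	 * A single EDAC_MC_LAYER_ALL_MEM layer is used: the real
	 * controller topology isn't known here, so every enumerated DIMM
	 * is exposed as one entry of this flat layer.
	 */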
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_pvt));
	if (!mci) {
		pr_info("Can't allocate memory for EDAC data\n");
		rc = -ENOMEM;
		goto unlock;
	}

	pvt		= mci->pvt_info;
	pvt->mci	= mci;

	mci->pdev = dev;
	mci->mtype_cap = MEM_FLAG_EMPTY;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "ghes_edac.c";
	mci->ctl_name = "ghes_edac";
	mci->dev_name = "ghes";

	if (fake) {
		pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
		pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
		pr_info("work on such system. Use this driver with caution\n");
	} else if (idx < 0) {
		pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
		pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
		pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
		pr_info("If you find incorrect reports, please contact your hardware vendor\n");
		pr_info("to correct its BIOS.\n");
		pr_info("This system has %d DIMM sockets.\n", ghes_hw.num_dimms);
	}
	if (!fake) {
		struct dimm_info *src, *dst;
		int i = 0;

		mci_for_each_dimm(mci, dst) {
			src = &ghes_hw.dimms[i];

			dst->idx	   = src->idx;
			dst->smbios_handle = src->smbios_handle;
			dst->nr_pages	   = src->nr_pages;
			dst->mtype	   = src->mtype;
			dst->edac_mode	   = src->edac_mode;
			dst->dtype	   = src->dtype;
			dst->grain	   = src->grain;

			/*
			 * If no src->label, preserve default label assigned
			 * from EDAC core.
			 */
			if (strlen(src->label))
				memcpy(dst->label, src->label, sizeof(src->label));

			i++;
		}
	} else {
		struct dimm_info *dimm = edac_get_dimm(mci, 0, 0, 0);

		dimm->nr_pages = 1;
		dimm->grain = 128;
		dimm->mtype = MEM_UNKNOWN;
		dimm->dtype = DEV_UNKNOWN;
		dimm->edac_mode = EDAC_SECDED;
	}
	rc = edac_mc_add_mc(mci);
	if (rc < 0) {
		pr_info("Can't register with the EDAC core\n");
		edac_mc_free(mci);
		rc = -ENODEV;
		goto unlock;
	}

	spin_lock_irqsave(&ghes_lock, flags);
	ghes_pvt = pvt;
	spin_unlock_irqrestore(&ghes_lock, flags);

	/* only set on success */
	refcount_set(&ghes_refcount, 1);

unlock:

	/* Not needed anymore */
	kfree(ghes_hw.dimms);
	ghes_hw.dimms = NULL;

	mutex_unlock(&ghes_reg_mutex);

	return rc;
}
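/*
 * Drop a GHES reference; the last caller tears down the EDAC memory
 * controller, synchronizing with the error handler via ghes_lock.
 */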
void ghes_edac_unregister(struct ghes *ghes)
{
	struct mem_ctl_info *mci;
	unsigned long flags;

	mutex_lock(&ghes_reg_mutex);

	system_scanned = false;
	memset(&ghes_hw, 0, sizeof(struct ghes_hw_desc));

	if (!refcount_dec_and_test(&ghes_refcount))
		goto unlock;

	/*
	 * Wait for the irq handler being finished.
	 */
	spin_lock_irqsave(&ghes_lock, flags);
	mci = ghes_pvt ? ghes_pvt->mci : NULL;
	ghes_pvt = NULL;
	spin_unlock_irqrestore(&ghes_lock, flags);

	if (!mci)
		goto unlock;

	mci = edac_mc_del_mc(mci->pdev);
	if (mci)
		edac_mc_free(mci);

unlock:
	mutex_unlock(&ghes_reg_mutex);
}