// SPDX-License-Identifier: GPL-2.0-only
/*
 * GHES/EDAC Linux driver
 *
 * Copyright (c) 2013 by Mauro Carvalho Chehab
 *
 * Red Hat Inc. http://www.redhat.com
 */
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <acpi/ghes.h>
13 #include <linux/edac.h>
14 #include <linux/dmi.h>
15 #include "edac_module.h"
16 #include <ras/ras_event.h>
19 struct mem_ctl_info
*mci
;
21 /* Buffers for the error handling routine */
22 char other_detail
[400];
26 static refcount_t ghes_refcount
= REFCOUNT_INIT(0);
29 * Access to ghes_pvt must be protected by ghes_lock. The spinlock
30 * also provides the necessary (implicit) memory barrier for the SMP
31 * case to make the pointer visible on another CPU.
33 static struct ghes_pvt
*ghes_pvt
;
36 * This driver's representation of the system hardware, as collected
41 struct dimm_info
*dimms
;
44 /* GHES registration mutex */
45 static DEFINE_MUTEX(ghes_reg_mutex
);
48 * Sync with other, potentially concurrent callers of
49 * ghes_edac_report_mem_error(). We don't know what the
50 * "inventive" firmware would do.
52 static DEFINE_SPINLOCK(ghes_lock
);
54 /* "ghes_edac.force_load=1" skips the platform check */
55 static bool __read_mostly force_load
;
56 module_param(force_load
, bool, 0);
58 /* Memory Device - Type 17 of SMBIOS spec */
59 struct memdev_dmi_entry
{
63 u16 phys_mem_array_handle
;
64 u16 mem_err_info_handle
;
81 u16 conf_mem_clk_speed
;
82 } __attribute__((__packed__
));
84 static struct dimm_info
*find_dimm_by_handle(struct mem_ctl_info
*mci
, u16 handle
)
86 struct dimm_info
*dimm
;
88 mci_for_each_dimm(mci
, dimm
) {
89 if (dimm
->smbios_handle
== handle
)
96 static void dimm_setup_label(struct dimm_info
*dimm
, u16 handle
)
98 const char *bank
= NULL
, *device
= NULL
;
100 dmi_memdev_name(handle
, &bank
, &device
);
102 /* both strings must be non-zero */
103 if (bank
&& *bank
&& device
&& *device
)
104 snprintf(dimm
->label
, sizeof(dimm
->label
), "%s %s", bank
, device
);
107 static void assign_dmi_dimm_info(struct dimm_info
*dimm
, struct memdev_dmi_entry
*entry
)
109 u16 rdr_mask
= BIT(7) | BIT(13);
111 if (entry
->size
== 0xffff) {
112 pr_info("Can't get DIMM%i size\n", dimm
->idx
);
113 dimm
->nr_pages
= MiB_TO_PAGES(32);/* Unknown */
114 } else if (entry
->size
== 0x7fff) {
115 dimm
->nr_pages
= MiB_TO_PAGES(entry
->extended_size
);
117 if (entry
->size
& BIT(15))
118 dimm
->nr_pages
= MiB_TO_PAGES((entry
->size
& 0x7fff) << 10);
120 dimm
->nr_pages
= MiB_TO_PAGES(entry
->size
);
123 switch (entry
->memory_type
) {
125 if (entry
->type_detail
& BIT(13))
126 dimm
->mtype
= MEM_RDDR
;
128 dimm
->mtype
= MEM_DDR
;
131 if (entry
->type_detail
& BIT(13))
132 dimm
->mtype
= MEM_RDDR2
;
134 dimm
->mtype
= MEM_DDR2
;
137 dimm
->mtype
= MEM_FB_DDR2
;
140 if (entry
->type_detail
& BIT(12))
141 dimm
->mtype
= MEM_NVDIMM
;
142 else if (entry
->type_detail
& BIT(13))
143 dimm
->mtype
= MEM_RDDR3
;
145 dimm
->mtype
= MEM_DDR3
;
148 if (entry
->type_detail
& BIT(12))
149 dimm
->mtype
= MEM_NVDIMM
;
150 else if (entry
->type_detail
& BIT(13))
151 dimm
->mtype
= MEM_RDDR4
;
153 dimm
->mtype
= MEM_DDR4
;
156 if (entry
->type_detail
& BIT(6))
157 dimm
->mtype
= MEM_RMBS
;
158 else if ((entry
->type_detail
& rdr_mask
) == rdr_mask
)
159 dimm
->mtype
= MEM_RDR
;
160 else if (entry
->type_detail
& BIT(7))
161 dimm
->mtype
= MEM_SDR
;
162 else if (entry
->type_detail
& BIT(9))
163 dimm
->mtype
= MEM_EDO
;
165 dimm
->mtype
= MEM_UNKNOWN
;
169 * Actually, we can only detect if the memory has bits for
172 if (entry
->total_width
== entry
->data_width
)
173 dimm
->edac_mode
= EDAC_NONE
;
175 dimm
->edac_mode
= EDAC_SECDED
;
177 dimm
->dtype
= DEV_UNKNOWN
;
178 dimm
->grain
= 128; /* Likely, worse case */
180 dimm_setup_label(dimm
, entry
->handle
);
182 if (dimm
->nr_pages
) {
183 edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
184 dimm
->idx
, edac_mem_types
[dimm
->mtype
],
185 PAGES_TO_MiB(dimm
->nr_pages
),
186 (dimm
->edac_mode
!= EDAC_NONE
) ? "(ECC)" : "");
187 edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
188 entry
->memory_type
, entry
->type_detail
,
189 entry
->total_width
, entry
->data_width
);
192 dimm
->smbios_handle
= entry
->handle
;
195 static void enumerate_dimms(const struct dmi_header
*dh
, void *arg
)
197 struct memdev_dmi_entry
*entry
= (struct memdev_dmi_entry
*)dh
;
198 struct ghes_hw_desc
*hw
= (struct ghes_hw_desc
*)arg
;
201 if (dh
->type
!= DMI_ENTRY_MEM_DEVICE
)
204 /* Enlarge the array with additional 16 */
205 if (!hw
->num_dimms
|| !(hw
->num_dimms
% 16)) {
206 struct dimm_info
*new;
208 new = krealloc(hw
->dimms
, (hw
->num_dimms
+ 16) * sizeof(struct dimm_info
),
218 d
= &hw
->dimms
[hw
->num_dimms
];
219 d
->idx
= hw
->num_dimms
;
221 assign_dmi_dimm_info(d
, entry
);
226 static void ghes_scan_system(void)
233 dmi_walk(enumerate_dimms
, &ghes_hw
);
238 void ghes_edac_report_mem_error(int sev
, struct cper_sec_mem_err
*mem_err
)
240 struct edac_raw_error_desc
*e
;
241 struct mem_ctl_info
*mci
;
242 struct ghes_pvt
*pvt
;
247 * We can do the locking below because GHES defers error processing
248 * from NMI to IRQ context. Whenever that changes, we'd at least
251 if (WARN_ON_ONCE(in_nmi()))
254 spin_lock_irqsave(&ghes_lock
, flags
);
261 e
= &mci
->error_desc
;
263 /* Cleans the error report buffer */
264 memset(e
, 0, sizeof (*e
));
268 e
->other_detail
= pvt
->other_detail
;
272 *pvt
->other_detail
= '\0';
276 case GHES_SEV_CORRECTED
:
277 e
->type
= HW_EVENT_ERR_CORRECTED
;
279 case GHES_SEV_RECOVERABLE
:
280 e
->type
= HW_EVENT_ERR_UNCORRECTED
;
283 e
->type
= HW_EVENT_ERR_FATAL
;
287 e
->type
= HW_EVENT_ERR_INFO
;
290 edac_dbg(1, "error validation_bits: 0x%08llx\n",
291 (long long)mem_err
->validation_bits
);
293 /* Error type, mapped on e->msg */
294 if (mem_err
->validation_bits
& CPER_MEM_VALID_ERROR_TYPE
) {
296 switch (mem_err
->error_type
) {
298 p
+= sprintf(p
, "Unknown");
301 p
+= sprintf(p
, "No error");
304 p
+= sprintf(p
, "Single-bit ECC");
307 p
+= sprintf(p
, "Multi-bit ECC");
310 p
+= sprintf(p
, "Single-symbol ChipKill ECC");
313 p
+= sprintf(p
, "Multi-symbol ChipKill ECC");
316 p
+= sprintf(p
, "Master abort");
319 p
+= sprintf(p
, "Target abort");
322 p
+= sprintf(p
, "Parity Error");
325 p
+= sprintf(p
, "Watchdog timeout");
328 p
+= sprintf(p
, "Invalid address");
331 p
+= sprintf(p
, "Mirror Broken");
334 p
+= sprintf(p
, "Memory Sparing");
337 p
+= sprintf(p
, "Scrub corrected error");
340 p
+= sprintf(p
, "Scrub uncorrected error");
343 p
+= sprintf(p
, "Physical Memory Map-out event");
346 p
+= sprintf(p
, "reserved error (%d)",
347 mem_err
->error_type
);
350 strcpy(pvt
->msg
, "unknown error");
354 if (mem_err
->validation_bits
& CPER_MEM_VALID_PA
) {
355 e
->page_frame_number
= PHYS_PFN(mem_err
->physical_addr
);
356 e
->offset_in_page
= offset_in_page(mem_err
->physical_addr
);
360 if (mem_err
->validation_bits
& CPER_MEM_VALID_PA_MASK
)
361 e
->grain
= ~mem_err
->physical_addr_mask
+ 1;
363 /* Memory error location, mapped on e->location */
365 if (mem_err
->validation_bits
& CPER_MEM_VALID_NODE
)
366 p
+= sprintf(p
, "node:%d ", mem_err
->node
);
367 if (mem_err
->validation_bits
& CPER_MEM_VALID_CARD
)
368 p
+= sprintf(p
, "card:%d ", mem_err
->card
);
369 if (mem_err
->validation_bits
& CPER_MEM_VALID_MODULE
)
370 p
+= sprintf(p
, "module:%d ", mem_err
->module
);
371 if (mem_err
->validation_bits
& CPER_MEM_VALID_RANK_NUMBER
)
372 p
+= sprintf(p
, "rank:%d ", mem_err
->rank
);
373 if (mem_err
->validation_bits
& CPER_MEM_VALID_BANK
)
374 p
+= sprintf(p
, "bank:%d ", mem_err
->bank
);
375 if (mem_err
->validation_bits
& CPER_MEM_VALID_ROW
)
376 p
+= sprintf(p
, "row:%d ", mem_err
->row
);
377 if (mem_err
->validation_bits
& CPER_MEM_VALID_COLUMN
)
378 p
+= sprintf(p
, "col:%d ", mem_err
->column
);
379 if (mem_err
->validation_bits
& CPER_MEM_VALID_BIT_POSITION
)
380 p
+= sprintf(p
, "bit_pos:%d ", mem_err
->bit_pos
);
381 if (mem_err
->validation_bits
& CPER_MEM_VALID_MODULE_HANDLE
) {
382 const char *bank
= NULL
, *device
= NULL
;
383 struct dimm_info
*dimm
;
385 dmi_memdev_name(mem_err
->mem_dev_handle
, &bank
, &device
);
386 if (bank
!= NULL
&& device
!= NULL
)
387 p
+= sprintf(p
, "DIMM location:%s %s ", bank
, device
);
389 p
+= sprintf(p
, "DIMM DMI handle: 0x%.4x ",
390 mem_err
->mem_dev_handle
);
392 dimm
= find_dimm_by_handle(mci
, mem_err
->mem_dev_handle
);
394 e
->top_layer
= dimm
->idx
;
395 strcpy(e
->label
, dimm
->label
);
402 strcpy(e
->label
, "unknown memory");
404 /* All other fields are mapped on e->other_detail */
405 p
= pvt
->other_detail
;
406 p
+= snprintf(p
, sizeof(pvt
->other_detail
),
407 "APEI location: %s ", e
->location
);
408 if (mem_err
->validation_bits
& CPER_MEM_VALID_ERROR_STATUS
) {
409 u64 status
= mem_err
->error_status
;
411 p
+= sprintf(p
, "status(0x%016llx): ", (long long)status
);
412 switch ((status
>> 8) & 0xff) {
414 p
+= sprintf(p
, "Error detected internal to the component ");
417 p
+= sprintf(p
, "Error detected in the bus ");
420 p
+= sprintf(p
, "Storage error in DRAM memory ");
423 p
+= sprintf(p
, "Storage error in TLB ");
426 p
+= sprintf(p
, "Storage error in cache ");
429 p
+= sprintf(p
, "Error in one or more functional units ");
432 p
+= sprintf(p
, "component failed self test ");
435 p
+= sprintf(p
, "Overflow or undervalue of internal queue ");
438 p
+= sprintf(p
, "Virtual address not found on IO-TLB or IO-PDIR ");
441 p
+= sprintf(p
, "Improper access error ");
444 p
+= sprintf(p
, "Access to a memory address which is not mapped to any component ");
447 p
+= sprintf(p
, "Loss of Lockstep ");
450 p
+= sprintf(p
, "Response not associated with a request ");
453 p
+= sprintf(p
, "Bus parity error - must also set the A, C, or D Bits ");
456 p
+= sprintf(p
, "Detection of a PATH_ERROR ");
459 p
+= sprintf(p
, "Bus operation timeout ");
462 p
+= sprintf(p
, "A read was issued to data that has been poisoned ");
465 p
+= sprintf(p
, "reserved ");
469 if (mem_err
->validation_bits
& CPER_MEM_VALID_REQUESTOR_ID
)
470 p
+= sprintf(p
, "requestorID: 0x%016llx ",
471 (long long)mem_err
->requestor_id
);
472 if (mem_err
->validation_bits
& CPER_MEM_VALID_RESPONDER_ID
)
473 p
+= sprintf(p
, "responderID: 0x%016llx ",
474 (long long)mem_err
->responder_id
);
475 if (mem_err
->validation_bits
& CPER_MEM_VALID_TARGET_ID
)
476 p
+= sprintf(p
, "targetID: 0x%016llx ",
477 (long long)mem_err
->responder_id
);
478 if (p
> pvt
->other_detail
)
481 edac_raw_mc_handle_error(e
);
484 spin_unlock_irqrestore(&ghes_lock
, flags
);
488 * Known systems that are safe to enable this module.
490 static struct acpi_platform_list plat_list
[] = {
491 {"HPE ", "Server ", 0, ACPI_SIG_FADT
, all_versions
},
495 int ghes_edac_register(struct ghes
*ghes
, struct device
*dev
)
498 struct mem_ctl_info
*mci
;
499 struct ghes_pvt
*pvt
;
500 struct edac_mc_layer layers
[1];
505 if (IS_ENABLED(CONFIG_X86
)) {
506 /* Check if safe to enable on this system */
507 idx
= acpi_match_platform_list(plat_list
);
508 if (!force_load
&& idx
< 0)
514 /* finish another registration/unregistration instance first */
515 mutex_lock(&ghes_reg_mutex
);
518 * We have only one logical memory controller to which all DIMMs belong.
520 if (refcount_inc_not_zero(&ghes_refcount
))
525 /* Check if we've got a bogus BIOS */
526 if (!ghes_hw
.num_dimms
) {
528 ghes_hw
.num_dimms
= 1;
531 layers
[0].type
= EDAC_MC_LAYER_ALL_MEM
;
532 layers
[0].size
= ghes_hw
.num_dimms
;
533 layers
[0].is_virt_csrow
= true;
535 mci
= edac_mc_alloc(0, ARRAY_SIZE(layers
), layers
, sizeof(struct ghes_pvt
));
537 pr_info("Can't allocate memory for EDAC data\n");
546 mci
->mtype_cap
= MEM_FLAG_EMPTY
;
547 mci
->edac_ctl_cap
= EDAC_FLAG_NONE
;
548 mci
->edac_cap
= EDAC_FLAG_NONE
;
549 mci
->mod_name
= "ghes_edac.c";
550 mci
->ctl_name
= "ghes_edac";
551 mci
->dev_name
= "ghes";
554 pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
555 pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
556 pr_info("work on such system. Use this driver with caution\n");
557 } else if (idx
< 0) {
558 pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
559 pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
560 pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
561 pr_info("If you find incorrect reports, please contact your hardware vendor\n");
562 pr_info("to correct its BIOS.\n");
563 pr_info("This system has %d DIMM sockets.\n", ghes_hw
.num_dimms
);
567 struct dimm_info
*src
, *dst
;
570 mci_for_each_dimm(mci
, dst
) {
571 src
= &ghes_hw
.dimms
[i
];
574 dst
->smbios_handle
= src
->smbios_handle
;
575 dst
->nr_pages
= src
->nr_pages
;
576 dst
->mtype
= src
->mtype
;
577 dst
->edac_mode
= src
->edac_mode
;
578 dst
->dtype
= src
->dtype
;
579 dst
->grain
= src
->grain
;
582 * If no src->label, preserve default label assigned
585 if (strlen(src
->label
))
586 memcpy(dst
->label
, src
->label
, sizeof(src
->label
));
592 struct dimm_info
*dimm
= edac_get_dimm(mci
, 0, 0, 0);
596 dimm
->mtype
= MEM_UNKNOWN
;
597 dimm
->dtype
= DEV_UNKNOWN
;
598 dimm
->edac_mode
= EDAC_SECDED
;
601 rc
= edac_mc_add_mc(mci
);
603 pr_info("Can't register with the EDAC core\n");
609 spin_lock_irqsave(&ghes_lock
, flags
);
611 spin_unlock_irqrestore(&ghes_lock
, flags
);
613 /* only set on success */
614 refcount_set(&ghes_refcount
, 1);
618 /* Not needed anymore */
619 kfree(ghes_hw
.dimms
);
620 ghes_hw
.dimms
= NULL
;
622 mutex_unlock(&ghes_reg_mutex
);
627 void ghes_edac_unregister(struct ghes
*ghes
)
629 struct mem_ctl_info
*mci
;
632 mutex_lock(&ghes_reg_mutex
);
634 if (!refcount_dec_and_test(&ghes_refcount
))
638 * Wait for the irq handler being finished.
640 spin_lock_irqsave(&ghes_lock
, flags
);
641 mci
= ghes_pvt
? ghes_pvt
->mci
: NULL
;
643 spin_unlock_irqrestore(&ghes_lock
, flags
);
648 mci
= edac_mc_del_mc(mci
->pdev
);
653 mutex_unlock(&ghes_reg_mutex
);