/*
 * GHES/EDAC Linux driver
 *
 * This file may be distributed under the terms of the GNU General Public
 * License version 2.
 *
 * Copyright (c) 2013 by Mauro Carvalho Chehab
 *
 * Red Hat Inc. http://www.redhat.com
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <acpi/ghes.h>
#include <linux/edac.h>
#include <linux/dmi.h>
#include "edac_module.h"
#include <ras/ras_event.h>
struct ghes_edac_pvt {
	struct list_head list;
	struct ghes *ghes;
	struct mem_ctl_info *mci;

	/* Buffers for the error handling routine */
	char detail_location[240];
	char other_detail[160];
	char msg[80];
};
static atomic_t ghes_init = ATOMIC_INIT(0);
static struct ghes_edac_pvt *ghes_pvt;
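/*
 * A single, file-scope instance is enough: GHES error records do not
 * identify a memory controller, so all DIMMs hang off one logical MC
 * (see ghes_edac_register() below).
 */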
/*
 * Sync with other, potentially concurrent callers of
 * ghes_edac_report_mem_error(). We don't know what the
 * "inventive" firmware would do.
 */
static DEFINE_SPINLOCK(ghes_lock);
/* "ghes_edac.force_load=1" skips the platform check */
static bool __read_mostly force_load;
module_param(force_load, bool, 0);
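/*
 * Usage note: passing ghes_edac.force_load=1 on the kernel command line
 * loads the driver on platforms outside plat_list below. On such systems
 * the DMI/SMBIOS data may be unreliable, so treat the reports with caution
 * (see the pr_info() warnings in ghes_edac_register()).
 */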
/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
	u8 type;
	u8 length;
	u16 handle;
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 total_width;
	u16 data_width;
	u16 size;
	u8 form_factor;
	u8 device_set;
	u8 device_locator;
	u8 bank_locator;
	u8 memory_type;
	u16 type_detail;
	u16 speed;
	u8 manufacturer;
	u8 serial_number;
	u8 asset_tag;
	u8 part_number;
	u8 attributes;
	u32 extended_size;
	u16 conf_mem_clk_speed;
} __attribute__((__packed__));
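/*
 * SMBIOS Type 17 "Size" encoding, as decoded in ghes_edac_dmidecode():
 * 0xffff means the size is unknown, 0x7fff means the real size is in the
 * 32-bit "Extended Size" field, and bit 15 set means the value is given
 * in kilobytes rather than megabytes.
 */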
struct ghes_edac_dimm_fill {
	struct mem_ctl_info *mci;
	unsigned count;
};
static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
{
	int *num_dimm = arg;

	if (dh->type == DMI_ENTRY_MEM_DEVICE)
		(*num_dimm)++;
}
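/*
 * dmi_walk() callback: fill one EDAC dimm_info from each SMBIOS Type 17
 * (Memory Device) record.
 */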
static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
{
	struct ghes_edac_dimm_fill *dimm_fill = arg;
	struct mem_ctl_info *mci = dimm_fill->mci;

	if (dh->type == DMI_ENTRY_MEM_DEVICE) {
		struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
		struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
						       mci->n_layers,
						       dimm_fill->count, 0, 0);
		u16 rdr_mask = BIT(7) | BIT(13);
		if (entry->size == 0xffff) {
			pr_info("Can't get DIMM%i size\n",
				dimm_fill->count);
			dimm->nr_pages = MiB_TO_PAGES(32); /* Unknown */
		} else if (entry->size == 0x7fff) {
			dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
		} else {
			if (entry->size & BIT(15))
				dimm->nr_pages = MiB_TO_PAGES((entry->size & 0x7fff) << 10);
			else
				dimm->nr_pages = MiB_TO_PAGES(entry->size);
		}
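		/*
		 * SMBIOS "Memory Type" codes handled below, per the SMBIOS
		 * spec: 0x12 = DDR, 0x13 = DDR2, 0x14 = DDR2 FB-DIMM,
		 * 0x18 = DDR3, 0x1a = DDR4. In "Type Detail", bit 13 flags a
		 * registered (buffered) part and bit 12 a non-volatile one.
		 */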
		switch (entry->memory_type) {
		case 0x12:
			if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR;
			else
				dimm->mtype = MEM_DDR;
			break;
		case 0x13:
			if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR2;
			else
				dimm->mtype = MEM_DDR2;
			break;
		case 0x14:
			dimm->mtype = MEM_FB_DDR2;
			break;
		case 0x18:
			if (entry->type_detail & BIT(12))
				dimm->mtype = MEM_NVDIMM;
			else if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR3;
			else
				dimm->mtype = MEM_DDR3;
			break;
		case 0x1a:
			if (entry->type_detail & BIT(12))
				dimm->mtype = MEM_NVDIMM;
			else if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR4;
			else
				dimm->mtype = MEM_DDR4;
			break;
		default:
			if (entry->type_detail & BIT(6))
				dimm->mtype = MEM_RMBS;
			else if ((entry->type_detail & rdr_mask) == rdr_mask)
				dimm->mtype = MEM_RDR;
			else if (entry->type_detail & BIT(7))
				dimm->mtype = MEM_SDR;
			else if (entry->type_detail & BIT(9))
				dimm->mtype = MEM_EDO;
			else
				dimm->mtype = MEM_UNKNOWN;
		}
		/*
		 * Actually, we can only detect if the memory has bits for
		 * checksum or not
		 */
		if (entry->total_width == entry->data_width)
			dimm->edac_mode = EDAC_NONE;
		else
			dimm->edac_mode = EDAC_SECDED;

		dimm->dtype = DEV_UNKNOWN;
		dimm->grain = 128;	/* Likely, worst case */
		/*
		 * FIXME: It shouldn't be hard to also fill the DIMM labels
		 */

		if (dimm->nr_pages) {
			edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
				 dimm_fill->count, edac_mem_types[dimm->mtype],
				 PAGES_TO_MiB(dimm->nr_pages),
				 (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
			edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
				 entry->memory_type, entry->type_detail,
				 entry->total_width, entry->data_width);
		}

		dimm_fill->count++;
	}
}
void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
{
	enum hw_event_mc_err_type type;
	struct edac_raw_error_desc *e;
	struct mem_ctl_info *mci;
	struct ghes_edac_pvt *pvt = ghes_pvt;
	unsigned long flags;
	char *p;
	u8 grain_bits;

	if (!pvt)
		return;
	/*
	 * We can do the locking below because GHES defers error processing
	 * from NMI to IRQ context. Whenever that changes, we'd at least
	 * know.
	 */
	if (WARN_ON_ONCE(in_nmi()))
		return;

	spin_lock_irqsave(&ghes_lock, flags);

	mci = pvt->mci;
	e = &mci->error_desc;
	/* Cleans the error report buffer */
	memset(e, 0, sizeof(*e));
	e->error_count = 1;
	strcpy(e->label, "unknown label");
	e->msg = pvt->msg;
	e->other_detail = pvt->other_detail;
	e->top_layer = -1;
	e->mid_layer = -1;
	e->low_layer = -1;
	*pvt->msg = '\0';
	*pvt->other_detail = '\0';
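	/* Map the GHES severity onto the EDAC event type */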
	switch (sev) {
	case GHES_SEV_CORRECTED:
		type = HW_EVENT_ERR_CORRECTED;
		break;
	case GHES_SEV_RECOVERABLE:
		type = HW_EVENT_ERR_UNCORRECTED;
		break;
	case GHES_SEV_PANIC:
		type = HW_EVENT_ERR_FATAL;
		break;
	default:
	case GHES_SEV_NO:
		type = HW_EVENT_ERR_INFO;
	}
	edac_dbg(1, "error validation_bits: 0x%08llx\n",
		 (long long)mem_err->validation_bits);
	/* Error type, mapped on e->msg */
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
		p = pvt->msg;
		switch (mem_err->error_type) {
		case 0:
			p += sprintf(p, "Unknown");
			break;
		case 1:
			p += sprintf(p, "No error");
			break;
		case 2:
			p += sprintf(p, "Single-bit ECC");
			break;
		case 3:
			p += sprintf(p, "Multi-bit ECC");
			break;
		case 4:
			p += sprintf(p, "Single-symbol ChipKill ECC");
			break;
		case 5:
			p += sprintf(p, "Multi-symbol ChipKill ECC");
			break;
		case 6:
			p += sprintf(p, "Master abort");
			break;
		case 7:
			p += sprintf(p, "Target abort");
			break;
		case 8:
			p += sprintf(p, "Parity Error");
			break;
		case 9:
			p += sprintf(p, "Watchdog timeout");
			break;
		case 10:
			p += sprintf(p, "Invalid address");
			break;
		case 11:
			p += sprintf(p, "Mirror Broken");
			break;
		case 12:
			p += sprintf(p, "Memory Sparing");
			break;
		case 13:
			p += sprintf(p, "Scrub corrected error");
			break;
		case 14:
			p += sprintf(p, "Scrub uncorrected error");
			break;
		case 15:
			p += sprintf(p, "Physical Memory Map-out event");
			break;
		default:
			p += sprintf(p, "reserved error (%d)",
				     mem_err->error_type);
		}
	} else {
		strcpy(pvt->msg, "unknown error");
	}
	/* Error address */
	if (mem_err->validation_bits & CPER_MEM_VALID_PA) {
		e->page_frame_number = mem_err->physical_addr >> PAGE_SHIFT;
		e->offset_in_page = mem_err->physical_addr & ~PAGE_MASK;
	}
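	/*
	 * Error grain: physical_addr_mask has 1s in the bit positions that
	 * are meaningful in physical_addr, so the complement of its in-page
	 * part is the size of the region the error can be pinned down to.
	 */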
	if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
		e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
	/* Memory error location, mapped on e->location */
	p = e->location;
	if (mem_err->validation_bits & CPER_MEM_VALID_NODE)
		p += sprintf(p, "node:%d ", mem_err->node);
	if (mem_err->validation_bits & CPER_MEM_VALID_CARD)
		p += sprintf(p, "card:%d ", mem_err->card);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
		p += sprintf(p, "module:%d ", mem_err->module);
	if (mem_err->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
		p += sprintf(p, "rank:%d ", mem_err->rank);
	if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
		p += sprintf(p, "bank:%d ", mem_err->bank);
	if (mem_err->validation_bits & CPER_MEM_VALID_ROW)
		p += sprintf(p, "row:%d ", mem_err->row);
	if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN)
		p += sprintf(p, "col:%d ", mem_err->column);
	if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
		p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
		const char *bank = NULL, *device = NULL;

		dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);

		/* Only print the location when the handle is valid */
		if (bank != NULL && device != NULL)
			p += sprintf(p, "DIMM location:%s %s ", bank, device);
		else
			p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
				     mem_err->mem_dev_handle);
	}
	if (p > e->location)
		*(p - 1) = '\0';
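	/*
	 * Illustrative only: with the node, card and module bits valid,
	 * e->location now holds something like "node:0 card:1 module:2".
	 */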
	/* All other fields are mapped on e->other_detail */
	p = pvt->other_detail;
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
		u64 status = mem_err->error_status;

		p += sprintf(p, "status(0x%016llx): ", (long long)status);
		switch ((status >> 8) & 0xff) {
		case 1:
			p += sprintf(p, "Error detected internal to the component ");
			break;
		case 16:
			p += sprintf(p, "Error detected in the bus ");
			break;
		case 4:
			p += sprintf(p, "Storage error in DRAM memory ");
			break;
		case 5:
			p += sprintf(p, "Storage error in TLB ");
			break;
		case 6:
			p += sprintf(p, "Storage error in cache ");
			break;
		case 7:
			p += sprintf(p, "Error in one or more functional units ");
			break;
		case 8:
			p += sprintf(p, "component failed self test ");
			break;
		case 9:
			p += sprintf(p, "Overflow or undervalue of internal queue ");
			break;
		case 17:
			p += sprintf(p, "Virtual address not found on IO-TLB or IO-PDIR ");
			break;
		case 18:
			p += sprintf(p, "Improper access error ");
			break;
		case 19:
			p += sprintf(p, "Access to a memory address which is not mapped to any component ");
			break;
		case 20:
			p += sprintf(p, "Loss of Lockstep ");
			break;
		case 21:
			p += sprintf(p, "Response not associated with a request ");
			break;
		case 22:
			p += sprintf(p, "Bus parity error - must also set the A, C, or D Bits ");
			break;
		case 23:
			p += sprintf(p, "Detection of a PATH_ERROR ");
			break;
		case 25:
			p += sprintf(p, "Bus operation timeout ");
			break;
		case 26:
			p += sprintf(p, "A read was issued to data that has been poisoned ");
			break;
		default:
			p += sprintf(p, "reserved ");
			break;
		}
	}
	if (mem_err->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
		p += sprintf(p, "requestorID: 0x%016llx ",
			     (long long)mem_err->requestor_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
		p += sprintf(p, "responderID: 0x%016llx ",
			     (long long)mem_err->responder_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_TARGET_ID)
		p += sprintf(p, "targetID: 0x%016llx ",
			     (long long)mem_err->target_id);
	if (p > pvt->other_detail)
		*(p - 1) = '\0';
	/* Generate the trace event */
	grain_bits = fls_long(e->grain);
	snprintf(pvt->detail_location, sizeof(pvt->detail_location),
		 "APEI location: %s %s", e->location, e->other_detail);
	trace_mc_event(type, e->msg, e->label, e->error_count,
		       mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
		       (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
		       grain_bits, e->syndrome, pvt->detail_location);

	edac_raw_mc_handle_error(type, mci, e);

	spin_unlock_irqrestore(&ghes_lock, flags);
}
/*
 * Known systems that are safe to enable this module.
 */
static struct acpi_platform_list plat_list[] = {
	{"HPE   ", "Server  ", 0, ACPI_SIG_FADT, all_versions},
	{ } /* End */
};
int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
	bool fake = false;
	int rc, num_dimm = 0;
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[1];
	struct ghes_edac_dimm_fill dimm_fill;
	int idx;
	if (IS_ENABLED(CONFIG_X86)) {
		/* Check if safe to enable on this system */
		idx = acpi_match_platform_list(plat_list);
		if (!force_load && idx < 0)
			return -ENODEV;
	} else {
		idx = 0;
	}
	/*
	 * We have only one logical memory controller to which all DIMMs
	 * belong.
	 */
	if (atomic_inc_return(&ghes_init) > 1)
		return 0;
	/* Get the number of DIMMs */
	dmi_walk(ghes_edac_count_dimms, &num_dimm);

	/* Check if we've got a bogus BIOS */
	if (num_dimm == 0) {
		fake = true;
		num_dimm = 1;
	}
	layers[0].type = EDAC_MC_LAYER_ALL_MEM;
	layers[0].size = num_dimm;
	layers[0].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
	if (!mci) {
		pr_info("Can't allocate memory for EDAC data\n");
		return -ENOMEM;
	}
	ghes_pvt = mci->pvt_info;
	ghes_pvt->ghes = ghes;
	ghes_pvt->mci = mci;
	mci->pdev = dev;
	mci->mtype_cap = MEM_FLAG_EMPTY;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "ghes_edac.c";
	mci->ctl_name = "ghes_edac";
	mci->dev_name = "ghes";
	if (fake) {
		pr_info("This system has a very crappy BIOS: it doesn't even list the DIMMs.\n");
		pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
		pr_info("work on such a system. Use this driver with caution.\n");
	} else if (idx < 0) {
		pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
		pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
		pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
		pr_info("If you find incorrect reports, please contact your hardware vendor\n");
		pr_info("to correct its BIOS.\n");
		pr_info("This system has %d DIMM sockets.\n", num_dimm);
	}
	if (!fake) {
		dimm_fill.count = 0;
		dimm_fill.mci = mci;
		dmi_walk(ghes_edac_dmidecode, &dimm_fill);
	} else {
		/* Bogus BIOS: fake a single one-page DIMM so the MC can register */
		struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
						       mci->n_layers, 0, 0, 0);

		dimm->nr_pages = 1;
		dimm->grain = 128;
		dimm->mtype = MEM_UNKNOWN;
		dimm->dtype = DEV_UNKNOWN;
		dimm->edac_mode = EDAC_SECDED;
	}
	rc = edac_mc_add_mc(mci);
	if (rc < 0) {
		pr_info("Can't register with the EDAC core\n");
		edac_mc_free(mci);
		return -ENODEV;
	}
	return 0;
}
void ghes_edac_unregister(struct ghes *ghes)
{
	struct mem_ctl_info *mci;

	if (!ghes_pvt)
		return;

	mci = ghes_pvt->mci;
	edac_mc_del_mc(mci->pdev);
	edac_mc_free(mci);
}