/*
 * GHES/EDAC Linux driver
 *
 * This file may be distributed under the terms of the GNU General Public
 * License version 2.
 *
 * Copyright (c) 2013 by Mauro Carvalho Chehab
 *
 * Red Hat Inc. http://www.redhat.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <acpi/ghes.h>
#include <linux/edac.h>
#include <linux/dmi.h>
#include "edac_module.h"
#include <ras/ras_event.h>
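
/*
 * Overview: this driver exposes APEI/GHES-reported memory errors through
 * the EDAC core. ghes_edac_register() builds a single logical memory
 * controller from the SMBIOS (DMI) DIMM inventory, and
 * ghes_edac_report_mem_error() translates each CPER memory error record
 * into an EDAC error report.
 */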

struct ghes_edac_pvt {
	struct list_head list;
	struct ghes *ghes;
	struct mem_ctl_info *mci;

	/* Buffers for the error handling routine */
	char detail_location[240];
	char other_detail[160];
	char msg[80];
};

static atomic_t ghes_init = ATOMIC_INIT(0);
static struct ghes_edac_pvt *ghes_pvt;

/*
 * Sync with other, potentially concurrent callers of
 * ghes_edac_report_mem_error(). We don't know what the
 * "inventive" firmware would do.
 */
static DEFINE_SPINLOCK(ghes_lock);

/* "ghes_edac.force_load=1" skips the platform check */
static bool __read_mostly force_load;
module_param(force_load, bool, 0);
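
/*
 * Example: booting with "ghes_edac.force_load=1" on the kernel command
 * line enables the driver even on systems that are not in the plat_list
 * allow-list checked by ghes_edac_register() below.
 */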

/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
	u8 type;
	u8 length;
	u16 handle;
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 total_width;
	u16 data_width;
	u16 size;
	u8 form_factor;
	u8 device_set;
	u8 device_locator;
	u8 bank_locator;
	u8 memory_type;
	u16 type_detail;
	u16 speed;
	u8 manufacturer;
	u8 serial_number;
	u8 asset_tag;
	u8 part_number;
	u8 attributes;
	u32 extended_size;
	u16 conf_mem_clk_speed;
} __attribute__((__packed__));
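
/*
 * The struct above mirrors the SMBIOS Type 17 record layout byte for
 * byte, which is why __packed is required: dmi_walk() hands the callback
 * a raw pointer into the DMI table and we reinterpret it via this struct.
 */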

struct ghes_edac_dimm_fill {
	struct mem_ctl_info *mci;
	unsigned count;
};

static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
{
	int *num_dimm = arg;

	if (dh->type == DMI_ENTRY_MEM_DEVICE)
		(*num_dimm)++;
}
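
/*
 * dmi_walk() is used in two passes: first with ghes_edac_count_dimms()
 * to size the EDAC layer, then with ghes_edac_dmidecode() below to fill
 * in one dimm_info per SMBIOS Type 17 entry.
 */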

static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
{
	struct ghes_edac_dimm_fill *dimm_fill = arg;
	struct mem_ctl_info *mci = dimm_fill->mci;

	if (dh->type == DMI_ENTRY_MEM_DEVICE) {
		struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
		struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
						       mci->n_layers,
						       dimm_fill->count, 0, 0);
		u16 rdr_mask = BIT(7) | BIT(13);
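
		/*
		 * Per the SMBIOS spec, a Type 17 "size" of 0xffff means the
		 * size is unknown and 0x7fff means the real value lives in
		 * extended_size instead; otherwise bit 15 selects KB (set)
		 * or MB (clear) granularity for the remaining 15 bits.
		 */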
		if (entry->size == 0xffff) {
			pr_info("Can't get DIMM%i size\n",
				dimm_fill->count);
			dimm->nr_pages = MiB_TO_PAGES(32);	/* Unknown */
		} else if (entry->size == 0x7fff) {
			dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
		} else {
			if (entry->size & BIT(15))
				dimm->nr_pages = MiB_TO_PAGES((entry->size & 0x7fff) << 10);
			else
				dimm->nr_pages = MiB_TO_PAGES(entry->size);
		}

		switch (entry->memory_type) {
		case 0x12:
			if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR;
			else
				dimm->mtype = MEM_DDR;
			break;
		case 0x13:
			if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR2;
			else
				dimm->mtype = MEM_DDR2;
			break;
		case 0x14:
			dimm->mtype = MEM_FB_DDR2;
			break;
		case 0x18:
			if (entry->type_detail & BIT(12))
				dimm->mtype = MEM_NVDIMM;
			else if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR3;
			else
				dimm->mtype = MEM_DDR3;
			break;
		case 0x1a:
			if (entry->type_detail & BIT(12))
				dimm->mtype = MEM_NVDIMM;
			else if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR4;
			else
				dimm->mtype = MEM_DDR4;
			break;
		default:
			if (entry->type_detail & BIT(6))
				dimm->mtype = MEM_RMBS;
			else if ((entry->type_detail & rdr_mask) == rdr_mask)
				dimm->mtype = MEM_RDR;
			else if (entry->type_detail & BIT(7))
				dimm->mtype = MEM_SDR;
			else if (entry->type_detail & BIT(9))
				dimm->mtype = MEM_EDO;
			else
				dimm->mtype = MEM_UNKNOWN;
		}
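
		/*
		 * The type_detail bits tested above come from the SMBIOS
		 * Type Detail field: bit 6 = RAMBUS, bit 7 = synchronous,
		 * bit 9 = EDO, bit 12 = non-volatile, bit 13 = registered
		 * (buffered). rdr_mask (synchronous + registered) therefore
		 * identifies registered SDRAM.
		 */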

		/*
		 * Actually, we can only detect if the memory has bits for
		 * checksum or not
		 */
		if (entry->total_width == entry->data_width)
			dimm->edac_mode = EDAC_NONE;
		else
			dimm->edac_mode = EDAC_SECDED;

		dimm->dtype = DEV_UNKNOWN;
		dimm->grain = 128;		/* Likely worst case */

		/*
		 * FIXME: It shouldn't be hard to also fill the DIMM labels
		 */

		if (dimm->nr_pages) {
			edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
				 dimm_fill->count, edac_mem_types[dimm->mtype],
				 PAGES_TO_MiB(dimm->nr_pages),
				 (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
			edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
				 entry->memory_type, entry->type_detail,
				 entry->total_width, entry->data_width);
		}

		dimm_fill->count++;
	}
}
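
/*
 * ghes_edac_report_mem_error() is called from the GHES handler for each
 * CPER memory error section. It formats the record into the per-MC
 * buffers above and forwards the result to the EDAC core.
 */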
void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
{
	enum hw_event_mc_err_type type;
	struct edac_raw_error_desc *e;
	struct mem_ctl_info *mci;
	struct ghes_edac_pvt *pvt = ghes_pvt;
	unsigned long flags;
	char *p;
	u8 grain_bits;

	if (!pvt)
		return;

	/*
	 * We can do the locking below because GHES defers error processing
	 * from NMI to IRQ context. Whenever that changes, we'd at least
	 * know.
	 */
	if (WARN_ON_ONCE(in_nmi()))
		return;

	spin_lock_irqsave(&ghes_lock, flags);

	mci = pvt->mci;
	e = &mci->error_desc;

	/* Clean the error report buffer */
	memset(e, 0, sizeof(*e));
	e->error_count = 1;
	strcpy(e->label, "unknown label");
	e->msg = pvt->msg;
	e->other_detail = pvt->other_detail;
	e->top_layer = -1;
	e->mid_layer = -1;
	e->low_layer = -1;
	*pvt->other_detail = '\0';
	*pvt->msg = '\0';

	switch (sev) {
	case GHES_SEV_CORRECTED:
		type = HW_EVENT_ERR_CORRECTED;
		break;
	case GHES_SEV_RECOVERABLE:
		type = HW_EVENT_ERR_UNCORRECTED;
		break;
	case GHES_SEV_PANIC:
		type = HW_EVENT_ERR_FATAL;
		break;
	default:
	case GHES_SEV_NO:
		type = HW_EVENT_ERR_INFO;
	}

	edac_dbg(1, "error validation_bits: 0x%08llx\n",
		 (long long)mem_err->validation_bits);

	/* Error type, mapped on e->msg */
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
		p = pvt->msg;
		switch (mem_err->error_type) {
		case 0:
			p += sprintf(p, "Unknown");
			break;
		case 1:
			p += sprintf(p, "No error");
			break;
		case 2:
			p += sprintf(p, "Single-bit ECC");
			break;
		case 3:
			p += sprintf(p, "Multi-bit ECC");
			break;
		case 4:
			p += sprintf(p, "Single-symbol ChipKill ECC");
			break;
		case 5:
			p += sprintf(p, "Multi-symbol ChipKill ECC");
			break;
		case 6:
			p += sprintf(p, "Master abort");
			break;
		case 7:
			p += sprintf(p, "Target abort");
			break;
		case 8:
			p += sprintf(p, "Parity Error");
			break;
		case 9:
			p += sprintf(p, "Watchdog timeout");
			break;
		case 10:
			p += sprintf(p, "Invalid address");
			break;
		case 11:
			p += sprintf(p, "Mirror Broken");
			break;
		case 12:
			p += sprintf(p, "Memory Sparing");
			break;
		case 13:
			p += sprintf(p, "Scrub corrected error");
			break;
		case 14:
			p += sprintf(p, "Scrub uncorrected error");
			break;
		case 15:
			p += sprintf(p, "Physical Memory Map-out event");
			break;
		default:
			p += sprintf(p, "reserved error (%d)",
				     mem_err->error_type);
		}
	} else {
		strcpy(pvt->msg, "unknown error");
	}

	/* Error address */
	if (mem_err->validation_bits & CPER_MEM_VALID_PA) {
		e->page_frame_number = mem_err->physical_addr >> PAGE_SHIFT;
		e->offset_in_page = mem_err->physical_addr & ~PAGE_MASK;
	}

	/* Error grain */
	if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
		e->grain = ~mem_err->physical_addr_mask + 1;
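
	/*
	 * physical_addr_mask has ones over the valid address bits, so
	 * ~mask + 1 is the size of the region the error resolves to,
	 * e.g. mask = 0x...fffff000 -> grain = 0x1000 (one 4K page).
	 */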

	/* Memory error location, mapped on e->location */
	p = e->location;
	if (mem_err->validation_bits & CPER_MEM_VALID_NODE)
		p += sprintf(p, "node:%d ", mem_err->node);
	if (mem_err->validation_bits & CPER_MEM_VALID_CARD)
		p += sprintf(p, "card:%d ", mem_err->card);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
		p += sprintf(p, "module:%d ", mem_err->module);
	if (mem_err->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
		p += sprintf(p, "rank:%d ", mem_err->rank);
	if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
		p += sprintf(p, "bank:%d ", mem_err->bank);
	if (mem_err->validation_bits & CPER_MEM_VALID_ROW)
		p += sprintf(p, "row:%d ", mem_err->row);
	if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN)
		p += sprintf(p, "col:%d ", mem_err->column);
	if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
		p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
		const char *bank = NULL, *device = NULL;

		dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
		if (bank != NULL && device != NULL)
			p += sprintf(p, "DIMM location:%s %s ", bank, device);
		else
			p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
				     mem_err->mem_dev_handle);
	}
	if (p > e->location)
		*(p - 1) = '\0';

	/* All other fields are mapped on e->other_detail */
	p = pvt->other_detail;
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
		u64 status = mem_err->error_status;

		p += sprintf(p, "status(0x%016llx): ", (long long)status);
		switch ((status >> 8) & 0xff) {
		case 1:
			p += sprintf(p, "Error detected internal to the component ");
			break;
		case 16:
			p += sprintf(p, "Error detected in the bus ");
			break;
		case 4:
			p += sprintf(p, "Storage error in DRAM memory ");
			break;
		case 5:
			p += sprintf(p, "Storage error in TLB ");
			break;
		case 6:
			p += sprintf(p, "Storage error in cache ");
			break;
		case 7:
			p += sprintf(p, "Error in one or more functional units ");
			break;
		case 8:
			p += sprintf(p, "Component failed self test ");
			break;
		case 9:
			p += sprintf(p, "Overflow or undervalue of internal queue ");
			break;
		case 17:
			p += sprintf(p, "Virtual address not found on IO-TLB or IO-PDIR ");
			break;
		case 18:
			p += sprintf(p, "Improper access error ");
			break;
		case 19:
			p += sprintf(p, "Access to a memory address which is not mapped to any component ");
			break;
		case 20:
			p += sprintf(p, "Loss of Lockstep ");
			break;
		case 21:
			p += sprintf(p, "Response not associated with a request ");
			break;
		case 22:
			p += sprintf(p, "Bus parity error - must also set the A, C, or D Bits ");
			break;
		case 23:
			p += sprintf(p, "Detection of a PATH_ERROR ");
			break;
		case 25:
			p += sprintf(p, "Bus operation timeout ");
			break;
		case 26:
			p += sprintf(p, "A read was issued to data that has been poisoned ");
			break;
		default:
			p += sprintf(p, "reserved ");
			break;
		}
	}
	if (mem_err->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
		p += sprintf(p, "requestorID: 0x%016llx ",
			     (long long)mem_err->requestor_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
		p += sprintf(p, "responderID: 0x%016llx ",
			     (long long)mem_err->responder_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_TARGET_ID)
		p += sprintf(p, "targetID: 0x%016llx ",
			     (long long)mem_err->target_id);
	if (p > pvt->other_detail)
		*(p - 1) = '\0';

	/* Sanity-check driver-supplied grain value. */
	if (WARN_ON_ONCE(!e->grain))
		e->grain = 1;

	grain_bits = fls_long(e->grain - 1);
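
	/*
	 * The trace event wants the grain as a power-of-two exponent:
	 * fls_long(grain - 1) rounds up, e.g. grain = 4096 -> grain_bits = 12.
	 */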

	/* Generate the trace event */
	snprintf(pvt->detail_location, sizeof(pvt->detail_location),
		 "APEI location: %s %s", e->location, e->other_detail);
	trace_mc_event(type, e->msg, e->label, e->error_count,
		       mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
		       (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
		       grain_bits, e->syndrome, pvt->detail_location);

	edac_raw_mc_handle_error(type, mci, e);
	spin_unlock_irqrestore(&ghes_lock, flags);
}

/*
 * Known systems that are safe to enable this module.
 */
static struct acpi_platform_list plat_list[] = {
	{"HPE   ", "Server  ", 0, ACPI_SIG_FADT, all_versions},
	{ } /* End */
};

int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
	bool fake = false;
	int rc, num_dimm = 0;
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[1];
	struct ghes_edac_dimm_fill dimm_fill;
	int idx = -1;

	if (IS_ENABLED(CONFIG_X86)) {
		/* Check if safe to enable on this system */
		idx = acpi_match_platform_list(plat_list);
		if (!force_load && idx < 0)
			return -ENODEV;
	} else {
		idx = 0;
	}

	/*
	 * We have only one logical memory controller to which all DIMMs belong.
	 */
	if (atomic_inc_return(&ghes_init) > 1)
		return 0;

	/* Get the number of DIMMs */
	dmi_walk(ghes_edac_count_dimms, &num_dimm);

	/* Check if we've got a bogus BIOS */
	if (num_dimm == 0) {
		fake = true;
		num_dimm = 1;
	}

	layers[0].type = EDAC_MC_LAYER_ALL_MEM;
	layers[0].size = num_dimm;
	layers[0].is_virt_csrow = true;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
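	/*
	 * A single EDAC_MC_LAYER_ALL_MEM layer is used because GHES gives
	 * us no per-channel or per-slot topology; every DIMM hangs off one
	 * virtual memory controller. edac_mc_alloc() also allocates the
	 * pvt_info area (our struct ghes_edac_pvt) along with the mci.
	 */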
	if (!mci) {
		pr_info("Can't allocate memory for EDAC data\n");
		return -ENOMEM;
	}

	ghes_pvt	= mci->pvt_info;
	ghes_pvt->ghes	= ghes;
	ghes_pvt->mci	= mci;

	mci->pdev = dev;
	mci->mtype_cap = MEM_FLAG_EMPTY;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "ghes_edac.c";
	mci->ctl_name = "ghes_edac";
	mci->dev_name = "ghes";

	if (fake) {
		pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMs.\n");
		pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
		pr_info("work on such a system. Use this driver with caution.\n");
	} else if (idx < 0) {
		pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
		pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
		pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
		pr_info("If you find incorrect reports, please contact your hardware vendor\n");
		pr_info("to correct its BIOS.\n");
		pr_info("This system has %d DIMM sockets.\n", num_dimm);
	}

	if (!fake) {
		dimm_fill.count = 0;
		dimm_fill.mci = mci;
		dmi_walk(ghes_edac_dmidecode, &dimm_fill);
	} else {
		struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
						       mci->n_layers, 0, 0, 0);

		dimm->nr_pages = 1;
		dimm->grain = 128;
		dimm->mtype = MEM_UNKNOWN;
		dimm->dtype = DEV_UNKNOWN;
		dimm->edac_mode = EDAC_SECDED;
	}

	rc = edac_mc_add_mc(mci);
	if (rc < 0) {
		pr_info("Can't register at EDAC core\n");
		edac_mc_free(mci);
		return -ENODEV;
	}
	return 0;
}
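
/*
 * Teardown mirrors registration: only the last GHES user drops the
 * refcount to zero, at which point the single mci is removed and freed.
 */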
void ghes_edac_unregister(struct ghes *ghes)
{
	struct mem_ctl_info *mci;

	if (!ghes_pvt)
		return;

	if (atomic_dec_return(&ghes_init))
		return;

	mci = ghes_pvt->mci;
	ghes_pvt = NULL;
	edac_mc_del_mc(mci->pdev);
	edac_mc_free(mci);
}