/*
 * GHES/EDAC Linux driver
 *
 * This file may be distributed under the terms of the GNU General Public
 * License version 2.
 *
 * Copyright (c) 2013 by Mauro Carvalho Chehab
 *
 * Red Hat Inc. http://www.redhat.com
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <acpi/ghes.h>
#include <linux/edac.h>
#include <linux/dmi.h>
#include "edac_module.h"
#include <ras/ras_event.h>
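
/* Per-GHES private data; each instance is linked on ghes_reglist below */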
struct ghes_edac_pvt {
	struct list_head list;
	struct ghes *ghes;
	struct mem_ctl_info *mci;

	/* Buffers for the error handling routine */
	char detail_location[240];
	char other_detail[160];
	char msg[80];
};
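
/* Registered GHES instances; ghes_edac_mc_num hands out the EDAC controller index */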
static LIST_HEAD(ghes_reglist);
static DEFINE_MUTEX(ghes_edac_lock);
static int ghes_edac_mc_num;
/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
	u8 type;
	u8 length;
	u16 handle;
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 total_width;
	u16 data_width;
	u16 size;
	u8 form_factor;
	u8 device_set;
	u8 device_locator;
	u8 bank_locator;
	u8 memory_type;
	u16 type_detail;
	u16 speed;
	u8 manufacturer;
	u8 serial_number;
	u8 asset_tag;
	u8 part_number;
	u8 attributes;
	u32 extended_size;
	u16 conf_mem_clk_speed;
} __attribute__((__packed__));
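
/* Cursor passed to the dmi_walk() callback that fills per-DIMM information */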
struct ghes_edac_dimm_fill {
	struct mem_ctl_info *mci;
	unsigned count;
};
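
/* dmi_walk() callback: count the SMBIOS Type 17 (memory device) entries */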
static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
{
	int *num_dimm = arg;

	if (dh->type == DMI_ENTRY_MEM_DEVICE)
		(*num_dimm)++;
}
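
/* dmi_walk() callback: fill one EDAC dimm_info per SMBIOS Type 17 entry */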
static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
{
	struct ghes_edac_dimm_fill *dimm_fill = arg;
	struct mem_ctl_info *mci = dimm_fill->mci;
	if (dh->type == DMI_ENTRY_MEM_DEVICE) {
		struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
		struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
						       mci->n_layers,
						       dimm_fill->count, 0, 0);
		if (entry->size == 0xffff) {
			pr_info("Can't get DIMM%i size\n",
				dimm_fill->count);
			dimm->nr_pages = MiB_TO_PAGES(32);	/* Unknown */
		} else if (entry->size == 0x7fff) {
			dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
		} else {
			if (entry->size & 1 << 15)
				dimm->nr_pages = MiB_TO_PAGES((entry->size &
							       0x7fff) << 10);
			else
				dimm->nr_pages = MiB_TO_PAGES(entry->size);
		}
		switch (entry->memory_type) {
		case 0x12:
			if (entry->type_detail & 1 << 13)
				dimm->mtype = MEM_RDDR;
			else
				dimm->mtype = MEM_DDR;
			break;
		case 0x13:
			if (entry->type_detail & 1 << 13)
				dimm->mtype = MEM_RDDR2;
			else
				dimm->mtype = MEM_DDR2;
			break;
		case 0x14:
			dimm->mtype = MEM_FB_DDR2;
			break;
		case 0x18:
			if (entry->type_detail & 1 << 13)
				dimm->mtype = MEM_RDDR3;
			else
				dimm->mtype = MEM_DDR3;
			break;
		default:
			if (entry->type_detail & 1 << 6)
				dimm->mtype = MEM_RMBS;
			else if ((entry->type_detail & ((1 << 7) | (1 << 13)))
				 == ((1 << 7) | (1 << 13)))
				dimm->mtype = MEM_RDR;
			else if (entry->type_detail & 1 << 7)
				dimm->mtype = MEM_SDR;
			else if (entry->type_detail & 1 << 9)
				dimm->mtype = MEM_EDO;
			else
				dimm->mtype = MEM_UNKNOWN;
		}
		/*
		 * Actually, we can only detect if the memory has bits for
		 * checksum or not
		 */
		if (entry->total_width == entry->data_width)
			dimm->edac_mode = EDAC_NONE;
		else
			dimm->edac_mode = EDAC_SECDED;
		dimm->dtype = DEV_UNKNOWN;
		dimm->grain = 128;		/* Likely, worse case */

		/*
		 * FIXME: It shouldn't be hard to also fill the DIMM labels
		 */
		if (dimm->nr_pages) {
			edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
				 dimm_fill->count, edac_mem_types[dimm->mtype],
				 PAGES_TO_MiB(dimm->nr_pages),
				 (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
			edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
				 entry->memory_type, entry->type_detail,
				 entry->total_width, entry->data_width);
		}

		dimm_fill->count++;
	}
}
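
/*
 * Translate an APEI/CPER memory error record into an EDAC error report:
 * fill struct edac_raw_error_desc, emit the mc_event tracepoint and hand
 * the error to the EDAC core.
 */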
void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
				struct cper_sec_mem_err *mem_err)
{
	enum hw_event_mc_err_type type;
	struct edac_raw_error_desc *e;
	struct mem_ctl_info *mci;
	struct ghes_edac_pvt *pvt = NULL;
	char *p;
	u8 grain_bits;
	list_for_each_entry(pvt, &ghes_reglist, list) {
		if (ghes == pvt->ghes)
			break;
	}
	if (!pvt) {
		pr_err("Internal error: Can't find EDAC structure\n");
		return;
	}
	mci = pvt->mci;
	e = &mci->error_desc;
	/* Cleans the error report buffer */
	memset(e, 0, sizeof (*e));
	e->error_count = 1;
	strcpy(e->label, "unknown label");
	e->msg = pvt->msg;
	e->other_detail = pvt->other_detail;
	e->top_layer = -1;
	e->mid_layer = -1;
	e->low_layer = -1;
	*pvt->other_detail = '\0';
	*pvt->msg = '\0';
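
	/* Map the GHES severity to the closest EDAC error type */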
	switch (sev) {
	case GHES_SEV_CORRECTED:
		type = HW_EVENT_ERR_CORRECTED;
		break;
	case GHES_SEV_RECOVERABLE:
		type = HW_EVENT_ERR_UNCORRECTED;
		break;
	case GHES_SEV_PANIC:
		type = HW_EVENT_ERR_FATAL;
		break;
	default:
	case GHES_SEV_NO:
		type = HW_EVENT_ERR_INFO;
	}
	edac_dbg(1, "error validation_bits: 0x%08llx\n",
		 (long long)mem_err->validation_bits);
	/* Error type, mapped on e->msg */
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
		p = pvt->msg;
		switch (mem_err->error_type) {
		case 0:
			p += sprintf(p, "Unknown");
			break;
		case 1:
			p += sprintf(p, "No error");
			break;
		case 2:
			p += sprintf(p, "Single-bit ECC");
			break;
		case 3:
			p += sprintf(p, "Multi-bit ECC");
			break;
		case 4:
			p += sprintf(p, "Single-symbol ChipKill ECC");
			break;
		case 5:
			p += sprintf(p, "Multi-symbol ChipKill ECC");
			break;
		case 6:
			p += sprintf(p, "Master abort");
			break;
		case 7:
			p += sprintf(p, "Target abort");
			break;
		case 8:
			p += sprintf(p, "Parity Error");
			break;
		case 9:
			p += sprintf(p, "Watchdog timeout");
			break;
		case 10:
			p += sprintf(p, "Invalid address");
			break;
		case 11:
			p += sprintf(p, "Mirror Broken");
			break;
		case 12:
			p += sprintf(p, "Memory Sparing");
			break;
		case 13:
			p += sprintf(p, "Scrub corrected error");
			break;
		case 14:
			p += sprintf(p, "Scrub uncorrected error");
			break;
		case 15:
			p += sprintf(p, "Physical Memory Map-out event");
			break;
		default:
			p += sprintf(p, "reserved error (%d)",
				     mem_err->error_type);
		}
	} else {
		strcpy(pvt->msg, "unknown error");
	}
	/* Error address */
	if (mem_err->validation_bits & CPER_MEM_VALID_PA) {
		e->page_frame_number = mem_err->physical_addr >> PAGE_SHIFT;
		e->offset_in_page = mem_err->physical_addr & ~PAGE_MASK;
	}

	/* Error grain */
	if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
		e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
	/* Memory error location, mapped on e->location */
	p = e->location;
	if (mem_err->validation_bits & CPER_MEM_VALID_NODE)
		p += sprintf(p, "node:%d ", mem_err->node);
	if (mem_err->validation_bits & CPER_MEM_VALID_CARD)
		p += sprintf(p, "card:%d ", mem_err->card);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
		p += sprintf(p, "module:%d ", mem_err->module);
	if (mem_err->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
		p += sprintf(p, "rank:%d ", mem_err->rank);
	if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
		p += sprintf(p, "bank:%d ", mem_err->bank);
	if (mem_err->validation_bits & CPER_MEM_VALID_ROW)
		p += sprintf(p, "row:%d ", mem_err->row);
	if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN)
		p += sprintf(p, "col:%d ", mem_err->column);
	if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
		p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
		const char *bank = NULL, *device = NULL;

		dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
		if (bank != NULL && device != NULL)
			p += sprintf(p, "DIMM location:%s %s ", bank, device);
		else
			p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
				     mem_err->mem_dev_handle);
	}
	if (p > e->location)
		*(p - 1) = '\0';
	/* All other fields are mapped on e->other_detail */
	p = pvt->other_detail;
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
		u64 status = mem_err->error_status;

		p += sprintf(p, "status(0x%016llx): ", (long long)status);
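		/* Decode the error type field (bits 15:8) of the CPER error status */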
		switch ((status >> 8) & 0xff) {
		case 1:
			p += sprintf(p, "Error detected internal to the component ");
			break;
		case 16:
			p += sprintf(p, "Error detected in the bus ");
			break;
		case 4:
			p += sprintf(p, "Storage error in DRAM memory ");
			break;
		case 5:
			p += sprintf(p, "Storage error in TLB ");
			break;
		case 6:
			p += sprintf(p, "Storage error in cache ");
			break;
		case 7:
			p += sprintf(p, "Error in one or more functional units ");
			break;
		case 8:
			p += sprintf(p, "component failed self test ");
			break;
		case 9:
			p += sprintf(p, "Overflow or undervalue of internal queue ");
			break;
		case 17:
			p += sprintf(p, "Virtual address not found on IO-TLB or IO-PDIR ");
			break;
		case 18:
			p += sprintf(p, "Improper access error ");
			break;
		case 19:
			p += sprintf(p, "Access to a memory address which is not mapped to any component ");
			break;
		case 20:
			p += sprintf(p, "Loss of Lockstep ");
			break;
		case 21:
			p += sprintf(p, "Response not associated with a request ");
			break;
		case 22:
			p += sprintf(p, "Bus parity error - must also set the A, C, or D Bits ");
			break;
		case 23:
			p += sprintf(p, "Detection of a PATH_ERROR ");
			break;
		case 25:
			p += sprintf(p, "Bus operation timeout ");
			break;
		case 26:
			p += sprintf(p, "A read was issued to data that has been poisoned ");
			break;
		default:
			p += sprintf(p, "reserved ");
			break;
		}
	}
	if (mem_err->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
		p += sprintf(p, "requestorID: 0x%016llx ",
			     (long long)mem_err->requestor_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
		p += sprintf(p, "responderID: 0x%016llx ",
			     (long long)mem_err->responder_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_TARGET_ID)
		p += sprintf(p, "targetID: 0x%016llx ",
			     (long long)mem_err->target_id);
	if (p > pvt->other_detail)
		*(p - 1) = '\0';
	/* Generate the trace event */
	grain_bits = fls_long(e->grain);
	snprintf(pvt->detail_location, sizeof(pvt->detail_location),
		 "APEI location: %s %s", e->location, e->other_detail);
	trace_mc_event(type, e->msg, e->label, e->error_count,
		       mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
		       (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
		       grain_bits, e->syndrome, pvt->detail_location);

	/* Report the error via EDAC API */
	edac_raw_mc_handle_error(type, mci, e);
}
EXPORT_SYMBOL_GPL(ghes_edac_report_mem_error);
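
/*
 * Register one EDAC memory controller per GHES error source; DIMM info is
 * filled from the SMBIOS (DMI) tables for memory controller #0 only.
 */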
int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
	bool fake = false;
	int rc, num_dimm = 0;
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[1];
	struct ghes_edac_pvt *pvt;
	struct ghes_edac_dimm_fill dimm_fill;

	/* Get the number of DIMMs */
	dmi_walk(ghes_edac_count_dimms, &num_dimm);
	/* Check if we've got a bogus BIOS */
	if (num_dimm == 0) {
		fake = true;
		num_dimm = 1;
	}

	layers[0].type = EDAC_MC_LAYER_ALL_MEM;
	layers[0].size = num_dimm;
	layers[0].is_virt_csrow = true;
	/*
	 * We need to serialize edac_mc_alloc() and edac_mc_add_mc(),
	 * to avoid duplicated memory controller numbers
	 */
	mutex_lock(&ghes_edac_lock);
	mci = edac_mc_alloc(ghes_edac_mc_num, ARRAY_SIZE(layers), layers,
			    sizeof(*pvt));
	if (!mci) {
		pr_info("Can't allocate memory for EDAC data\n");
		mutex_unlock(&ghes_edac_lock);
		return -ENOMEM;
	}
	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));
	list_add_tail(&pvt->list, &ghes_reglist);
	pvt->ghes = ghes;
	pvt->mci  = mci;
	mci->pdev = dev;
	mci->mtype_cap = MEM_FLAG_EMPTY;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "ghes_edac.c";
	mci->ctl_name = "ghes_edac";
	mci->dev_name = "ghes";
	if (!ghes_edac_mc_num) {
		if (!fake) {
			pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
			pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
			pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
			pr_info("If you find incorrect reports, please contact your hardware vendor\n");
			pr_info("to correct its BIOS.\n");
			pr_info("This system has %d DIMM sockets.\n",
				num_dimm);
		} else {
			pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
			pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
			pr_info("work on such system. Use this driver with caution\n");
		}
	}
	if (!fake) {
		/*
		 * Fill DIMM info from DMI for the memory controller #0
		 *
		 * Keep it in blank for the other memory controllers, as
		 * there's no reliable way to properly credit each DIMM to
		 * the memory controller, as different BIOSes fill the
		 * DMI bank location fields on different ways
		 */
		if (!ghes_edac_mc_num) {
			dimm_fill.count = 0;
			dimm_fill.mci = mci;
			dmi_walk(ghes_edac_dmidecode, &dimm_fill);
		}
	} else {
		struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
							mci->n_layers, 0, 0, 0);

		dimm->nr_pages = 1;
		dimm->grain = 128;
		dimm->mtype = MEM_UNKNOWN;
		dimm->dtype = DEV_UNKNOWN;
		dimm->edac_mode = EDAC_SECDED;
	}
	rc = edac_mc_add_mc(mci);
	if (rc < 0) {
		pr_info("Can't register at EDAC core\n");
		edac_mc_free(mci);
		mutex_unlock(&ghes_edac_lock);
		return -ENODEV;
	}

	ghes_edac_mc_num++;
	mutex_unlock(&ghes_edac_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ghes_edac_register);
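
/* Remove and free the EDAC memory controller(s) registered for this GHES source */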
void ghes_edac_unregister(struct ghes *ghes)
{
	struct mem_ctl_info *mci;
	struct ghes_edac_pvt *pvt, *tmp;

	list_for_each_entry_safe(pvt, tmp, &ghes_reglist, list) {
		if (ghes == pvt->ghes) {
			mci = pvt->mci;
			edac_mc_del_mc(mci->pdev);
			edac_mc_free(mci);
			list_del(&pvt->list);
		}
	}
}
EXPORT_SYMBOL_GPL(ghes_edac_unregister);