drivers/edac/ghes_edac.c
/*
 * GHES/EDAC Linux driver
 *
 * This file may be distributed under the terms of the GNU General Public
 * License version 2.
 *
 * Copyright (c) 2013 by Mauro Carvalho Chehab
 *
 * Red Hat Inc. http://www.redhat.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <acpi/ghes.h>
#include <linux/edac.h>
#include <linux/dmi.h>
#include "edac_core.h"
#include <ras/ras_event.h>

#define GHES_EDAC_REVISION " Ver: 1.0.0"

struct ghes_edac_pvt {
        struct list_head list;
        struct ghes *ghes;
        struct mem_ctl_info *mci;

        /* Buffers for the error handling routine */
        char detail_location[240];
        char other_detail[160];
        char msg[80];
};
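
/*
 * Registration bookkeeping: every ghes instance registered below gets a
 * ghes_edac_pvt entry on ghes_reglist, and ghes_edac_lock serializes
 * edac_mc_alloc()/edac_mc_add_mc() so memory-controller numbers stay unique.
 */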
static LIST_HEAD(ghes_reglist);
static DEFINE_MUTEX(ghes_edac_lock);
static int ghes_edac_mc_num;

/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
        u8 type;
        u8 length;
        u16 handle;
        u16 phys_mem_array_handle;
        u16 mem_err_info_handle;
        u16 total_width;
        u16 data_width;
        u16 size;
        u8 form_factor;
        u8 device_set;
        u8 device_locator;
        u8 bank_locator;
        u8 memory_type;
        u16 type_detail;
        u16 speed;
        u8 manufacturer;
        u8 serial_number;
        u8 asset_tag;
        u8 part_number;
        u8 attributes;
        u32 extended_size;
        u16 conf_mem_clk_speed;
} __attribute__((__packed__));
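
/*
 * The struct above stays packed so its fields line up byte-for-byte with the
 * raw SMBIOS Type 17 record that ghes_edac_dmidecode() casts from the
 * dmi_header it receives.
 */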

struct ghes_edac_dimm_fill {
        struct mem_ctl_info *mci;
        unsigned count;
};

char *memory_type[] = {
        [MEM_EMPTY] = "EMPTY",
        [MEM_RESERVED] = "RESERVED",
        [MEM_UNKNOWN] = "UNKNOWN",
        [MEM_FPM] = "FPM",
        [MEM_EDO] = "EDO",
        [MEM_BEDO] = "BEDO",
        [MEM_SDR] = "SDR",
        [MEM_RDR] = "RDR",
        [MEM_DDR] = "DDR",
        [MEM_RDDR] = "RDDR",
        [MEM_RMBS] = "RMBS",
        [MEM_DDR2] = "DDR2",
        [MEM_FB_DDR2] = "FB_DDR2",
        [MEM_RDDR2] = "RDDR2",
        [MEM_XDR] = "XDR",
        [MEM_DDR3] = "DDR3",
        [MEM_RDDR3] = "RDDR3",
};

static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
{
        int *num_dimm = arg;

        if (dh->type == DMI_ENTRY_MEM_DEVICE)
                (*num_dimm)++;
}

static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
{
        struct ghes_edac_dimm_fill *dimm_fill = arg;
        struct mem_ctl_info *mci = dimm_fill->mci;

        if (dh->type == DMI_ENTRY_MEM_DEVICE) {
                struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
                struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
                                                       mci->n_layers,
                                                       dimm_fill->count, 0, 0);
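
                /*
                 * SMBIOS encodes the DIMM size specially: 0xffff means the
                 * size is unknown, while 0x7fff means the real value lives
                 * in the extended_size field.
                 */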
                if (entry->size == 0xffff) {
                        pr_info("Can't get DIMM%i size\n",
                                dimm_fill->count);
                        dimm->nr_pages = MiB_TO_PAGES(32); /* Unknown */
                } else if (entry->size == 0x7fff) {
                        dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
                } else {
                        if (entry->size & 1 << 15)
                                dimm->nr_pages = MiB_TO_PAGES((entry->size &
                                                               0x7fff) << 10);
                        else
                                dimm->nr_pages = MiB_TO_PAGES(entry->size);
                }
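
                /*
                 * Map the SMBIOS memory-type code (0x12 = DDR, 0x13 = DDR2,
                 * 0x14 = DDR2 FB-DIMM, 0x18 = DDR3) to an EDAC mem_type;
                 * type_detail bit 13 flags a registered (buffered) module.
                 */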
                switch (entry->memory_type) {
                case 0x12:
                        if (entry->type_detail & 1 << 13)
                                dimm->mtype = MEM_RDDR;
                        else
                                dimm->mtype = MEM_DDR;
                        break;
                case 0x13:
                        if (entry->type_detail & 1 << 13)
                                dimm->mtype = MEM_RDDR2;
                        else
                                dimm->mtype = MEM_DDR2;
                        break;
                case 0x14:
                        dimm->mtype = MEM_FB_DDR2;
                        break;
                case 0x18:
                        if (entry->type_detail & 1 << 13)
                                dimm->mtype = MEM_RDDR3;
                        else
                                dimm->mtype = MEM_DDR3;
                        break;
                default:
                        if (entry->type_detail & 1 << 6)
                                dimm->mtype = MEM_RMBS;
                        else if ((entry->type_detail & ((1 << 7) | (1 << 13)))
                                 == ((1 << 7) | (1 << 13)))
                                dimm->mtype = MEM_RDR;
                        else if (entry->type_detail & 1 << 7)
                                dimm->mtype = MEM_SDR;
                        else if (entry->type_detail & 1 << 9)
                                dimm->mtype = MEM_EDO;
                        else
                                dimm->mtype = MEM_UNKNOWN;
                }

                /*
                 * Actually, we can only detect if the memory has bits for
                 * checksum or not
                 */
                if (entry->total_width == entry->data_width)
                        dimm->edac_mode = EDAC_NONE;
                else
                        dimm->edac_mode = EDAC_SECDED;

                dimm->dtype = DEV_UNKNOWN;
                dimm->grain = 128;      /* Likely, worst case */

                /*
                 * FIXME: It shouldn't be hard to also fill the DIMM labels
                 */

                if (dimm->nr_pages) {
                        edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
                                 dimm_fill->count, memory_type[dimm->mtype],
                                 PAGES_TO_MiB(dimm->nr_pages),
                                 (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
                        edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
                                 entry->memory_type, entry->type_detail,
                                 entry->total_width, entry->data_width);
                }

                dimm_fill->count++;
        }
}
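
/*
 * Called by the GHES driver when a CPER memory error record arrives;
 * translate it into an EDAC error report on the matching controller.
 */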
void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
                                struct cper_sec_mem_err *mem_err)
{
        enum hw_event_mc_err_type type;
        struct edac_raw_error_desc *e;
        struct mem_ctl_info *mci;
        struct ghes_edac_pvt *pvt = NULL;
        char *p;
        u8 grain_bits;

        list_for_each_entry(pvt, &ghes_reglist, list) {
                if (ghes == pvt->ghes)
                        break;
        }
        if (!pvt) {
                pr_err("Internal error: Can't find EDAC structure\n");
                return;
        }
        mci = pvt->mci;
        e = &mci->error_desc;

        /* Cleans the error report buffer */
        memset(e, 0, sizeof (*e));
        e->error_count = 1;
        strcpy(e->label, "unknown label");
        e->msg = pvt->msg;
        e->other_detail = pvt->other_detail;
        e->top_layer = -1;
        e->mid_layer = -1;
        e->low_layer = -1;
        *pvt->other_detail = '\0';
        *pvt->msg = '\0';

        switch (sev) {
        case GHES_SEV_CORRECTED:
                type = HW_EVENT_ERR_CORRECTED;
                break;
        case GHES_SEV_RECOVERABLE:
                type = HW_EVENT_ERR_UNCORRECTED;
                break;
        case GHES_SEV_PANIC:
                type = HW_EVENT_ERR_FATAL;
                break;
        default:
        case GHES_SEV_NO:
                type = HW_EVENT_ERR_INFO;
        }

        edac_dbg(1, "error validation_bits: 0x%08llx\n",
                 (long long)mem_err->validation_bits);

        /* Error type, mapped on e->msg */
        if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
                p = pvt->msg;
                switch (mem_err->error_type) {
                case 0:
                        p += sprintf(p, "Unknown");
                        break;
                case 1:
                        p += sprintf(p, "No error");
                        break;
                case 2:
                        p += sprintf(p, "Single-bit ECC");
                        break;
                case 3:
                        p += sprintf(p, "Multi-bit ECC");
                        break;
                case 4:
                        p += sprintf(p, "Single-symbol ChipKill ECC");
                        break;
                case 5:
                        p += sprintf(p, "Multi-symbol ChipKill ECC");
                        break;
                case 6:
                        p += sprintf(p, "Master abort");
                        break;
                case 7:
                        p += sprintf(p, "Target abort");
                        break;
                case 8:
                        p += sprintf(p, "Parity Error");
                        break;
                case 9:
                        p += sprintf(p, "Watchdog timeout");
                        break;
                case 10:
                        p += sprintf(p, "Invalid address");
                        break;
                case 11:
                        p += sprintf(p, "Mirror Broken");
                        break;
                case 12:
                        p += sprintf(p, "Memory Sparing");
                        break;
                case 13:
                        p += sprintf(p, "Scrub corrected error");
                        break;
                case 14:
                        p += sprintf(p, "Scrub uncorrected error");
                        break;
                case 15:
                        p += sprintf(p, "Physical Memory Map-out event");
                        break;
                default:
                        p += sprintf(p, "reserved error (%d)",
                                     mem_err->error_type);
                }
        } else {
                strcpy(pvt->msg, "unknown error");
        }

        /* Error address */
        if (mem_err->validation_bits & CPER_MEM_VALID_PA) {
                e->page_frame_number = mem_err->physical_addr >> PAGE_SHIFT;
                e->offset_in_page = mem_err->physical_addr & ~PAGE_MASK;
        }

        /* Error grain */
        if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
                e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);

        /* Memory error location, mapped on e->location */
        p = e->location;
        if (mem_err->validation_bits & CPER_MEM_VALID_NODE)
                p += sprintf(p, "node:%d ", mem_err->node);
        if (mem_err->validation_bits & CPER_MEM_VALID_CARD)
                p += sprintf(p, "card:%d ", mem_err->card);
        if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
                p += sprintf(p, "module:%d ", mem_err->module);
        if (mem_err->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
                p += sprintf(p, "rank:%d ", mem_err->rank);
        if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
                p += sprintf(p, "bank:%d ", mem_err->bank);
        if (mem_err->validation_bits & CPER_MEM_VALID_ROW)
                p += sprintf(p, "row:%d ", mem_err->row);
        if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN)
                p += sprintf(p, "col:%d ", mem_err->column);
        if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
                p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
        if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
                const char *bank = NULL, *device = NULL;
                dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
                if (bank != NULL && device != NULL)
                        p += sprintf(p, "DIMM location:%s %s ", bank, device);
                else
                        p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
                                     mem_err->mem_dev_handle);
        }
        if (p > e->location)
                *(p - 1) = '\0';

        /* All other fields are mapped on e->other_detail */
        p = pvt->other_detail;
        if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
                u64 status = mem_err->error_status;

                p += sprintf(p, "status(0x%016llx): ", (long long)status);
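                /*
                 * Bits 15:8 of the CPER error status carry the error-type
                 * code decoded below.
                 */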
                switch ((status >> 8) & 0xff) {
                case 1:
                        p += sprintf(p, "Error detected internal to the component ");
                        break;
                case 16:
                        p += sprintf(p, "Error detected in the bus ");
                        break;
                case 4:
                        p += sprintf(p, "Storage error in DRAM memory ");
                        break;
                case 5:
                        p += sprintf(p, "Storage error in TLB ");
                        break;
                case 6:
                        p += sprintf(p, "Storage error in cache ");
                        break;
                case 7:
                        p += sprintf(p, "Error in one or more functional units ");
                        break;
                case 8:
                        p += sprintf(p, "component failed self test ");
                        break;
                case 9:
                        p += sprintf(p, "Overflow or undervalue of internal queue ");
                        break;
                case 17:
                        p += sprintf(p, "Virtual address not found on IO-TLB or IO-PDIR ");
                        break;
                case 18:
                        p += sprintf(p, "Improper access error ");
                        break;
                case 19:
                        p += sprintf(p, "Access to a memory address which is not mapped to any component ");
                        break;
                case 20:
                        p += sprintf(p, "Loss of Lockstep ");
                        break;
                case 21:
                        p += sprintf(p, "Response not associated with a request ");
                        break;
                case 22:
                        p += sprintf(p, "Bus parity error - must also set the A, C, or D Bits ");
                        break;
                case 23:
                        p += sprintf(p, "Detection of a PATH_ERROR ");
                        break;
                case 25:
                        p += sprintf(p, "Bus operation timeout ");
                        break;
                case 26:
                        p += sprintf(p, "A read was issued to data that has been poisoned ");
                        break;
                default:
                        p += sprintf(p, "reserved ");
                        break;
                }
        }
        if (mem_err->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
                p += sprintf(p, "requestorID: 0x%016llx ",
                             (long long)mem_err->requestor_id);
        if (mem_err->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
                p += sprintf(p, "responderID: 0x%016llx ",
                             (long long)mem_err->responder_id);
        if (mem_err->validation_bits & CPER_MEM_VALID_TARGET_ID)
                p += sprintf(p, "targetID: 0x%016llx ",
                             (long long)mem_err->target_id);
        if (p > pvt->other_detail)
                *(p - 1) = '\0';

        /* Generate the trace event */
        grain_bits = fls_long(e->grain);
        snprintf(pvt->detail_location, sizeof(pvt->detail_location),
                 "APEI location: %s %s", e->location, e->other_detail);
        trace_mc_event(type, e->msg, e->label, e->error_count,
                       mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
                       PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page,
                       grain_bits, e->syndrome, pvt->detail_location);

        /* Report the error via EDAC API */
        edac_raw_mc_handle_error(type, mci, e);
}
EXPORT_SYMBOL_GPL(ghes_edac_report_mem_error);

int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
        bool fake = false;
        int rc, num_dimm = 0;
        struct mem_ctl_info *mci;
        struct edac_mc_layer layers[1];
        struct ghes_edac_pvt *pvt;
        struct ghes_edac_dimm_fill dimm_fill;

        /* Get the number of DIMMs */
        dmi_walk(ghes_edac_count_dimms, &num_dimm);

        /* Check if we've got a bogus BIOS */
        if (num_dimm == 0) {
                fake = true;
                num_dimm = 1;
        }
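
        /*
         * A single ALL_MEM layer is used, with one slot per DIMM found by
         * the DMI walk (or a single fake slot when the BIOS lists none).
         */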
        layers[0].type = EDAC_MC_LAYER_ALL_MEM;
        layers[0].size = num_dimm;
        layers[0].is_virt_csrow = true;

        /*
         * We need to serialize edac_mc_alloc() and edac_mc_add_mc(),
         * to avoid duplicated memory controller numbers
         */
        mutex_lock(&ghes_edac_lock);
        mci = edac_mc_alloc(ghes_edac_mc_num, ARRAY_SIZE(layers), layers,
                            sizeof(*pvt));
        if (!mci) {
                pr_info("Can't allocate memory for EDAC data\n");
                mutex_unlock(&ghes_edac_lock);
                return -ENOMEM;
        }

        pvt = mci->pvt_info;
        memset(pvt, 0, sizeof(*pvt));
        list_add_tail(&pvt->list, &ghes_reglist);
        pvt->ghes = ghes;
        pvt->mci = mci;
        mci->pdev = dev;

        mci->mtype_cap = MEM_FLAG_EMPTY;
        mci->edac_ctl_cap = EDAC_FLAG_NONE;
        mci->edac_cap = EDAC_FLAG_NONE;
        mci->mod_name = "ghes_edac.c";
        mci->mod_ver = GHES_EDAC_REVISION;
        mci->ctl_name = "ghes_edac";
        mci->dev_name = "ghes";

        if (!ghes_edac_mc_num) {
                if (!fake) {
                        pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
                        pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
                        pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
                        pr_info("If you find incorrect reports, please contact your hardware vendor\n");
                        pr_info("to correct its BIOS.\n");
                        pr_info("This system has %d DIMM sockets.\n",
                                num_dimm);
                } else {
                        pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMs.\n");
                        pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
                        pr_info("work on such a system. Use this driver with caution.\n");
                }
        }

        if (!fake) {
                /*
                 * Fill DIMM info from DMI for the memory controller #0
                 *
                 * Keep it blank for the other memory controllers, as
                 * there's no reliable way to properly credit each DIMM to
                 * the memory controller: different BIOSes fill the
                 * DMI bank location fields in different ways
                 */
                if (!ghes_edac_mc_num) {
                        dimm_fill.count = 0;
                        dimm_fill.mci = mci;
                        dmi_walk(ghes_edac_dmidecode, &dimm_fill);
                }
        } else {
                struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
                                                       mci->n_layers, 0, 0, 0);

                dimm->nr_pages = 1;
                dimm->grain = 128;
                dimm->mtype = MEM_UNKNOWN;
                dimm->dtype = DEV_UNKNOWN;
                dimm->edac_mode = EDAC_SECDED;
        }

        rc = edac_mc_add_mc(mci);
        if (rc < 0) {
                pr_info("Can't register at EDAC core\n");
                edac_mc_free(mci);
                mutex_unlock(&ghes_edac_lock);
                return -ENODEV;
        }

        ghes_edac_mc_num++;
        mutex_unlock(&ghes_edac_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(ghes_edac_register);

void ghes_edac_unregister(struct ghes *ghes)
{
        struct mem_ctl_info *mci;
        struct ghes_edac_pvt *pvt, *tmp;

        list_for_each_entry_safe(pvt, tmp, &ghes_reglist, list) {
                if (ghes == pvt->ghes) {
                        mci = pvt->mci;
                        edac_mc_del_mc(mci->pdev);
                        edac_mc_free(mci);
                        list_del(&pvt->list);
                }
        }
}
EXPORT_SYMBOL_GPL(ghes_edac_unregister);