drivers/edac/amd64_edac.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include "amd64_edac.h"
3 #include <asm/amd_nb.h>
5 static struct edac_pci_ctl_info *pci_ctl;
7 static int report_gart_errors;
8 module_param(report_gart_errors, int, 0644);
11 * Set by command line parameter. If BIOS has enabled the ECC, this override is
12 * cleared to prevent re-enabling the hardware by this driver.
14 static int ecc_enable_override;
15 module_param(ecc_enable_override, int, 0644);
17 static struct msr __percpu *msrs;
19 /* Per-node stuff */
20 static struct ecc_settings **ecc_stngs;
22 /* Number of Unified Memory Controllers */
23 static u8 num_umcs;
26 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
27 * bandwidth to a valid bit pattern. The 'set' operation finds the
28 * 'matching or higher' value.
30 * FIXME: Produce a better mapping/linearisation.
32 static const struct scrubrate {
33 u32 scrubval; /* bit pattern for scrub rate */
34 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
35 } scrubrates[] = {
36 { 0x01, 1600000000UL},
37 { 0x02, 800000000UL},
38 { 0x03, 400000000UL},
39 { 0x04, 200000000UL},
40 { 0x05, 100000000UL},
41 { 0x06, 50000000UL},
42 { 0x07, 25000000UL},
43 { 0x08, 12284069UL},
44 { 0x09, 6274509UL},
45 { 0x0A, 3121951UL},
46 { 0x0B, 1560975UL},
47 { 0x0C, 781440UL},
48 { 0x0D, 390720UL},
49 { 0x0E, 195300UL},
50 { 0x0F, 97650UL},
51 { 0x10, 48854UL},
52 { 0x11, 24427UL},
53 { 0x12, 12213UL},
54 { 0x13, 6101UL},
55 { 0x14, 3051UL},
56 { 0x15, 1523UL},
57 { 0x16, 761UL},
58 { 0x00, 0UL}, /* scrubbing off */
61 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
62 u32 *val, const char *func)
64 int err = 0;
66 err = pci_read_config_dword(pdev, offset, val);
67 if (err)
68 amd64_warn("%s: error reading F%dx%03x.\n",
69 func, PCI_FUNC(pdev->devfn), offset);
71 return err;
74 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
75 u32 val, const char *func)
77 int err = 0;
79 err = pci_write_config_dword(pdev, offset, val);
80 if (err)
81 amd64_warn("%s: error writing to F%dx%03x.\n",
82 func, PCI_FUNC(pdev->devfn), offset);
84 return err;
88 * Select DCT to which PCI cfg accesses are routed
90 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
92 u32 reg = 0;
94 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
95 reg &= (pvt->model == 0x30) ? ~3 : ~1;
96 reg |= dct;
97 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
102 * Depending on the family, F2 DCT reads need special handling:
104 * K8: has a single DCT only and no address offsets >= 0x100
106 * F10h: each DCT has its own set of regs
107 * DCT0 -> F2x040..
108 * DCT1 -> F2x140..
110 * F16h: has only 1 DCT
112 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
114 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
115 int offset, u32 *val)
117 switch (pvt->fam) {
118 case 0xf:
119 if (dct || offset >= 0x100)
120 return -EINVAL;
121 break;
123 case 0x10:
124 if (dct) {
126 * Note: If ganging is enabled, barring the regs
127 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
128 * return 0. (cf. Section 2.8.1 of the F10h BKDG)
130 if (dct_ganging_enabled(pvt))
131 return 0;
133 offset += 0x100;
135 break;
137 case 0x15:
139 * F15h: F2x1xx addresses do not map explicitly to DCT1.
140 * We should select which DCT we access using F1x10C[DctCfgSel]
142 dct = (dct && pvt->model == 0x30) ? 3 : dct;
143 f15h_select_dct(pvt, dct);
144 break;
146 case 0x16:
147 if (dct)
148 return -EINVAL;
149 break;
151 default:
152 break;
154 return amd64_read_pci_cfg(pvt->F2, offset, val);
158 * Memory scrubber control interface. For K8, memory scrubbing is handled by
159 * hardware and can involve L2 cache, dcache as well as the main memory. With
160 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
161 * functionality.
163 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
164 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
165 * bytes/sec for the setting.
167 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
168 * other archs, we might not have access to the caches directly.
171 static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
174 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
175 * are shifted down by 0x5, so scrubval 0x5 is written to the register
176 * as 0x0, scrubval 0x6 as 0x1, etc.
178 if (scrubval >= 0x5 && scrubval <= 0x14) {
179 scrubval -= 0x5;
180 pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
181 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
182 } else {
183 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
187 * Scan the scrub rate mapping table for a close or matching bandwidth value to
188 * issue. If the requested rate is too big, fall back to the highest rate allowed.
190 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
192 u32 scrubval;
193 int i;
196 * map the configured rate (new_bw) to a value specific to the AMD64
197 * memory controller and apply to register. Search for the first
198 * bandwidth entry that does not exceed the requested setting and
199 * program that. If at last entry, turn off DRAM scrubbing.
201 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
202 * by falling back to the last element in scrubrates[].
204 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
206 * skip scrub rates which aren't recommended
207 * (see F10 BKDG, F3x58)
209 if (scrubrates[i].scrubval < min_rate)
210 continue;
212 if (scrubrates[i].bandwidth <= new_bw)
213 break;
216 scrubval = scrubrates[i].scrubval;
218 if (pvt->fam == 0x17 || pvt->fam == 0x18) {
219 __f17h_set_scrubval(pvt, scrubval);
220 } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
221 f15h_select_dct(pvt, 0);
222 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
223 f15h_select_dct(pvt, 1);
224 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
225 } else {
226 pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
229 if (scrubval)
230 return scrubrates[i].bandwidth;
232 return 0;
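/*
 * Illustrative walk-through with a hypothetical request (numbers are made
 * up): with min_rate = 0x5, the entries for scrubvals 0x01-0x04 are skipped.
 * Asking for new_bw = 500000000 then stops at { 0x05, 100000000UL }, the
 * first remaining entry whose bandwidth does not exceed the request, so
 * scrubval 0x05 is programmed and 100000000 is returned as the effective
 * rate. On Fam17h/18h, __f17h_set_scrubval() writes it shifted down, i.e.
 * 0x05 - 0x5 = 0x0, into F17H_SCR_LIMIT_ADDR.
 */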
235 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
237 struct amd64_pvt *pvt = mci->pvt_info;
238 u32 min_scrubrate = 0x5;
240 if (pvt->fam == 0xf)
241 min_scrubrate = 0x0;
243 if (pvt->fam == 0x15) {
244 /* Erratum #505 */
245 if (pvt->model < 0x10)
246 f15h_select_dct(pvt, 0);
248 if (pvt->model == 0x60)
249 min_scrubrate = 0x6;
251 return __set_scrub_rate(pvt, bw, min_scrubrate);
254 static int get_scrub_rate(struct mem_ctl_info *mci)
256 struct amd64_pvt *pvt = mci->pvt_info;
257 int i, retval = -EINVAL;
258 u32 scrubval = 0;
260 switch (pvt->fam) {
261 case 0x15:
262 /* Erratum #505 */
263 if (pvt->model < 0x10)
264 f15h_select_dct(pvt, 0);
266 if (pvt->model == 0x60)
267 amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
268 else
269 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
270 break;
272 case 0x17:
273 case 0x18:
274 amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
275 if (scrubval & BIT(0)) {
276 amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
277 scrubval &= 0xF;
278 scrubval += 0x5;
279 } else {
280 scrubval = 0;
282 break;
284 default:
285 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
286 break;
289 scrubval = scrubval & 0x001F;
291 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
292 if (scrubrates[i].scrubval == scrubval) {
293 retval = scrubrates[i].bandwidth;
294 break;
297 return retval;
301 * returns true if the SysAddr given by sys_addr matches the
302 * DRAM base/limit associated with node_id
304 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
306 u64 addr;
308 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
309 * all ones if the most significant implemented address bit is 1.
310 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
311 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
312 * Application Programming.
314 addr = sys_addr & 0x000000ffffffffffull;
316 return ((addr >= get_dram_base(pvt, nid)) &&
317 (addr <= get_dram_limit(pvt, nid)));
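/*
 * Example of the truncation above (hypothetical address): a sign-extended
 * SysAddr of 0xffffff8012345678 becomes addr = 0x8012345678 after masking
 * with 0x000000ffffffffff, and only that value is compared against the
 * node's DRAM base/limit pair.
 */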
321 * Attempt to map a SysAddr to a node. On success, return a pointer to the
322 * mem_ctl_info structure for the node that the SysAddr maps to.
324 * On failure, return NULL.
326 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
327 u64 sys_addr)
329 struct amd64_pvt *pvt;
330 u8 node_id;
331 u32 intlv_en, bits;
334 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
335 * 3.4.4.2) registers to map the SysAddr to a node ID.
337 pvt = mci->pvt_info;
340 * The value of this field should be the same for all DRAM Base
341 * registers. Therefore we arbitrarily choose to read it from the
342 * register for node 0.
344 intlv_en = dram_intlv_en(pvt, 0);
346 if (intlv_en == 0) {
347 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
348 if (base_limit_match(pvt, sys_addr, node_id))
349 goto found;
351 goto err_no_match;
354 if (unlikely((intlv_en != 0x01) &&
355 (intlv_en != 0x03) &&
356 (intlv_en != 0x07))) {
357 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
358 return NULL;
361 bits = (((u32) sys_addr) >> 12) & intlv_en;
363 for (node_id = 0; ; ) {
364 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
365 break; /* intlv_sel field matches */
367 if (++node_id >= DRAM_RANGES)
368 goto err_no_match;
371 /* sanity test for sys_addr */
372 if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
373 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
374 "range for node %d with node interleaving enabled.\n",
375 __func__, sys_addr, node_id);
376 return NULL;
379 found:
380 return edac_mc_find((int)node_id);
382 err_no_match:
383 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
384 (unsigned long)sys_addr);
386 return NULL;
390 * compute the CS base address of the @csrow on the DRAM controller @dct.
391 * For details see F2x[5C:40] in the processor's BKDG
393 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
394 u64 *base, u64 *mask)
396 u64 csbase, csmask, base_bits, mask_bits;
397 u8 addr_shift;
399 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
400 csbase = pvt->csels[dct].csbases[csrow];
401 csmask = pvt->csels[dct].csmasks[csrow];
402 base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
403 mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
404 addr_shift = 4;
407 * F16h and F15h, models 30h and later need two addr_shift values:
408 * 8 for high and 6 for low (cf. F16h BKDG).
410 } else if (pvt->fam == 0x16 ||
411 (pvt->fam == 0x15 && pvt->model >= 0x30)) {
412 csbase = pvt->csels[dct].csbases[csrow];
413 csmask = pvt->csels[dct].csmasks[csrow >> 1];
415 *base = (csbase & GENMASK_ULL(15, 5)) << 6;
416 *base |= (csbase & GENMASK_ULL(30, 19)) << 8;
418 *mask = ~0ULL;
419 /* poke holes for the csmask */
420 *mask &= ~((GENMASK_ULL(15, 5) << 6) |
421 (GENMASK_ULL(30, 19) << 8));
423 *mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
424 *mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
426 return;
427 } else {
428 csbase = pvt->csels[dct].csbases[csrow];
429 csmask = pvt->csels[dct].csmasks[csrow >> 1];
430 addr_shift = 8;
432 if (pvt->fam == 0x15)
433 base_bits = mask_bits =
434 GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
435 else
436 base_bits = mask_bits =
437 GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
440 *base = (csbase & base_bits) << addr_shift;
442 *mask = ~0ULL;
443 /* poke holes for the csmask */
444 *mask &= ~(mask_bits << addr_shift);
445 /* OR them in */
446 *mask |= (csmask & mask_bits) << addr_shift;
449 #define for_each_chip_select(i, dct, pvt) \
450 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
452 #define chip_select_base(i, dct, pvt) \
453 pvt->csels[dct].csbases[i]
455 #define for_each_chip_select_mask(i, dct, pvt) \
456 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
458 #define for_each_umc(i) \
459 for (i = 0; i < num_umcs; i++)
462 * @input_addr is an InputAddr associated with the node given by mci. Return the
463 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
465 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
467 struct amd64_pvt *pvt;
468 int csrow;
469 u64 base, mask;
471 pvt = mci->pvt_info;
473 for_each_chip_select(csrow, 0, pvt) {
474 if (!csrow_enabled(csrow, 0, pvt))
475 continue;
477 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
479 mask = ~mask;
481 if ((input_addr & mask) == (base & mask)) {
482 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
483 (unsigned long)input_addr, csrow,
484 pvt->mc_node_id);
486 return csrow;
489 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
490 (unsigned long)input_addr, pvt->mc_node_id);
492 return -1;
496 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
497 * for the node represented by mci. Info is passed back in *hole_base,
498 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
499 * info is invalid. Info may be invalid for either of the following reasons:
501 * - The revision of the node is not E or greater. In this case, the DRAM Hole
502 * Address Register does not exist.
504 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
505 * indicating that its contents are not valid.
507 * The values passed back in *hole_base, *hole_offset, and *hole_size are
508 * complete 32-bit values despite the fact that the bitfields in the DHAR
509 * only represent bits 31-24 of the base and offset values.
511 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
512 u64 *hole_offset, u64 *hole_size)
514 struct amd64_pvt *pvt = mci->pvt_info;
516 /* only revE and later have the DRAM Hole Address Register */
517 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
518 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
519 pvt->ext_model, pvt->mc_node_id);
520 return 1;
523 /* valid for Fam10h and above */
524 if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
525 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
526 return 1;
529 if (!dhar_valid(pvt)) {
530 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
531 pvt->mc_node_id);
532 return 1;
535 /* This node has Memory Hoisting */
537 /* +------------------+--------------------+--------------------+-----
538 * | memory | DRAM hole | relocated |
539 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
540 * | | | DRAM hole |
541 * | | | [0x100000000, |
542 * | | | (0x100000000+ |
543 * | | | (0xffffffff-x))] |
544 * +------------------+--------------------+--------------------+-----
546 * Above is a diagram of physical memory showing the DRAM hole and the
547 * relocated addresses from the DRAM hole. As shown, the DRAM hole
548 * starts at address x (the base address) and extends through address
549 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
550 * addresses in the hole so that they start at 0x100000000.
553 *hole_base = dhar_base(pvt);
554 *hole_size = (1ULL << 32) - *hole_base;
556 *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
557 : k8_dhar_offset(pvt);
559 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
560 pvt->mc_node_id, (unsigned long)*hole_base,
561 (unsigned long)*hole_offset, (unsigned long)*hole_size);
563 return 0;
565 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
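/*
 * Worked example with hypothetical register contents: if dhar_base() reads
 * back 0xd0000000, then *hole_base = 0xd0000000 and *hole_size =
 * (1ULL << 32) - 0xd0000000 = 0x30000000 (768 MB), i.e. the DRAM behind the
 * hole is hoisted to [0x100000000, 0x12fffffff]. *hole_offset is whatever
 * the BIOS programmed into the DHAR offset field, as returned by
 * k8_dhar_offset()/f10_dhar_offset().
 */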
568 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
569 * assumed that sys_addr maps to the node given by mci.
571 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
572 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
573 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
574 * then it is also involved in translating a SysAddr to a DramAddr. Sections
575 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
576 * These parts of the documentation are unclear. I interpret them as follows:
578 * When node n receives a SysAddr, it processes the SysAddr as follows:
580 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
581 * Limit registers for node n. If the SysAddr is not within the range
582 * specified by the base and limit values, then node n ignores the Sysaddr
583 * (since it does not map to node n). Otherwise continue to step 2 below.
585 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
586 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
587 * the range of relocated addresses (starting at 0x100000000) from the DRAM
588 * hole. If not, skip to step 3 below. Else get the value of the
589 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
590 * offset defined by this value from the SysAddr.
592 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
593 * Base register for node n. To obtain the DramAddr, subtract the base
594 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
596 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
598 struct amd64_pvt *pvt = mci->pvt_info;
599 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
600 int ret;
602 dram_base = get_dram_base(pvt, pvt->mc_node_id);
604 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
605 &hole_size);
606 if (!ret) {
607 if ((sys_addr >= (1ULL << 32)) &&
608 (sys_addr < ((1ULL << 32) + hole_size))) {
609 /* use DHAR to translate SysAddr to DramAddr */
610 dram_addr = sys_addr - hole_offset;
612 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
613 (unsigned long)sys_addr,
614 (unsigned long)dram_addr);
616 return dram_addr;
621 * Translate the SysAddr to a DramAddr as shown near the start of
622 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
623 * only deals with 40-bit values. Therefore we discard bits 63-40 of
624 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
625 * discard are all 1s. Otherwise the bits we discard are all 0s. See
626 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
627 * Programmer's Manual Volume 1 Application Programming.
629 dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
631 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
632 (unsigned long)sys_addr, (unsigned long)dram_addr);
633 return dram_addr;
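/*
 * Worked example (hypothetical values): continuing the DHAR case above with
 * hole_offset = 0x30000000, a hoisted SysAddr of 0x108000000 yields
 * DramAddr = 0x108000000 - 0x30000000 = 0xd8000000. A SysAddr outside the
 * hoisted window, say 0x240000000 on a node whose DRAM base is 0x200000000,
 * takes the fallback path and yields DramAddr = 0x40000000.
 */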
637 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
638 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
639 * for node interleaving.
641 static int num_node_interleave_bits(unsigned intlv_en)
643 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
644 int n;
646 BUG_ON(intlv_en > 7);
647 n = intlv_shift_table[intlv_en];
648 return n;
651 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
652 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
654 struct amd64_pvt *pvt;
655 int intlv_shift;
656 u64 input_addr;
658 pvt = mci->pvt_info;
661 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
662 * concerning translating a DramAddr to an InputAddr.
664 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
665 input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
666 (dram_addr & 0xfff);
668 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
669 intlv_shift, (unsigned long)dram_addr,
670 (unsigned long)input_addr);
672 return input_addr;
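/*
 * Worked example (hypothetical values): with IntlvEn = 0x3 (4-node
 * interleave), num_node_interleave_bits() returns intlv_shift = 2, so for
 * dram_addr = 0x12345000 the result is
 * ((0x12345000 >> 2) & GENMASK_ULL(35, 12)) + 0x000 = 0x48d1000.
 */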
676 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
677 * assumed that @sys_addr maps to the node given by mci.
679 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
681 u64 input_addr;
683 input_addr =
684 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
686 edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
687 (unsigned long)sys_addr, (unsigned long)input_addr);
689 return input_addr;
692 /* Map the Error address to a PAGE and PAGE OFFSET. */
693 static inline void error_address_to_page_and_offset(u64 error_address,
694 struct err_info *err)
696 err->page = (u32) (error_address >> PAGE_SHIFT);
697 err->offset = ((u32) error_address) & ~PAGE_MASK;
701 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
702 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
703 * of a node that detected an ECC memory error. mci represents the node that
704 * the error address maps to (possibly different from the node that detected
705 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
706 * error.
708 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
710 int csrow;
712 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
714 if (csrow == -1)
715 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
716 "address 0x%lx\n", (unsigned long)sys_addr);
717 return csrow;
720 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
723 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
724 * are ECC capable.
726 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
728 unsigned long edac_cap = EDAC_FLAG_NONE;
729 u8 bit;
731 if (pvt->umc) {
732 u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
734 for_each_umc(i) {
735 if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
736 continue;
738 umc_en_mask |= BIT(i);
740 /* UMC Configuration bit 12 (DimmEccEn) */
741 if (pvt->umc[i].umc_cfg & BIT(12))
742 dimm_ecc_en_mask |= BIT(i);
745 if (umc_en_mask == dimm_ecc_en_mask)
746 edac_cap = EDAC_FLAG_SECDED;
747 } else {
748 bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
749 ? 19
750 : 17;
752 if (pvt->dclr0 & BIT(bit))
753 edac_cap = EDAC_FLAG_SECDED;
756 return edac_cap;
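/*
 * Example of the UMC check above: with two UMCs initialized (SdpInit set)
 * but DimmEccEn set on only one of them, umc_en_mask = 0x3 while
 * dimm_ecc_en_mask = 0x1, so the masks differ and edac_cap stays
 * EDAC_FLAG_NONE.
 */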
759 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
761 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
763 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
765 if (pvt->dram_type == MEM_LRDDR3) {
766 u32 dcsm = pvt->csels[chan].csmasks[0];
768 * It's assumed all LRDIMMs in a DCT are going to be of
769 * the same 'type' until proven otherwise. So, use a cs
770 * value of '0' here to get dcsm value.
772 edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
775 edac_dbg(1, "All DIMMs support ECC:%s\n",
776 (dclr & BIT(19)) ? "yes" : "no");
779 edac_dbg(1, " PAR/ERR parity: %s\n",
780 (dclr & BIT(8)) ? "enabled" : "disabled");
782 if (pvt->fam == 0x10)
783 edac_dbg(1, " DCT 128bit mode width: %s\n",
784 (dclr & BIT(11)) ? "128b" : "64b");
786 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
787 (dclr & BIT(12)) ? "yes" : "no",
788 (dclr & BIT(13)) ? "yes" : "no",
789 (dclr & BIT(14)) ? "yes" : "no",
790 (dclr & BIT(15)) ? "yes" : "no");
793 #define CS_EVEN_PRIMARY BIT(0)
794 #define CS_ODD_PRIMARY BIT(1)
795 #define CS_EVEN_SECONDARY BIT(2)
796 #define CS_ODD_SECONDARY BIT(3)
798 #define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
799 #define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
801 static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
803 int cs_mode = 0;
805 if (csrow_enabled(2 * dimm, ctrl, pvt))
806 cs_mode |= CS_EVEN_PRIMARY;
808 if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
809 cs_mode |= CS_ODD_PRIMARY;
811 /* Asymmetric dual-rank DIMM support. */
812 if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
813 cs_mode |= CS_ODD_SECONDARY;
815 return cs_mode;
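/*
 * Example: a dual-rank DIMM in slot 0 with both CS0 and CS1 enabled yields
 * cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY; a CS1 that is enabled via the
 * secondary base/mask registers additionally contributes CS_ODD_SECONDARY.
 */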
818 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
820 int dimm, size0, size1, cs0, cs1, cs_mode;
822 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
824 for (dimm = 0; dimm < 2; dimm++) {
825 cs0 = dimm * 2;
826 cs1 = dimm * 2 + 1;
828 cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
830 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
831 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
833 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
834 cs0, size0,
835 cs1, size1);
839 static void __dump_misc_regs_df(struct amd64_pvt *pvt)
841 struct amd64_umc *umc;
842 u32 i, tmp, umc_base;
844 for_each_umc(i) {
845 umc_base = get_umc_base(i);
846 umc = &pvt->umc[i];
848 edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
849 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
850 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
851 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
853 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
854 edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
856 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
857 edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
858 edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
860 edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
861 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
862 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
863 edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
864 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
865 edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
866 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
867 edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
868 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
870 if (pvt->dram_type == MEM_LRDDR4) {
871 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
872 edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
873 i, 1 << ((tmp >> 4) & 0x3));
876 debug_display_dimm_sizes_df(pvt, i);
879 edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
880 pvt->dhar, dhar_base(pvt));
883 /* Display and decode various NB registers for debug purposes. */
884 static void __dump_misc_regs(struct amd64_pvt *pvt)
886 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
888 edac_dbg(1, " NB two channel DRAM capable: %s\n",
889 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
891 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
892 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
893 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
895 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
897 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
899 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
900 pvt->dhar, dhar_base(pvt),
901 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
902 : f10_dhar_offset(pvt));
904 debug_display_dimm_sizes(pvt, 0);
906 /* everything below this point is Fam10h and above */
907 if (pvt->fam == 0xf)
908 return;
910 debug_display_dimm_sizes(pvt, 1);
912 /* Only if NOT ganged does dclr1 have valid info */
913 if (!dct_ganging_enabled(pvt))
914 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
917 /* Display and decode various NB registers for debug purposes. */
918 static void dump_misc_regs(struct amd64_pvt *pvt)
920 if (pvt->umc)
921 __dump_misc_regs_df(pvt);
922 else
923 __dump_misc_regs(pvt);
925 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
927 amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
931 * See BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
933 static void prep_chip_selects(struct amd64_pvt *pvt)
935 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
936 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
937 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
938 } else if (pvt->fam == 0x15 && pvt->model == 0x30) {
939 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
940 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
941 } else if (pvt->fam >= 0x17) {
942 int umc;
944 for_each_umc(umc) {
945 pvt->csels[umc].b_cnt = 4;
946 pvt->csels[umc].m_cnt = 2;
949 } else {
950 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
951 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
955 static void read_umc_base_mask(struct amd64_pvt *pvt)
957 u32 umc_base_reg, umc_base_reg_sec;
958 u32 umc_mask_reg, umc_mask_reg_sec;
959 u32 base_reg, base_reg_sec;
960 u32 mask_reg, mask_reg_sec;
961 u32 *base, *base_sec;
962 u32 *mask, *mask_sec;
963 int cs, umc;
965 for_each_umc(umc) {
966 umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
967 umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
969 for_each_chip_select(cs, umc, pvt) {
970 base = &pvt->csels[umc].csbases[cs];
971 base_sec = &pvt->csels[umc].csbases_sec[cs];
973 base_reg = umc_base_reg + (cs * 4);
974 base_reg_sec = umc_base_reg_sec + (cs * 4);
976 if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
977 edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
978 umc, cs, *base, base_reg);
980 if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
981 edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
982 umc, cs, *base_sec, base_reg_sec);
985 umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
986 umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
988 for_each_chip_select_mask(cs, umc, pvt) {
989 mask = &pvt->csels[umc].csmasks[cs];
990 mask_sec = &pvt->csels[umc].csmasks_sec[cs];
992 mask_reg = umc_mask_reg + (cs * 4);
993 mask_reg_sec = umc_mask_reg_sec + (cs * 4);
995 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
996 edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
997 umc, cs, *mask, mask_reg);
999 if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
1000 edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
1001 umc, cs, *mask_sec, mask_reg_sec);
1007 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
1009 static void read_dct_base_mask(struct amd64_pvt *pvt)
1011 int cs;
1013 prep_chip_selects(pvt);
1015 if (pvt->umc)
1016 return read_umc_base_mask(pvt);
1018 for_each_chip_select(cs, 0, pvt) {
1019 int reg0 = DCSB0 + (cs * 4);
1020 int reg1 = DCSB1 + (cs * 4);
1021 u32 *base0 = &pvt->csels[0].csbases[cs];
1022 u32 *base1 = &pvt->csels[1].csbases[cs];
1024 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1025 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
1026 cs, *base0, reg0);
1028 if (pvt->fam == 0xf)
1029 continue;
1031 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1032 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
1033 cs, *base1, (pvt->fam == 0x10) ? reg1
1034 : reg0);
1037 for_each_chip_select_mask(cs, 0, pvt) {
1038 int reg0 = DCSM0 + (cs * 4);
1039 int reg1 = DCSM1 + (cs * 4);
1040 u32 *mask0 = &pvt->csels[0].csmasks[cs];
1041 u32 *mask1 = &pvt->csels[1].csmasks[cs];
1043 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1044 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
1045 cs, *mask0, reg0);
1047 if (pvt->fam == 0xf)
1048 continue;
1050 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1051 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
1052 cs, *mask1, (pvt->fam == 0x10) ? reg1
1053 : reg0);
1057 static void determine_memory_type(struct amd64_pvt *pvt)
1059 u32 dram_ctrl, dcsm;
1061 switch (pvt->fam) {
1062 case 0xf:
1063 if (pvt->ext_model >= K8_REV_F)
1064 goto ddr3;
1066 pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1067 return;
1069 case 0x10:
1070 if (pvt->dchr0 & DDR3_MODE)
1071 goto ddr3;
1073 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1074 return;
1076 case 0x15:
1077 if (pvt->model < 0x60)
1078 goto ddr3;
1081 * Model 0x60h needs special handling:
1083 * We use a Chip Select value of '0' to obtain dcsm.
1084 * Theoretically, it is possible to populate LRDIMMs of different
1085 * 'Rank' value on a DCT. But this is not the common case. So,
1086 * it's reasonable to assume all DIMMs are going to be of the same
1087 * 'type' until proven otherwise.
1089 amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1090 dcsm = pvt->csels[0].csmasks[0];
1092 if (((dram_ctrl >> 8) & 0x7) == 0x2)
1093 pvt->dram_type = MEM_DDR4;
1094 else if (pvt->dclr0 & BIT(16))
1095 pvt->dram_type = MEM_DDR3;
1096 else if (dcsm & 0x3)
1097 pvt->dram_type = MEM_LRDDR3;
1098 else
1099 pvt->dram_type = MEM_RDDR3;
1101 return;
1103 case 0x16:
1104 goto ddr3;
1106 case 0x17:
1107 case 0x18:
1108 if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
1109 pvt->dram_type = MEM_LRDDR4;
1110 else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
1111 pvt->dram_type = MEM_RDDR4;
1112 else
1113 pvt->dram_type = MEM_DDR4;
1114 return;
1116 default:
1117 WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1118 pvt->dram_type = MEM_EMPTY;
1120 return;
1122 ddr3:
1123 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1126 /* Get the number of DCT channels the memory controller is using. */
1127 static int k8_early_channel_count(struct amd64_pvt *pvt)
1129 int flag;
1131 if (pvt->ext_model >= K8_REV_F)
1132 /* RevF (NPT) and later */
1133 flag = pvt->dclr0 & WIDTH_128;
1134 else
1135 /* RevE and earlier */
1136 flag = pvt->dclr0 & REVE_WIDTH_128;
1138 /* not used */
1139 pvt->dclr1 = 0;
1141 return (flag) ? 2 : 1;
1144 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1145 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1147 u16 mce_nid = amd_get_nb_id(m->extcpu);
1148 struct mem_ctl_info *mci;
1149 u8 start_bit = 1;
1150 u8 end_bit = 47;
1151 u64 addr;
1153 mci = edac_mc_find(mce_nid);
1154 if (!mci)
1155 return 0;
1157 pvt = mci->pvt_info;
1159 if (pvt->fam == 0xf) {
1160 start_bit = 3;
1161 end_bit = 39;
1164 addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1167 * Erratum 637 workaround
1169 if (pvt->fam == 0x15) {
1170 u64 cc6_base, tmp_addr;
1171 u32 tmp;
1172 u8 intlv_en;
1174 if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1175 return addr;
1178 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1179 intlv_en = tmp >> 21 & 0x7;
1181 /* add [47:27] + 3 trailing bits */
1182 cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;
1184 /* reverse and add DramIntlvEn */
1185 cc6_base |= intlv_en ^ 0x7;
1187 /* pin at [47:24] */
1188 cc6_base <<= 24;
1190 if (!intlv_en)
1191 return cc6_base | (addr & GENMASK_ULL(23, 0));
1193 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1195 /* faster log2 */
1196 tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1198 /* OR DramIntlvSel into bits [14:12] */
1199 tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1201 /* add remaining [11:0] bits from original MC4_ADDR */
1202 tmp_addr |= addr & GENMASK_ULL(11, 0);
1204 return cc6_base | tmp_addr;
1207 return addr;
1210 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1211 unsigned int device,
1212 struct pci_dev *related)
1214 struct pci_dev *dev = NULL;
1216 while ((dev = pci_get_device(vendor, device, dev))) {
1217 if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1218 (dev->bus->number == related->bus->number) &&
1219 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1220 break;
1223 return dev;
1226 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1228 struct amd_northbridge *nb;
1229 struct pci_dev *f1 = NULL;
1230 unsigned int pci_func;
1231 int off = range << 3;
1232 u32 llim;
1234 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
1235 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1237 if (pvt->fam == 0xf)
1238 return;
1240 if (!dram_rw(pvt, range))
1241 return;
1243 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1244 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1246 /* F15h: factor in CC6 save area by reading dst node's limit reg */
1247 if (pvt->fam != 0x15)
1248 return;
1250 nb = node_to_amd_nb(dram_dst_node(pvt, range));
1251 if (WARN_ON(!nb))
1252 return;
1254 if (pvt->model == 0x60)
1255 pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1256 else if (pvt->model == 0x30)
1257 pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1258 else
1259 pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1261 f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1262 if (WARN_ON(!f1))
1263 return;
1265 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1267 pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1269 /* {[39:27],111b} */
1270 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1272 pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1274 /* [47:40] */
1275 pvt->ranges[range].lim.hi |= llim >> 13;
1277 pci_dev_put(f1);
1280 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1281 struct err_info *err)
1283 struct amd64_pvt *pvt = mci->pvt_info;
1285 error_address_to_page_and_offset(sys_addr, err);
1288 * Find out which node the error address belongs to. This may be
1289 * different from the node that detected the error.
1291 err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1292 if (!err->src_mci) {
1293 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1294 (unsigned long)sys_addr);
1295 err->err_code = ERR_NODE;
1296 return;
1299 /* Now map the sys_addr to a CSROW */
1300 err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1301 if (err->csrow < 0) {
1302 err->err_code = ERR_CSROW;
1303 return;
1306 /* CHIPKILL enabled */
1307 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1308 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1309 if (err->channel < 0) {
1311 * Syndrome didn't map, so we don't know which of the
1312 * 2 DIMMs is in error. So we need to ID 'both' of them
1313 * as suspect.
1315 amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1316 "possible error reporting race\n",
1317 err->syndrome);
1318 err->err_code = ERR_CHANNEL;
1319 return;
1321 } else {
1323 * non-chipkill ecc mode
1325 * The k8 documentation is unclear about how to determine the
1326 * channel number when using non-chipkill memory. This method
1327 * was obtained from email communication with someone at AMD.
1328 * (Wish the email was placed in this comment - norsk)
1330 err->channel = ((sys_addr & BIT(3)) != 0);
1334 static int ddr2_cs_size(unsigned i, bool dct_width)
1336 unsigned shift = 0;
1338 if (i <= 2)
1339 shift = i;
1340 else if (!(i & 0x1))
1341 shift = i >> 1;
1342 else
1343 shift = (i + 1) >> 1;
1345 return 128 << (shift + !!dct_width);
1348 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1349 unsigned cs_mode, int cs_mask_nr)
1351 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1353 if (pvt->ext_model >= K8_REV_F) {
1354 WARN_ON(cs_mode > 11);
1355 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1357 else if (pvt->ext_model >= K8_REV_D) {
1358 unsigned diff;
1359 WARN_ON(cs_mode > 10);
1362 * the below calculation, besides trying to win an obfuscated C
1363 * contest, maps cs_mode values to DIMM chip select sizes. The
1364 * mappings are:
1366 * cs_mode CS size (mb)
1367 * ======= ============
1368 * 0 32
1369 * 1 64
1370 * 2 128
1371 * 3 128
1372 * 4 256
1373 * 5 512
1374 * 6 256
1375 * 7 512
1376 * 8 1024
1377 * 9 1024
1378 * 10 2048
1380 * Basically, it calculates a value with which to shift the
1381 * smallest CS size of 32MB.
1383 * ddr[23]_cs_size have a similar purpose.
1385 diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1387 return 32 << (cs_mode - diff);
1389 else {
1390 WARN_ON(cs_mode > 6);
1391 return 32 << cs_mode;
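/*
 * Worked example for the rev D/E branch above: cs_mode = 7 gives
 * diff = 7/3 + (7 > 5) = 2 + 1 = 3, so the size is 32 << (7 - 3) = 512 MB;
 * cs_mode = 10 gives diff = 10/3 + 1 = 4 and 32 << 6 = 2048 MB, matching
 * the table in the comment.
 */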
1396 * Get the number of DCT channels in use.
1398 * Return:
1399 * number of Memory Channels in operation
1400 * Pass back:
1401 * contents of the DCL0_LOW register
1403 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1405 int i, j, channels = 0;
1407 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1408 if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1409 return 2;
1412 * Need to check if we are in unganged mode: in that case there are 2 channels,
1413 * but they are not in 128 bit mode and thus the above 'dclr0' status
1414 * bit will be OFF.
1416 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1417 * their CSEnable bit on. If so, then SINGLE DIMM case.
1419 edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1422 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1423 * is more than just one DIMM present in unganged mode. Need to check
1424 * both controllers since DIMMs can be placed in either one.
1426 for (i = 0; i < 2; i++) {
1427 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1429 for (j = 0; j < 4; j++) {
1430 if (DBAM_DIMM(j, dbam) > 0) {
1431 channels++;
1432 break;
1437 if (channels > 2)
1438 channels = 2;
1440 amd64_info("MCT channel count: %d\n", channels);
1442 return channels;
1445 static int f17_early_channel_count(struct amd64_pvt *pvt)
1447 int i, channels = 0;
1449 /* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
1450 for_each_umc(i)
1451 channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
1453 amd64_info("MCT channel count: %d\n", channels);
1455 return channels;
1458 static int ddr3_cs_size(unsigned i, bool dct_width)
1460 unsigned shift = 0;
1461 int cs_size = 0;
1463 if (i == 0 || i == 3 || i == 4)
1464 cs_size = -1;
1465 else if (i <= 2)
1466 shift = i;
1467 else if (i == 12)
1468 shift = 7;
1469 else if (!(i & 0x1))
1470 shift = i >> 1;
1471 else
1472 shift = (i + 1) >> 1;
1474 if (cs_size != -1)
1475 cs_size = (128 * (1 << !!dct_width)) << shift;
1477 return cs_size;
1480 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1482 unsigned shift = 0;
1483 int cs_size = 0;
1485 if (i < 4 || i == 6)
1486 cs_size = -1;
1487 else if (i == 12)
1488 shift = 7;
1489 else if (!(i & 0x1))
1490 shift = i >> 1;
1491 else
1492 shift = (i + 1) >> 1;
1494 if (cs_size != -1)
1495 cs_size = rank_multiply * (128 << shift);
1497 return cs_size;
1500 static int ddr4_cs_size(unsigned i)
1502 int cs_size = 0;
1504 if (i == 0)
1505 cs_size = -1;
1506 else if (i == 1)
1507 cs_size = 1024;
1508 else
1509 /* Min cs_size = 1G */
1510 cs_size = 1024 * (1 << (i >> 1));
1512 return cs_size;
1515 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1516 unsigned cs_mode, int cs_mask_nr)
1518 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1520 WARN_ON(cs_mode > 11);
1522 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1523 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1524 else
1525 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1529 * F15h supports only 64bit DCT interfaces
1531 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1532 unsigned cs_mode, int cs_mask_nr)
1534 WARN_ON(cs_mode > 12);
1536 return ddr3_cs_size(cs_mode, false);
1539 /* F15h M60h supports DDR4 mapping as well. */
1540 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1541 unsigned cs_mode, int cs_mask_nr)
1543 int cs_size;
1544 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1546 WARN_ON(cs_mode > 12);
1548 if (pvt->dram_type == MEM_DDR4) {
1549 if (cs_mode > 9)
1550 return -1;
1552 cs_size = ddr4_cs_size(cs_mode);
1553 } else if (pvt->dram_type == MEM_LRDDR3) {
1554 unsigned rank_multiply = dcsm & 0xf;
1556 if (rank_multiply == 3)
1557 rank_multiply = 4;
1558 cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1559 } else {
1560 /* Minimum cs size is 512MB for F15h M60h */
1561 if (cs_mode == 0x1)
1562 return -1;
1564 cs_size = ddr3_cs_size(cs_mode, false);
1567 return cs_size;
1571 * F16h and F15h model 30h have only limited cs_modes.
1573 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1574 unsigned cs_mode, int cs_mask_nr)
1576 WARN_ON(cs_mode > 12);
1578 if (cs_mode == 6 || cs_mode == 8 ||
1579 cs_mode == 9 || cs_mode == 12)
1580 return -1;
1581 else
1582 return ddr3_cs_size(cs_mode, false);
1585 static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1586 unsigned int cs_mode, int csrow_nr)
1588 u32 addr_mask_orig, addr_mask_deinterleaved;
1589 u32 msb, weight, num_zero_bits;
1590 int dimm, size = 0;
1592 /* No Chip Selects are enabled. */
1593 if (!cs_mode)
1594 return size;
1596 /* Requested size of an even CS but none are enabled. */
1597 if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
1598 return size;
1600 /* Requested size of an odd CS but none are enabled. */
1601 if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
1602 return size;
1605 * There is one mask per DIMM, and two Chip Selects per DIMM.
1606 * CS0 and CS1 -> DIMM0
1607 * CS2 and CS3 -> DIMM1
1609 dimm = csrow_nr >> 1;
1611 /* Asymmetric dual-rank DIMM support. */
1612 if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
1613 addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
1614 else
1615 addr_mask_orig = pvt->csels[umc].csmasks[dimm];
1618 * The number of zero bits in the mask is equal to the number of bits
1619 * in a full mask minus the number of bits in the current mask.
1621 * The MSB is the number of bits in the full mask because BIT[0] is
1622 * always 0.
1624 msb = fls(addr_mask_orig) - 1;
1625 weight = hweight_long(addr_mask_orig);
1626 num_zero_bits = msb - weight;
1628 /* Take the number of zero bits off from the top of the mask. */
1629 addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
1631 edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
1632 edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
1633 edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
1635 /* Register [31:1] = Address [39:9]. Size is in kBs here. */
1636 size = (addr_mask_deinterleaved >> 2) + 1;
1638 /* Return size in MBs. */
1639 return size >> 10;
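/*
 * Worked example with a hypothetical mask: addr_mask_orig = 0x01fffffe
 * (bits [24:1] set) gives msb = 24, weight = 24, num_zero_bits = 0, so the
 * deinterleaved mask is unchanged and size = (0x01fffffe >> 2) + 1 =
 * 0x800000 kB = 8192 MB. With one interleave bit punched out, e.g.
 * addr_mask_orig = 0x01ffeffe (bit 12 clear), weight drops to 23,
 * num_zero_bits = 1, the deinterleaved mask becomes
 * GENMASK_ULL(23, 1) = 0xfffffe and the size halves to 4096 MB.
 */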
1642 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1645 if (pvt->fam == 0xf)
1646 return;
1648 if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1649 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1650 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1652 edac_dbg(0, " DCTs operate in %s mode\n",
1653 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1655 if (!dct_ganging_enabled(pvt))
1656 edac_dbg(0, " Address range split per DCT: %s\n",
1657 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1659 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1660 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1661 (dct_memory_cleared(pvt) ? "yes" : "no"));
1663 edac_dbg(0, " channel interleave: %s, "
1664 "interleave bits selector: 0x%x\n",
1665 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1666 dct_sel_interleave_addr(pvt));
1669 amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1673 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1674 * 2.10.12 Memory Interleaving Modes).
1676 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1677 u8 intlv_en, int num_dcts_intlv,
1678 u32 dct_sel)
1680 u8 channel = 0;
1681 u8 select;
1683 if (!(intlv_en))
1684 return (u8)(dct_sel);
1686 if (num_dcts_intlv == 2) {
1687 select = (sys_addr >> 8) & 0x3;
1688 channel = select ? 0x3 : 0;
1689 } else if (num_dcts_intlv == 4) {
1690 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1691 switch (intlv_addr) {
1692 case 0x4:
1693 channel = (sys_addr >> 8) & 0x3;
1694 break;
1695 case 0x5:
1696 channel = (sys_addr >> 9) & 0x3;
1697 break;
1700 return channel;
1704 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1705 * Interleaving Modes.
1707 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1708 bool hi_range_sel, u8 intlv_en)
1710 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1712 if (dct_ganging_enabled(pvt))
1713 return 0;
1715 if (hi_range_sel)
1716 return dct_sel_high;
1719 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1721 if (dct_interleave_enabled(pvt)) {
1722 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1724 /* return DCT select function: 0=DCT0, 1=DCT1 */
1725 if (!intlv_addr)
1726 return sys_addr >> 6 & 1;
1728 if (intlv_addr & 0x2) {
1729 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1730 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
1732 return ((sys_addr >> shift) & 1) ^ temp;
1735 if (intlv_addr & 0x4) {
1736 u8 shift = intlv_addr & 0x1 ? 9 : 8;
1738 return (sys_addr >> shift) & 1;
1741 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1744 if (dct_high_range_enabled(pvt))
1745 return ~dct_sel_high & 1;
1747 return 0;
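/*
 * Example (hypothetical address): in unganged mode with channel interleaving
 * enabled, DctSelIntLvAddr = 0 and a low-range address, the DCT is simply
 * bit 6 of the SysAddr: 0x1234567840 selects DCT1, 0x1234567800 selects
 * DCT0.
 */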
1750 /* Convert the sys_addr to the normalized DCT address */
1751 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1752 u64 sys_addr, bool hi_rng,
1753 u32 dct_sel_base_addr)
1755 u64 chan_off;
1756 u64 dram_base = get_dram_base(pvt, range);
1757 u64 hole_off = f10_dhar_offset(pvt);
1758 u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1760 if (hi_rng) {
1762 * if
1763 * base address of high range is below 4Gb
1764 * (bits [47:27] at [31:11])
1765 * DRAM address space on this DCT is hoisted above 4Gb &&
1766 * sys_addr > 4Gb
1768 * remove hole offset from sys_addr
1769 * else
1770 * remove high range offset from sys_addr
1772 if ((!(dct_sel_base_addr >> 16) ||
1773 dct_sel_base_addr < dhar_base(pvt)) &&
1774 dhar_valid(pvt) &&
1775 (sys_addr >= BIT_64(32)))
1776 chan_off = hole_off;
1777 else
1778 chan_off = dct_sel_base_off;
1779 } else {
1781 * if
1782 * we have a valid hole &&
1783 * sys_addr > 4Gb
1785 * remove hole
1786 * else
1787 * remove dram base to normalize to DCT address
1789 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1790 chan_off = hole_off;
1791 else
1792 chan_off = dram_base;
1795 return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
1799 * checks if the csrow passed in is marked as SPARED; if so, returns the new
1800 * spare row
1802 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1804 int tmp_cs;
1806 if (online_spare_swap_done(pvt, dct) &&
1807 csrow == online_spare_bad_dramcs(pvt, dct)) {
1809 for_each_chip_select(tmp_cs, dct, pvt) {
1810 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1811 csrow = tmp_cs;
1812 break;
1816 return csrow;
1820 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1821 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1823 * Return:
1824 * -EINVAL: NOT FOUND
1825 * 0..csrow = Chip-Select Row
1827 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1829 struct mem_ctl_info *mci;
1830 struct amd64_pvt *pvt;
1831 u64 cs_base, cs_mask;
1832 int cs_found = -EINVAL;
1833 int csrow;
1835 mci = edac_mc_find(nid);
1836 if (!mci)
1837 return cs_found;
1839 pvt = mci->pvt_info;
1841 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1843 for_each_chip_select(csrow, dct, pvt) {
1844 if (!csrow_enabled(csrow, dct, pvt))
1845 continue;
1847 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1849 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1850 csrow, cs_base, cs_mask);
1852 cs_mask = ~cs_mask;
1854 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1855 (in_addr & cs_mask), (cs_base & cs_mask));
1857 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1858 if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1859 cs_found = csrow;
1860 break;
1862 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1864 edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1865 break;
1868 return cs_found;
1872 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
1873 * swapped with a region located at the bottom of memory so that the GPU can use
1874 * the interleaved region and thus two channels.
1876 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1878 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1880 if (pvt->fam == 0x10) {
1881 /* only revC3 and revE have that feature */
1882 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1883 return sys_addr;
1886 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
1888 if (!(swap_reg & 0x1))
1889 return sys_addr;
1891 swap_base = (swap_reg >> 3) & 0x7f;
1892 swap_limit = (swap_reg >> 11) & 0x7f;
1893 rgn_size = (swap_reg >> 20) & 0x7f;
1894 tmp_addr = sys_addr >> 27;
1896 if (!(sys_addr >> 34) &&
1897 (((tmp_addr >= swap_base) &&
1898 (tmp_addr <= swap_limit)) ||
1899 (tmp_addr < rgn_size)))
1900 return sys_addr ^ (u64)swap_base << 27;
1902 return sys_addr;
1905 /* For a given @dram_range, check if @sys_addr falls within it. */
1906 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1907 u64 sys_addr, int *chan_sel)
1909 int cs_found = -EINVAL;
1910 u64 chan_addr;
1911 u32 dct_sel_base;
1912 u8 channel;
1913 bool high_range = false;
1915 u8 node_id = dram_dst_node(pvt, range);
1916 u8 intlv_en = dram_intlv_en(pvt, range);
1917 u32 intlv_sel = dram_intlv_sel(pvt, range);
1919 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1920 range, sys_addr, get_dram_limit(pvt, range));
1922 if (dhar_valid(pvt) &&
1923 dhar_base(pvt) <= sys_addr &&
1924 sys_addr < BIT_64(32)) {
1925 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1926 sys_addr);
1927 return -EINVAL;
1930 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1931 return -EINVAL;
1933 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1935 dct_sel_base = dct_sel_baseaddr(pvt);
1938 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1939 * select between DCT0 and DCT1.
1941 if (dct_high_range_enabled(pvt) &&
1942 !dct_ganging_enabled(pvt) &&
1943 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1944 high_range = true;
1946 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1948 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1949 high_range, dct_sel_base);
1951 /* Remove node interleaving, see F1x120 */
1952 if (intlv_en)
1953 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1954 (chan_addr & 0xfff);
1956 /* remove channel interleave */
1957 if (dct_interleave_enabled(pvt) &&
1958 !dct_high_range_enabled(pvt) &&
1959 !dct_ganging_enabled(pvt)) {
1961 if (dct_sel_interleave_addr(pvt) != 1) {
1962 if (dct_sel_interleave_addr(pvt) == 0x3)
1963 /* hash 9 */
1964 chan_addr = ((chan_addr >> 10) << 9) |
1965 (chan_addr & 0x1ff);
1966 else
1967 /* A[6] or hash 6 */
1968 chan_addr = ((chan_addr >> 7) << 6) |
1969 (chan_addr & 0x3f);
1970 } else
1971 /* A[12] */
1972 chan_addr = ((chan_addr >> 13) << 12) |
1973 (chan_addr & 0xfff);
1976 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1978 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1980 if (cs_found >= 0)
1981 *chan_sel = channel;
1983 return cs_found;
1986 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1987 u64 sys_addr, int *chan_sel)
1989 int cs_found = -EINVAL;
1990 int num_dcts_intlv = 0;
1991 u64 chan_addr, chan_offset;
1992 u64 dct_base, dct_limit;
1993 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1994 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1996 u64 dhar_offset = f10_dhar_offset(pvt);
1997 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1998 u8 node_id = dram_dst_node(pvt, range);
1999 u8 intlv_en = dram_intlv_en(pvt, range);
2001 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
2002 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2004 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
2005 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
2007 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2008 range, sys_addr, get_dram_limit(pvt, range));
2010 if (!(get_dram_base(pvt, range) <= sys_addr) &&
2011 !(get_dram_limit(pvt, range) >= sys_addr))
2012 return -EINVAL;
2014 if (dhar_valid(pvt) &&
2015 dhar_base(pvt) <= sys_addr &&
2016 sys_addr < BIT_64(32)) {
2017 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2018 sys_addr);
2019 return -EINVAL;
2022 /* Verify sys_addr is within DCT Range. */
2023 dct_base = (u64) dct_sel_baseaddr(pvt);
2024 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2026 if (!(dct_cont_base_reg & BIT(0)) &&
2027 !(dct_base <= (sys_addr >> 27) &&
2028 dct_limit >= (sys_addr >> 27)))
2029 return -EINVAL;
2031 /* Verify number of dct's that participate in channel interleaving. */
2032 num_dcts_intlv = (int) hweight8(intlv_en);
2034 if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
2035 return -EINVAL;
2037 if (pvt->model >= 0x60)
2038 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2039 else
2040 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2041 num_dcts_intlv, dct_sel);
2043 /* Verify we stay within the MAX number of channels allowed */
2044 if (channel > 3)
2045 return -EINVAL;
2047 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2049 /* Get normalized DCT addr */
2050 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2051 chan_offset = dhar_offset;
2052 else
2053 chan_offset = dct_base << 27;
2055 chan_addr = sys_addr - chan_offset;
2057 /* remove channel interleave */
2058 if (num_dcts_intlv == 2) {
2059 if (intlv_addr == 0x4)
2060 chan_addr = ((chan_addr >> 9) << 8) |
2061 (chan_addr & 0xff);
2062 else if (intlv_addr == 0x5)
2063 chan_addr = ((chan_addr >> 10) << 9) |
2064 (chan_addr & 0x1ff);
2065 else
2066 return -EINVAL;
2068 } else if (num_dcts_intlv == 4) {
2069 if (intlv_addr == 0x4)
2070 chan_addr = ((chan_addr >> 10) << 8) |
2071 (chan_addr & 0xff);
2072 else if (intlv_addr == 0x5)
2073 chan_addr = ((chan_addr >> 11) << 9) |
2074 (chan_addr & 0x1ff);
2075 else
2076 return -EINVAL;
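	/*
	 * Worked example (made-up address, purely for illustration): with two
	 * interleaved DCTs and intlv_addr == 0x4, address bit 8 is removed, so
	 * a chan_addr of 0x12345 is squeezed to
	 *   ((0x12345 >> 9) << 8) | (0x12345 & 0xff) = 0x9145
	 * before the optional DCT offset below is added.
	 */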
2079 if (dct_offset_en) {
2080 amd64_read_pci_cfg(pvt->F1,
2081 DRAM_CONT_HIGH_OFF + (int) channel * 4,
2082 &tmp);
2083 chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
2086 f15h_select_dct(pvt, channel);
2088 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
2091 * Find Chip select:
2092	 * If channel == 3, alias it to 1. This is because, in F15 M30h,
2093	 * there is support for 4 DCTs, but only 2 are currently functional.
2094	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
2095	 * pvt->csels[1], so we need to use '1' here to get the correct info.
2096	 * Refer to the F15 M30h BKDG, Sections 2.10 and 2.10.3, for clarification.
2098 alias_channel = (channel == 3) ? 1 : channel;
2100 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2102 if (cs_found >= 0)
2103 *chan_sel = alias_channel;
2105 return cs_found;
2108 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2109 u64 sys_addr,
2110 int *chan_sel)
2112 int cs_found = -EINVAL;
2113 unsigned range;
2115 for (range = 0; range < DRAM_RANGES; range++) {
2116 if (!dram_rw(pvt, range))
2117 continue;
2119 if (pvt->fam == 0x15 && pvt->model >= 0x30)
2120 cs_found = f15_m30h_match_to_this_node(pvt, range,
2121 sys_addr,
2122 chan_sel);
2124 else if ((get_dram_base(pvt, range) <= sys_addr) &&
2125 (get_dram_limit(pvt, range) >= sys_addr)) {
2126 cs_found = f1x_match_to_this_node(pvt, range,
2127 sys_addr, chan_sel);
2128 if (cs_found >= 0)
2129 break;
2132 return cs_found;
2136 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2137 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2139 * The @sys_addr is usually an error address received from the hardware
2140 * (MCX_ADDR).
2142 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2143 struct err_info *err)
2145 struct amd64_pvt *pvt = mci->pvt_info;
2147 error_address_to_page_and_offset(sys_addr, err);
2149 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2150 if (err->csrow < 0) {
2151 err->err_code = ERR_CSROW;
2152 return;
2156 * We need the syndromes for channel detection only when we're
2157 * ganged. Otherwise @chan should already contain the channel at
2158 * this point.
2160 if (dct_ganging_enabled(pvt))
2161 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2165  * Debug routine to display the memory sizes of all logical DIMMs and their
2166  * CSROWs.
2168 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2170 int dimm, size0, size1;
2171 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2172 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
2174 if (pvt->fam == 0xf) {
2175 /* K8 families < revF not supported yet */
2176 if (pvt->ext_model < K8_REV_F)
2177 return;
2178 else
2179 WARN_ON(ctrl != 0);
2182 if (pvt->fam == 0x10) {
2183 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2184 : pvt->dbam0;
2185 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2186 pvt->csels[1].csbases :
2187 pvt->csels[0].csbases;
2188 } else if (ctrl) {
2189 dbam = pvt->dbam0;
2190 dcsb = pvt->csels[1].csbases;
2192 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2193 ctrl, dbam);
2195 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2197 /* Dump memory sizes for DIMM and its CSROWs */
2198 for (dimm = 0; dimm < 4; dimm++) {
2200 size0 = 0;
2201 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
2203			 * For F15h M60h, we need a multiplier for the LRDIMM
2204			 * cs_size calculation. We pass the dimm value to the
2205			 * dbam_to_cs mapper so it can find the multiplier from
2206			 * the corresponding DCSM.
2208 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2209 DBAM_DIMM(dimm, dbam),
2210 dimm);
2212 size1 = 0;
2213 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
2214 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2215 DBAM_DIMM(dimm, dbam),
2216 dimm);
2218 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
2219 dimm * 2, size0,
2220 dimm * 2 + 1, size1);
2224 static struct amd64_family_type family_types[] = {
2225 [K8_CPUS] = {
2226 .ctl_name = "K8",
2227 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
2228 .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2229 .ops = {
2230 .early_channel_count = k8_early_channel_count,
2231 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
2232 .dbam_to_cs = k8_dbam_to_chip_select,
2235 [F10_CPUS] = {
2236 .ctl_name = "F10h",
2237 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
2238 .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2239 .ops = {
2240 .early_channel_count = f1x_early_channel_count,
2241 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2242 .dbam_to_cs = f10_dbam_to_chip_select,
2245 [F15_CPUS] = {
2246 .ctl_name = "F15h",
2247 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
2248 .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
2249 .ops = {
2250 .early_channel_count = f1x_early_channel_count,
2251 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2252 .dbam_to_cs = f15_dbam_to_chip_select,
2255 [F15_M30H_CPUS] = {
2256 .ctl_name = "F15h_M30h",
2257 .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
2258 .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2259 .ops = {
2260 .early_channel_count = f1x_early_channel_count,
2261 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2262 .dbam_to_cs = f16_dbam_to_chip_select,
2265 [F15_M60H_CPUS] = {
2266 .ctl_name = "F15h_M60h",
2267 .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
2268 .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
2269 .ops = {
2270 .early_channel_count = f1x_early_channel_count,
2271 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2272 .dbam_to_cs = f15_m60h_dbam_to_chip_select,
2275 [F16_CPUS] = {
2276 .ctl_name = "F16h",
2277 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
2278 .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
2279 .ops = {
2280 .early_channel_count = f1x_early_channel_count,
2281 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2282 .dbam_to_cs = f16_dbam_to_chip_select,
2285 [F16_M30H_CPUS] = {
2286 .ctl_name = "F16h_M30h",
2287 .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
2288 .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2289 .ops = {
2290 .early_channel_count = f1x_early_channel_count,
2291 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2292 .dbam_to_cs = f16_dbam_to_chip_select,
2295 [F17_CPUS] = {
2296 .ctl_name = "F17h",
2297 .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2298 .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2299 .ops = {
2300 .early_channel_count = f17_early_channel_count,
2301 .dbam_to_cs = f17_addr_mask_to_cs_size,
2304 [F17_M10H_CPUS] = {
2305 .ctl_name = "F17h_M10h",
2306 .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
2307 .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
2308 .ops = {
2309 .early_channel_count = f17_early_channel_count,
2310 .dbam_to_cs = f17_addr_mask_to_cs_size,
2313 [F17_M30H_CPUS] = {
2314 .ctl_name = "F17h_M30h",
2315 .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
2316 .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
2317 .ops = {
2318 .early_channel_count = f17_early_channel_count,
2319 .dbam_to_cs = f17_addr_mask_to_cs_size,
2322 [F17_M60H_CPUS] = {
2323 .ctl_name = "F17h_M60h",
2324 .f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
2325 .f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
2326 .ops = {
2327 .early_channel_count = f17_early_channel_count,
2328 .dbam_to_cs = f17_addr_mask_to_cs_size,
2331 [F17_M70H_CPUS] = {
2332 .ctl_name = "F17h_M70h",
2333 .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
2334 .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
2335 .ops = {
2336 .early_channel_count = f17_early_channel_count,
2337 .dbam_to_cs = f17_addr_mask_to_cs_size,
2343 * These are tables of eigenvectors (one per line) which can be used for the
2344 * construction of the syndrome tables. The modified syndrome search algorithm
2345 * uses those to find the symbol in error and thus the DIMM.
2347 * Algorithm courtesy of Ross LaFetra from AMD.
2349 static const u16 x4_vectors[] = {
2350 0x2f57, 0x1afe, 0x66cc, 0xdd88,
2351 0x11eb, 0x3396, 0x7f4c, 0xeac8,
2352 0x0001, 0x0002, 0x0004, 0x0008,
2353 0x1013, 0x3032, 0x4044, 0x8088,
2354 0x106b, 0x30d6, 0x70fc, 0xe0a8,
2355 0x4857, 0xc4fe, 0x13cc, 0x3288,
2356 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2357 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2358 0x15c1, 0x2a42, 0x89ac, 0x4758,
2359 0x2b03, 0x1602, 0x4f0c, 0xca08,
2360 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2361 0x8ba7, 0x465e, 0x244c, 0x1cc8,
2362 0x2b87, 0x164e, 0x642c, 0xdc18,
2363 0x40b9, 0x80de, 0x1094, 0x20e8,
2364 0x27db, 0x1eb6, 0x9dac, 0x7b58,
2365 0x11c1, 0x2242, 0x84ac, 0x4c58,
2366 0x1be5, 0x2d7a, 0x5e34, 0xa718,
2367 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2368 0x4c97, 0xc87e, 0x11fc, 0x33a8,
2369 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2370 0x16b3, 0x3d62, 0x4f34, 0x8518,
2371 0x1e2f, 0x391a, 0x5cac, 0xf858,
2372 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2373 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2374 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2375 0x4397, 0xc27e, 0x17fc, 0x3ea8,
2376 0x1617, 0x3d3e, 0x6464, 0xb8b8,
2377 0x23ff, 0x12aa, 0xab6c, 0x56d8,
2378 0x2dfb, 0x1ba6, 0x913c, 0x7328,
2379 0x185d, 0x2ca6, 0x7914, 0x9e28,
2380 0x171b, 0x3e36, 0x7d7c, 0xebe8,
2381 0x4199, 0x82ee, 0x19f4, 0x2e58,
2382 0x4807, 0xc40e, 0x130c, 0x3208,
2383 0x1905, 0x2e0a, 0x5804, 0xac08,
2384 0x213f, 0x132a, 0xadfc, 0x5ba8,
2385 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2388 static const u16 x8_vectors[] = {
2389 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2390 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2391 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2392 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2393 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2394 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2395 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2396 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2397 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2398 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2399 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2400 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2401 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2402 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2403 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2404 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2405 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2406 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2407 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2410 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2411 unsigned v_dim)
2413 unsigned int i, err_sym;
2415 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2416 u16 s = syndrome;
2417 unsigned v_idx = err_sym * v_dim;
2418 unsigned v_end = (err_sym + 1) * v_dim;
2420 /* walk over all 16 bits of the syndrome */
2421 for (i = 1; i < (1U << 16); i <<= 1) {
2423 /* if bit is set in that eigenvector... */
2424 if (v_idx < v_end && vectors[v_idx] & i) {
2425 u16 ev_comp = vectors[v_idx++];
2427 /* ... and bit set in the modified syndrome, */
2428 if (s & i) {
2429 /* remove it. */
2430 s ^= ev_comp;
2432 if (!s)
2433 return err_sym;
2436 } else if (s & i)
2437 /* can't get to zero, move to next symbol */
2438 break;
2442 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2443 return -1;
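/*
 * Worked example (illustration only): for x4 symbols, the err_sym == 2 group
 * of x4_vectors is { 0x0001, 0x0002, 0x0004, 0x0008 }. A syndrome of 0x0005
 * is cancelled to zero by XORing in 0x0001 and 0x0004, so decode_syndrome()
 * returns 2, which map_err_sym_to_channel(2, 4) below turns into channel 0
 * (err_sym >> 4).
 */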
2446 static int map_err_sym_to_channel(int err_sym, int sym_size)
2448 if (sym_size == 4)
2449 switch (err_sym) {
2450 case 0x20:
2451 case 0x21:
2452 return 0;
2453 break;
2454 case 0x22:
2455 case 0x23:
2456 return 1;
2457 break;
2458 default:
2459 return err_sym >> 4;
2460 break;
2462 /* x8 symbols */
2463 else
2464 switch (err_sym) {
2465 /* imaginary bits not in a DIMM */
2466 case 0x10:
2467 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
2468 err_sym);
2469 return -1;
2470 break;
2472 case 0x11:
2473 return 0;
2474 break;
2475 case 0x12:
2476 return 1;
2477 break;
2478 default:
2479 return err_sym >> 3;
2480 break;
2482 return -1;
2485 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2487 struct amd64_pvt *pvt = mci->pvt_info;
2488 int err_sym = -1;
2490 if (pvt->ecc_sym_sz == 8)
2491 err_sym = decode_syndrome(syndrome, x8_vectors,
2492 ARRAY_SIZE(x8_vectors),
2493 pvt->ecc_sym_sz);
2494 else if (pvt->ecc_sym_sz == 4)
2495 err_sym = decode_syndrome(syndrome, x4_vectors,
2496 ARRAY_SIZE(x4_vectors),
2497 pvt->ecc_sym_sz);
2498 else {
2499 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2500 return err_sym;
2503 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2506 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
2507 u8 ecc_type)
2509 enum hw_event_mc_err_type err_type;
2510 const char *string;
2512 if (ecc_type == 2)
2513 err_type = HW_EVENT_ERR_CORRECTED;
2514 else if (ecc_type == 1)
2515 err_type = HW_EVENT_ERR_UNCORRECTED;
2516 else if (ecc_type == 3)
2517 err_type = HW_EVENT_ERR_DEFERRED;
2518 else {
2519 WARN(1, "Something is rotten in the state of Denmark.\n");
2520 return;
2523 switch (err->err_code) {
2524 case DECODE_OK:
2525 string = "";
2526 break;
2527 case ERR_NODE:
2528 string = "Failed to map error addr to a node";
2529 break;
2530 case ERR_CSROW:
2531 string = "Failed to map error addr to a csrow";
2532 break;
2533 case ERR_CHANNEL:
2534 string = "Unknown syndrome - possible error reporting race";
2535 break;
2536 case ERR_SYND:
2537 string = "MCA_SYND not valid - unknown syndrome and csrow";
2538 break;
2539 case ERR_NORM_ADDR:
2540 string = "Cannot decode normalized address";
2541 break;
2542 default:
2543 string = "WTF error";
2544 break;
2547 edac_mc_handle_error(err_type, mci, 1,
2548 err->page, err->offset, err->syndrome,
2549 err->csrow, err->channel, -1,
2550 string, "");
2553 static inline void decode_bus_error(int node_id, struct mce *m)
2555 struct mem_ctl_info *mci;
2556 struct amd64_pvt *pvt;
2557 u8 ecc_type = (m->status >> 45) & 0x3;
2558 u8 xec = XEC(m->status, 0x1f);
2559 u16 ec = EC(m->status);
2560 u64 sys_addr;
2561 struct err_info err;
2563 mci = edac_mc_find(node_id);
2564 if (!mci)
2565 return;
2567 pvt = mci->pvt_info;
2569 /* Bail out early if this was an 'observed' error */
2570 if (PP(ec) == NBSL_PP_OBS)
2571 return;
2573 /* Do only ECC errors */
2574 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2575 return;
2577 memset(&err, 0, sizeof(err));
2579 sys_addr = get_error_address(pvt, m);
2581 if (ecc_type == 2)
2582 err.syndrome = extract_syndrome(m->status);
2584 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2586 __log_ecc_error(mci, &err, ecc_type);
2590 * To find the UMC channel represented by this bank we need to match on its
2591 * instance_id. The instance_id of a bank is held in the lower 32 bits of its
2592 * IPID.
2594 * Currently, we can derive the channel number by looking at the 6th nibble in
2595 * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
2596 * number.
2598 static int find_umc_channel(struct mce *m)
2600 return (m->ipid & GENMASK(31, 0)) >> 20;
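/*
 * Example (hypothetical IPID value): if the lower 32 bits of the IPID are
 * 0x00150000, then 0x00150000 >> 20 == 0x1, i.e. the error was reported by
 * UMC channel 1.
 */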
2603 static void decode_umc_error(int node_id, struct mce *m)
2605 u8 ecc_type = (m->status >> 45) & 0x3;
2606 struct mem_ctl_info *mci;
2607 struct amd64_pvt *pvt;
2608 struct err_info err;
2609 u64 sys_addr;
2611 mci = edac_mc_find(node_id);
2612 if (!mci)
2613 return;
2615 pvt = mci->pvt_info;
2617 memset(&err, 0, sizeof(err));
2619 if (m->status & MCI_STATUS_DEFERRED)
2620 ecc_type = 3;
2622 err.channel = find_umc_channel(m);
2624 if (!(m->status & MCI_STATUS_SYNDV)) {
2625 err.err_code = ERR_SYND;
2626 goto log_error;
2629 if (ecc_type == 2) {
2630 u8 length = (m->synd >> 18) & 0x3f;
2632 if (length)
2633 err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
2634 else
2635 err.err_code = ERR_CHANNEL;
2638 err.csrow = m->synd & 0x7;
2640 if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
2641 err.err_code = ERR_NORM_ADDR;
2642 goto log_error;
2645 error_address_to_page_and_offset(sys_addr, &err);
2647 log_error:
2648 __log_ecc_error(mci, &err, ecc_type);
2652 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
2653 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
2654 * Reserve F0 and F6 on systems with a UMC.
2656 static int
2657 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2659 if (pvt->umc) {
2660 pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2661 if (!pvt->F0) {
2662 amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
2663 return -ENODEV;
2666 pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2667 if (!pvt->F6) {
2668 pci_dev_put(pvt->F0);
2669 pvt->F0 = NULL;
2671 amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2672 return -ENODEV;
2675 edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
2676 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2677 edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
2679 return 0;
2682 /* Reserve the ADDRESS MAP Device */
2683 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2684 if (!pvt->F1) {
2685 amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
2686 return -ENODEV;
2689 /* Reserve the DCT Device */
2690 pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2691 if (!pvt->F2) {
2692 pci_dev_put(pvt->F1);
2693 pvt->F1 = NULL;
2695 amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2696 return -ENODEV;
2699 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2700 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2701 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2703 return 0;
2706 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2708 if (pvt->umc) {
2709 pci_dev_put(pvt->F0);
2710 pci_dev_put(pvt->F6);
2711 } else {
2712 pci_dev_put(pvt->F1);
2713 pci_dev_put(pvt->F2);
2717 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
2719 pvt->ecc_sym_sz = 4;
2721 if (pvt->umc) {
2722 u8 i;
2724 for_each_umc(i) {
2725 /* Check enabled channels only: */
2726 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
2727 if (pvt->umc[i].ecc_ctrl & BIT(9)) {
2728 pvt->ecc_sym_sz = 16;
2729 return;
2730 } else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
2731 pvt->ecc_sym_sz = 8;
2732 return;
2736 } else if (pvt->fam >= 0x10) {
2737 u32 tmp;
2739 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2740 /* F16h has only DCT0, so no need to read dbam1. */
2741 if (pvt->fam != 0x16)
2742 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2744 /* F10h, revD and later can do x8 ECC too. */
2745 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2746 pvt->ecc_sym_sz = 8;
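/*
 * Note: the symbol size chosen here is what later picks the syndrome table,
 * e.g. a UMC with ecc_ctrl BIT(7) set gives ecc_sym_sz == 8, so
 * get_channel_from_ecc_syndrome() decodes against x8_vectors; the default of
 * 4 selects x4_vectors.
 */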
2751 * Retrieve the hardware registers of the memory controller.
2753 static void __read_mc_regs_df(struct amd64_pvt *pvt)
2755 u8 nid = pvt->mc_node_id;
2756 struct amd64_umc *umc;
2757 u32 i, umc_base;
2759 /* Read registers from each UMC */
2760 for_each_umc(i) {
2762 umc_base = get_umc_base(i);
2763 umc = &pvt->umc[i];
2765 amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
2766 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
2767 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
2768 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
2769 amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
2774 * Retrieve the hardware registers of the memory controller (this includes the
2775 * 'Address Map' and 'Misc' device regs)
2777 static void read_mc_regs(struct amd64_pvt *pvt)
2779 unsigned int range;
2780 u64 msr_val;
2783 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2784 * those are Read-As-Zero.
2786 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2787 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2789 /* Check first whether TOP_MEM2 is enabled: */
2790 rdmsrl(MSR_K8_SYSCFG, msr_val);
2791 if (msr_val & BIT(21)) {
2792 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2793 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2794 } else {
2795 edac_dbg(0, " TOP_MEM2 disabled\n");
2798 if (pvt->umc) {
2799 __read_mc_regs_df(pvt);
2800 amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
2802 goto skip;
2805 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2807 read_dram_ctl_register(pvt);
2809 for (range = 0; range < DRAM_RANGES; range++) {
2810 u8 rw;
2812 /* read settings for this DRAM range */
2813 read_dram_base_limit_regs(pvt, range);
2815 rw = dram_rw(pvt, range);
2816 if (!rw)
2817 continue;
2819 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2820 range,
2821 get_dram_base(pvt, range),
2822 get_dram_limit(pvt, range));
2824 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2825 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2826 (rw & 0x1) ? "R" : "-",
2827 (rw & 0x2) ? "W" : "-",
2828 dram_intlv_sel(pvt, range),
2829 dram_dst_node(pvt, range));
2832 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2833 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2835 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2837 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2838 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2840 if (!dct_ganging_enabled(pvt)) {
2841 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2842 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
2845 skip:
2846 read_dct_base_mask(pvt);
2848 determine_memory_type(pvt);
2849 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
2851 determine_ecc_sym_sz(pvt);
2853 dump_misc_regs(pvt);
2857 * NOTE: CPU Revision Dependent code
2859 * Input:
2860 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2861 * k8 private pointer to -->
2862 * DRAM Bank Address mapping register
2863 * node_id
2864 * DCL register where dual_channel_active is
2866  * The DBAM register consists of 4 sets of 4 bits each, with the following definitions:
2868 * Bits: CSROWs
2869 * 0-3 CSROWs 0 and 1
2870 * 4-7 CSROWs 2 and 3
2871 * 8-11 CSROWs 4 and 5
2872 * 12-15 CSROWs 6 and 7
2874 * Values range from: 0 to 15
2875  * The meaning of the values depends on CPU revision and dual-channel state;
2876  * see the relevant BKDG for more info.
2878  * The memory controller provides for a total of only 8 CSROWs in its current
2879  * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2880  * single-channel mode or two (2) DIMMs in dual-channel mode.
2882 * The following code logic collapses the various tables for CSROW based on CPU
2883 * revision.
2885 * Returns:
2886  *	The number of PAGE_SIZE pages that the specified CSROW number
2887  *	encompasses.
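/*
 * Worked example (illustrative size only): a DBAM nibble that ->dbam_to_cs()
 * decodes to 2048 (MB) is converted below as
 *   nr_pages = 2048 << (20 - PAGE_SHIFT);
 * i.e. 2048 MB == 524288 pages of 4 KiB with the usual x86-64 PAGE_SHIFT of 12.
 */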
2890 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2892 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2893 int csrow_nr = csrow_nr_orig;
2894 u32 cs_mode, nr_pages;
2896 if (!pvt->umc) {
2897 csrow_nr >>= 1;
2898 cs_mode = DBAM_DIMM(csrow_nr, dbam);
2899 } else {
2900 cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
2903 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
2904 nr_pages <<= 20 - PAGE_SHIFT;
2906 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2907 csrow_nr_orig, dct, cs_mode);
2908 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2910 return nr_pages;
2913 static int init_csrows_df(struct mem_ctl_info *mci)
2915 struct amd64_pvt *pvt = mci->pvt_info;
2916 enum edac_type edac_mode = EDAC_NONE;
2917 enum dev_type dev_type = DEV_UNKNOWN;
2918 struct dimm_info *dimm;
2919 int empty = 1;
2920 u8 umc, cs;
2922 if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
2923 edac_mode = EDAC_S16ECD16ED;
2924 dev_type = DEV_X16;
2925 } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
2926 edac_mode = EDAC_S8ECD8ED;
2927 dev_type = DEV_X8;
2928 } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
2929 edac_mode = EDAC_S4ECD4ED;
2930 dev_type = DEV_X4;
2931 } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
2932 edac_mode = EDAC_SECDED;
2935 for_each_umc(umc) {
2936 for_each_chip_select(cs, umc, pvt) {
2937 if (!csrow_enabled(cs, umc, pvt))
2938 continue;
2940 empty = 0;
2941 dimm = mci->csrows[cs]->channels[umc]->dimm;
2943 edac_dbg(1, "MC node: %d, csrow: %d\n",
2944 pvt->mc_node_id, cs);
2946 dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
2947 dimm->mtype = pvt->dram_type;
2948 dimm->edac_mode = edac_mode;
2949 dimm->dtype = dev_type;
2950 dimm->grain = 64;
2954 return empty;
2958 * Initialize the array of csrow attribute instances, based on the values
2959 * from pci config hardware registers.
2961 static int init_csrows(struct mem_ctl_info *mci)
2963 struct amd64_pvt *pvt = mci->pvt_info;
2964 enum edac_type edac_mode = EDAC_NONE;
2965 struct csrow_info *csrow;
2966 struct dimm_info *dimm;
2967 int i, j, empty = 1;
2968 int nr_pages = 0;
2969 u32 val;
2971 if (pvt->umc)
2972 return init_csrows_df(mci);
2974 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2976 pvt->nbcfg = val;
2978 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2979 pvt->mc_node_id, val,
2980 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2983 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2985 for_each_chip_select(i, 0, pvt) {
2986 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2987 bool row_dct1 = false;
2989 if (pvt->fam != 0xf)
2990 row_dct1 = !!csrow_enabled(i, 1, pvt);
2992 if (!row_dct0 && !row_dct1)
2993 continue;
2995 csrow = mci->csrows[i];
2996 empty = 0;
2998 edac_dbg(1, "MC node: %d, csrow: %d\n",
2999 pvt->mc_node_id, i);
3001 if (row_dct0) {
3002 nr_pages = get_csrow_nr_pages(pvt, 0, i);
3003 csrow->channels[0]->dimm->nr_pages = nr_pages;
3006 /* K8 has only one DCT */
3007 if (pvt->fam != 0xf && row_dct1) {
3008 int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
3010 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
3011 nr_pages += row_dct1_pages;
3014 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3016 /* Determine DIMM ECC mode: */
3017 if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3018 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3019 ? EDAC_S4ECD4ED
3020 : EDAC_SECDED;
3023 for (j = 0; j < pvt->channel_count; j++) {
3024 dimm = csrow->channels[j]->dimm;
3025 dimm->mtype = pvt->dram_type;
3026 dimm->edac_mode = edac_mode;
3027 dimm->grain = 64;
3031 return empty;
3034 /* get all cores on this DCT */
3035 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3037 int cpu;
3039 for_each_online_cpu(cpu)
3040 if (amd_get_nb_id(cpu) == nid)
3041 cpumask_set_cpu(cpu, mask);
3044 /* check MCG_CTL on all the cpus on this node */
3045 static bool nb_mce_bank_enabled_on_node(u16 nid)
3047 cpumask_var_t mask;
3048 int cpu, nbe;
3049 bool ret = false;
3051 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3052 amd64_warn("%s: Error allocating mask\n", __func__);
3053 return false;
3056 get_cpus_on_this_dct_cpumask(mask, nid);
3058 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3060 for_each_cpu(cpu, mask) {
3061 struct msr *reg = per_cpu_ptr(msrs, cpu);
3062 nbe = reg->l & MSR_MCGCTL_NBE;
3064 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
3065 cpu, reg->q,
3066 (nbe ? "enabled" : "disabled"));
3068 if (!nbe)
3069 goto out;
3071 ret = true;
3073 out:
3074 free_cpumask_var(mask);
3075 return ret;
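/*
 * In other words: if even one online core on this node has the NB machine
 * check enable bit (MSR_IA32_MCG_CTL[4], i.e. MSR_MCGCTL_NBE) cleared, the
 * node is treated as not reporting NB MCEs and ecc_enabled() below reports it
 * as unusable; the instance is then skipped unless ecc_enable_override is set.
 */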
3078 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
3080 cpumask_var_t cmask;
3081 int cpu;
3083 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
3084 amd64_warn("%s: error allocating mask\n", __func__);
3085 return -ENOMEM;
3088 get_cpus_on_this_dct_cpumask(cmask, nid);
3090 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3092 for_each_cpu(cpu, cmask) {
3094 struct msr *reg = per_cpu_ptr(msrs, cpu);
3096 if (on) {
3097 if (reg->l & MSR_MCGCTL_NBE)
3098 s->flags.nb_mce_enable = 1;
3100 reg->l |= MSR_MCGCTL_NBE;
3101 } else {
3103 * Turn off NB MCE reporting only when it was off before
3105 if (!s->flags.nb_mce_enable)
3106 reg->l &= ~MSR_MCGCTL_NBE;
3109 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3111 free_cpumask_var(cmask);
3113 return 0;
3116 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3117 struct pci_dev *F3)
3119 bool ret = true;
3120 u32 value, mask = 0x3; /* UECC/CECC enable */
3122 if (toggle_ecc_err_reporting(s, nid, ON)) {
3123 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
3124 return false;
3127 amd64_read_pci_cfg(F3, NBCTL, &value);
3129 s->old_nbctl = value & mask;
3130 s->nbctl_valid = true;
3132 value |= mask;
3133 amd64_write_pci_cfg(F3, NBCTL, value);
3135 amd64_read_pci_cfg(F3, NBCFG, &value);
3137 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3138 nid, value, !!(value & NBCFG_ECC_ENABLE));
3140 if (!(value & NBCFG_ECC_ENABLE)) {
3141 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3143 s->flags.nb_ecc_prev = 0;
3145 /* Attempt to turn on DRAM ECC Enable */
3146 value |= NBCFG_ECC_ENABLE;
3147 amd64_write_pci_cfg(F3, NBCFG, value);
3149 amd64_read_pci_cfg(F3, NBCFG, &value);
3151 if (!(value & NBCFG_ECC_ENABLE)) {
3152 amd64_warn("Hardware rejected DRAM ECC enable,"
3153 "check memory DIMM configuration.\n");
3154 ret = false;
3155 } else {
3156 amd64_info("Hardware accepted DRAM ECC Enable\n");
3158 } else {
3159 s->flags.nb_ecc_prev = 1;
3162 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3163 nid, value, !!(value & NBCFG_ECC_ENABLE));
3165 return ret;
3168 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3169 struct pci_dev *F3)
3171 u32 value, mask = 0x3; /* UECC/CECC enable */
3173 if (!s->nbctl_valid)
3174 return;
3176 amd64_read_pci_cfg(F3, NBCTL, &value);
3177 value &= ~mask;
3178 value |= s->old_nbctl;
3180 amd64_write_pci_cfg(F3, NBCTL, value);
3182 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
3183 if (!s->flags.nb_ecc_prev) {
3184 amd64_read_pci_cfg(F3, NBCFG, &value);
3185 value &= ~NBCFG_ECC_ENABLE;
3186 amd64_write_pci_cfg(F3, NBCFG, value);
3189 /* restore the NB Enable MCGCTL bit */
3190 if (toggle_ecc_err_reporting(s, nid, OFF))
3191 amd64_warn("Error restoring NB MCGCTL settings!\n");
3195 * EDAC requires that the BIOS have ECC enabled before
3196 * taking over the processing of ECC errors. A command line
3197  * option allows force-enabling hardware ECC later in
3198 * enable_ecc_error_reporting().
3200 static const char *ecc_msg =
3201 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
3202 " Either enable ECC checking or force module loading by setting "
3203 "'ecc_enable_override'.\n"
3204 " (Note that use of the override may cause unknown side effects.)\n";
3206 static bool ecc_enabled(struct pci_dev *F3, u16 nid)
3208 bool nb_mce_en = false;
3209 u8 ecc_en = 0, i;
3210 u32 value;
3212 if (boot_cpu_data.x86 >= 0x17) {
3213 u8 umc_en_mask = 0, ecc_en_mask = 0;
3215 for_each_umc(i) {
3216 u32 base = get_umc_base(i);
3218 /* Only check enabled UMCs. */
3219 if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
3220 continue;
3222 if (!(value & UMC_SDP_INIT))
3223 continue;
3225 umc_en_mask |= BIT(i);
3227 if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
3228 continue;
3230 if (value & UMC_ECC_ENABLED)
3231 ecc_en_mask |= BIT(i);
3234 /* Check whether at least one UMC is enabled: */
3235 if (umc_en_mask)
3236 ecc_en = umc_en_mask == ecc_en_mask;
3237 else
3238 edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3240 /* Assume UMC MCA banks are enabled. */
3241 nb_mce_en = true;
3242 } else {
3243 amd64_read_pci_cfg(F3, NBCFG, &value);
3245 ecc_en = !!(value & NBCFG_ECC_ENABLE);
3247 nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3248 if (!nb_mce_en)
3249 edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3250 MSR_IA32_MCG_CTL, nid);
3253 amd64_info("Node %d: DRAM ECC %s.\n",
3254 nid, (ecc_en ? "enabled" : "disabled"));
3256 if (!ecc_en || !nb_mce_en) {
3257 amd64_info("%s", ecc_msg);
3258 return false;
3260 return true;
3263 static inline void
3264 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3266 u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3268 for_each_umc(i) {
3269 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3270 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3271 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3273 dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3274 dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3278 /* Set chipkill only if ECC is enabled: */
3279 if (ecc_en) {
3280 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3282 if (!cpk_en)
3283 return;
3285 if (dev_x4)
3286 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3287 else if (dev_x16)
3288 mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3289 else
3290 mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3294 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
3295 struct amd64_family_type *fam)
3297 struct amd64_pvt *pvt = mci->pvt_info;
3299 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3300 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3302 if (pvt->umc) {
3303 f17h_determine_edac_ctl_cap(mci, pvt);
3304 } else {
3305 if (pvt->nbcap & NBCAP_SECDED)
3306 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3308 if (pvt->nbcap & NBCAP_CHIPKILL)
3309 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3312 mci->edac_cap = determine_edac_cap(pvt);
3313 mci->mod_name = EDAC_MOD_STR;
3314 mci->ctl_name = fam->ctl_name;
3315 mci->dev_name = pci_name(pvt->F3);
3316 mci->ctl_page_to_phys = NULL;
3318 /* memory scrubber interface */
3319 mci->set_sdram_scrub_rate = set_scrub_rate;
3320 mci->get_sdram_scrub_rate = get_scrub_rate;
3324 * returns a pointer to the family descriptor on success, NULL otherwise.
3326 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3328 struct amd64_family_type *fam_type = NULL;
3330 pvt->ext_model = boot_cpu_data.x86_model >> 4;
3331 pvt->stepping = boot_cpu_data.x86_stepping;
3332 pvt->model = boot_cpu_data.x86_model;
3333 pvt->fam = boot_cpu_data.x86;
3335 switch (pvt->fam) {
3336 case 0xf:
3337 fam_type = &family_types[K8_CPUS];
3338 pvt->ops = &family_types[K8_CPUS].ops;
3339 break;
3341 case 0x10:
3342 fam_type = &family_types[F10_CPUS];
3343 pvt->ops = &family_types[F10_CPUS].ops;
3344 break;
3346 case 0x15:
3347 if (pvt->model == 0x30) {
3348 fam_type = &family_types[F15_M30H_CPUS];
3349 pvt->ops = &family_types[F15_M30H_CPUS].ops;
3350 break;
3351 } else if (pvt->model == 0x60) {
3352 fam_type = &family_types[F15_M60H_CPUS];
3353 pvt->ops = &family_types[F15_M60H_CPUS].ops;
3354 break;
3357 fam_type = &family_types[F15_CPUS];
3358 pvt->ops = &family_types[F15_CPUS].ops;
3359 break;
3361 case 0x16:
3362 if (pvt->model == 0x30) {
3363 fam_type = &family_types[F16_M30H_CPUS];
3364 pvt->ops = &family_types[F16_M30H_CPUS].ops;
3365 break;
3367 fam_type = &family_types[F16_CPUS];
3368 pvt->ops = &family_types[F16_CPUS].ops;
3369 break;
3371 case 0x17:
3372 if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
3373 fam_type = &family_types[F17_M10H_CPUS];
3374 pvt->ops = &family_types[F17_M10H_CPUS].ops;
3375 break;
3376 } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
3377 fam_type = &family_types[F17_M30H_CPUS];
3378 pvt->ops = &family_types[F17_M30H_CPUS].ops;
3379 break;
3380 } else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
3381 fam_type = &family_types[F17_M60H_CPUS];
3382 pvt->ops = &family_types[F17_M60H_CPUS].ops;
3383 break;
3384 } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
3385 fam_type = &family_types[F17_M70H_CPUS];
3386 pvt->ops = &family_types[F17_M70H_CPUS].ops;
3387 break;
3389 /* fall through */
3390 case 0x18:
3391 fam_type = &family_types[F17_CPUS];
3392 pvt->ops = &family_types[F17_CPUS].ops;
3394 if (pvt->fam == 0x18)
3395 family_types[F17_CPUS].ctl_name = "F18h";
3396 break;
3398 default:
3399 amd64_err("Unsupported family!\n");
3400 return NULL;
3403 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
3404 (pvt->fam == 0xf ?
3405 (pvt->ext_model >= K8_REV_F ? "revF or later "
3406 : "revE or earlier ")
3407 : ""), pvt->mc_node_id);
3408 return fam_type;
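/*
 * Example (hypothetical CPU): a family 0x17 part with model 0x31 falls into
 * the 0x30..0x3f window above, so fam_type becomes
 * &family_types[F17_M30H_CPUS] and pvt->ops->dbam_to_cs points at
 * f17_addr_mask_to_cs_size.
 */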
3411 static const struct attribute_group *amd64_edac_attr_groups[] = {
3412 #ifdef CONFIG_EDAC_DEBUG
3413 &amd64_edac_dbg_group,
3414 #endif
3415 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
3416 &amd64_edac_inj_group,
3417 #endif
3418 NULL
3421 /* Set the number of Unified Memory Controllers in the system. */
3422 static void compute_num_umcs(void)
3424 u8 model = boot_cpu_data.x86_model;
3426 if (boot_cpu_data.x86 < 0x17)
3427 return;
3429 if (model >= 0x30 && model <= 0x3f)
3430 num_umcs = 8;
3431 else
3432 num_umcs = 2;
3434 	edac_dbg(1, "Number of UMCs: %x\n", num_umcs);
3437 static int init_one_instance(unsigned int nid)
3439 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3440 struct amd64_family_type *fam_type = NULL;
3441 struct mem_ctl_info *mci = NULL;
3442 struct edac_mc_layer layers[2];
3443 struct amd64_pvt *pvt = NULL;
3444 u16 pci_id1, pci_id2;
3445 int err = 0, ret;
3447 ret = -ENOMEM;
3448 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3449 if (!pvt)
3450 goto err_ret;
3452 pvt->mc_node_id = nid;
3453 pvt->F3 = F3;
3455 ret = -EINVAL;
3456 fam_type = per_family_init(pvt);
3457 if (!fam_type)
3458 goto err_free;
3460 if (pvt->fam >= 0x17) {
3461 pvt->umc = kcalloc(num_umcs, sizeof(struct amd64_umc), GFP_KERNEL);
3462 if (!pvt->umc) {
3463 ret = -ENOMEM;
3464 goto err_free;
3467 pci_id1 = fam_type->f0_id;
3468 pci_id2 = fam_type->f6_id;
3469 } else {
3470 pci_id1 = fam_type->f1_id;
3471 pci_id2 = fam_type->f2_id;
3474 err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
3475 if (err)
3476 goto err_post_init;
3478 read_mc_regs(pvt);
3481 * We need to determine how many memory channels there are. Then use
3482 * that information for calculating the size of the dynamic instance
3483 * tables in the 'mci' structure.
3485 ret = -EINVAL;
3486 pvt->channel_count = pvt->ops->early_channel_count(pvt);
3487 if (pvt->channel_count < 0)
3488 goto err_siblings;
3490 ret = -ENOMEM;
3491 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3492 layers[0].size = pvt->csels[0].b_cnt;
3493 layers[0].is_virt_csrow = true;
3494 layers[1].type = EDAC_MC_LAYER_CHANNEL;
3497 * Always allocate two channels since we can have setups with DIMMs on
3498 * only one channel. Also, this simplifies handling later for the price
3499 * of a couple of KBs tops.
3501 * On Fam17h+, the number of controllers may be greater than two. So set
3502 * the size equal to the maximum number of UMCs.
3504 if (pvt->fam >= 0x17)
3505 layers[1].size = num_umcs;
3506 else
3507 layers[1].size = 2;
3508 layers[1].is_virt_csrow = false;
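	/*
	 * Illustrative outcome (assumed part): on a family 0x17, model
	 * 0x30..0x3f system, compute_num_umcs() above set num_umcs to 8, so
	 * layers[1].size becomes 8; on pre-0x17 systems it stays at 2.
	 */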
3510 mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
3511 if (!mci)
3512 goto err_siblings;
3514 mci->pvt_info = pvt;
3515 mci->pdev = &pvt->F3->dev;
3517 setup_mci_misc_attrs(mci, fam_type);
3519 if (init_csrows(mci))
3520 mci->edac_cap = EDAC_FLAG_NONE;
3522 ret = -ENODEV;
3523 if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
3524 edac_dbg(1, "failed edac_mc_add_mc()\n");
3525 goto err_add_mc;
3528 return 0;
3530 err_add_mc:
3531 edac_mc_free(mci);
3533 err_siblings:
3534 free_mc_sibling_devs(pvt);
3536 err_post_init:
3537 if (pvt->fam >= 0x17)
3538 kfree(pvt->umc);
3540 err_free:
3541 kfree(pvt);
3543 err_ret:
3544 return ret;
3547 static int probe_one_instance(unsigned int nid)
3549 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3550 struct ecc_settings *s;
3551 int ret;
3553 ret = -ENOMEM;
3554 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
3555 if (!s)
3556 goto err_out;
3558 ecc_stngs[nid] = s;
3560 if (!ecc_enabled(F3, nid)) {
3561 ret = 0;
3563 if (!ecc_enable_override)
3564 goto err_enable;
3566 if (boot_cpu_data.x86 >= 0x17) {
3567 amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
3568 goto err_enable;
3569 } else
3570 amd64_warn("Forcing ECC on!\n");
3572 if (!enable_ecc_error_reporting(s, nid, F3))
3573 goto err_enable;
3576 ret = init_one_instance(nid);
3577 if (ret < 0) {
3578 amd64_err("Error probing instance: %d\n", nid);
3580 if (boot_cpu_data.x86 < 0x17)
3581 restore_ecc_error_reporting(s, nid, F3);
3583 goto err_enable;
3586 return ret;
3588 err_enable:
3589 kfree(s);
3590 ecc_stngs[nid] = NULL;
3592 err_out:
3593 return ret;
3596 static void remove_one_instance(unsigned int nid)
3598 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3599 struct ecc_settings *s = ecc_stngs[nid];
3600 struct mem_ctl_info *mci;
3601 struct amd64_pvt *pvt;
3603 mci = find_mci_by_dev(&F3->dev);
3604 WARN_ON(!mci);
3606 /* Remove from EDAC CORE tracking list */
3607 mci = edac_mc_del_mc(&F3->dev);
3608 if (!mci)
3609 return;
3611 pvt = mci->pvt_info;
3613 restore_ecc_error_reporting(s, nid, F3);
3615 free_mc_sibling_devs(pvt);
3617 kfree(ecc_stngs[nid]);
3618 ecc_stngs[nid] = NULL;
3620 /* Free the EDAC CORE resources */
3621 mci->pvt_info = NULL;
3623 kfree(pvt);
3624 edac_mc_free(mci);
3627 static void setup_pci_device(void)
3629 struct mem_ctl_info *mci;
3630 struct amd64_pvt *pvt;
3632 if (pci_ctl)
3633 return;
3635 mci = edac_mc_find(0);
3636 if (!mci)
3637 return;
3639 pvt = mci->pvt_info;
3640 if (pvt->umc)
3641 pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
3642 else
3643 pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
3644 if (!pci_ctl) {
3645 pr_warn("%s(): Unable to create PCI control\n", __func__);
3646 pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
3650 static const struct x86_cpu_id amd64_cpuids[] = {
3651 { X86_VENDOR_AMD, 0xF, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3652 { X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3653 { X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3654 { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3655 { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3656 { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3659 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
3661 static int __init amd64_edac_init(void)
3663 const char *owner;
3664 int err = -ENODEV;
3665 int i;
3667 owner = edac_get_owner();
3668 if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3669 return -EBUSY;
3671 if (!x86_match_cpu(amd64_cpuids))
3672 return -ENODEV;
3674 if (amd_cache_northbridges() < 0)
3675 return -ENODEV;
3677 opstate_init();
3679 err = -ENOMEM;
3680 ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
3681 if (!ecc_stngs)
3682 goto err_free;
3684 msrs = msrs_alloc();
3685 if (!msrs)
3686 goto err_free;
3688 compute_num_umcs();
3690 for (i = 0; i < amd_nb_num(); i++) {
3691 err = probe_one_instance(i);
3692 if (err) {
3693 /* unwind properly */
3694 while (--i >= 0)
3695 remove_one_instance(i);
3697 goto err_pci;
3701 if (!edac_has_mcs()) {
3702 err = -ENODEV;
3703 goto err_pci;
3706 /* register stuff with EDAC MCE */
3707 if (report_gart_errors)
3708 amd_report_gart_errors(true);
3710 if (boot_cpu_data.x86 >= 0x17)
3711 amd_register_ecc_decoder(decode_umc_error);
3712 else
3713 amd_register_ecc_decoder(decode_bus_error);
3715 setup_pci_device();
3717 #ifdef CONFIG_X86_32
3718 amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
3719 #endif
3721 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
3723 return 0;
3725 err_pci:
3726 msrs_free(msrs);
3727 msrs = NULL;
3729 err_free:
3730 kfree(ecc_stngs);
3731 ecc_stngs = NULL;
3733 return err;
3736 static void __exit amd64_edac_exit(void)
3738 int i;
3740 if (pci_ctl)
3741 edac_pci_release_generic_ctl(pci_ctl);
3743 /* unregister from EDAC MCE */
3744 amd_report_gart_errors(false);
3746 if (boot_cpu_data.x86 >= 0x17)
3747 amd_unregister_ecc_decoder(decode_umc_error);
3748 else
3749 amd_unregister_ecc_decoder(decode_bus_error);
3751 for (i = 0; i < amd_nb_num(); i++)
3752 remove_one_instance(i);
3754 kfree(ecc_stngs);
3755 ecc_stngs = NULL;
3757 msrs_free(msrs);
3758 msrs = NULL;
3761 module_init(amd64_edac_init);
3762 module_exit(amd64_edac_exit);
3764 MODULE_LICENSE("GPL");
3765 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3766 "Dave Peterson, Thayne Harbaugh");
3767 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3768 EDAC_AMD64_VERSION);
3770 module_param(edac_op_state, int, 0444);
3771 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");