// SPDX-License-Identifier: GPL-2.0-only
#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

static struct amd64_family_type *fam_type;

/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02, 800000000UL},
	{ 0x03, 400000000UL},
	{ 0x04, 200000000UL},
	{ 0x05, 100000000UL},
	{ 0x06, 50000000UL},
	{ 0x07, 25000000UL},
	{ 0x08, 12284069UL},
	{ 0x09, 6274509UL},
	{ 0x0A, 3121951UL},
	{ 0x0B, 1560975UL},
	{ 0x0C, 781440UL},
	{ 0x0D, 390720UL},
	{ 0x0E, 195300UL},
	{ 0x0F, 97650UL},
	{ 0x10, 48854UL},
	{ 0x11, 24427UL},
	{ 0x12, 12213UL},
	{ 0x13, 6101UL},
	{ 0x14, 3051UL},
	{ 0x15, 1523UL},
	{ 0x16, 761UL},
	{ 0x00, 0UL},		/* scrubbing off */
};
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
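	/* Model 0x30 has four DCTs, so DctCfgSel is two bits wide there. */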
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}
/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	switch (pvt->fam) {
	case 0xf:
		if (dct || offset >= 0x100)
			return -EINVAL;
		break;

	case 0x10:
		if (dct) {
			/*
			 * Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C; reads to F2x1xx
			 * return 0. (cf. Section 2.8.1 F10h BKDG)
			 */
			if (dct_ganging_enabled(pvt))
				return 0;

			offset += 0x100;
		}
		break;

	case 0x15:
		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * We should select which DCT we access using F1x10C[DctCfgSel]
		 */
		dct = (dct && pvt->model == 0x30) ? 3 : dct;
		f15h_select_dct(pvt, dct);
		break;

	case 0x16:
		if (dct)
			return -EINVAL;
		break;

	default:
		break;
	}

	return amd64_read_pci_cfg(pvt->F2, offset, val);
}
/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
{
	/*
	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
	 * as 0x0, scrubval 0x6 as 0x1, etc.
	 */
	if (scrubval >= 0x5 && scrubval <= 0x14) {
		scrubval -= 0x5;
		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
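		/* Bit 0 of the scrub base address register is the enable bit. */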
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
	} else {
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
	}
}
/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, then use the last maximum value found.
 */
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater or equal than the setting requested
	 * and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	if (pvt->umc) {
		__f17h_set_scrubval(pvt, scrubval);
	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
		f15h_select_dct(pvt, 0);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
		f15h_select_dct(pvt, 1);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
	} else {
		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
	}

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}
static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0xf)
		min_scrubrate = 0x0;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
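		/* Workaround: on models earlier than 0x10, select DCT 0 before
		 * accessing the scrub rate register. */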
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			min_scrubrate = 0x6;
	}
	return __set_scrub_rate(pvt, bw, min_scrubrate);
}
static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int i, retval = -EINVAL;
	u32 scrubval = 0;

	if (pvt->umc) {
		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
		if (scrubval & BIT(0)) {
			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
			scrubval &= 0xF;
			scrubval += 0x5;
		} else {
			scrubval = 0;
		}
	} else if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
		else
			amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	} else {
		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	}

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}
/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}
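	/* With interleaving enabled, IntlvEn must be 1, 3 or 7 (2-, 4- or 8-node). */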
	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}
/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift	= 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15, 5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5) << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
	}

	*base  = (csbase & base_bits) << addr_shift;

	*mask  = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}
#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

#define for_each_umc(i) \
	for (i = 0; i < fam_type->max_mcs; i++)
/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}
/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
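	/* Only 0, 1, 3 and 7 are valid IntlvEn encodings: 0-3 interleave bits. */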
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}
/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
	unsigned long edac_cap = EDAC_FLAG_NONE;
	u8 bit;

	if (pvt->umc) {
		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;

		for_each_umc(i) {
			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			/* UMC Configuration bit 12 (DimmEccEn) */
			if (pvt->umc[i].umc_cfg & BIT(12))
				dimm_ecc_en_mask |= BIT(i);
		}
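		/* Report SECDED only if every initialized UMC has DimmEccEn set. */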
		if (umc_en_mask == dimm_ecc_en_mask)
			edac_cap = EDAC_FLAG_SECDED;
	} else {
		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
			? 19
			: 17;

		if (pvt->dclr0 & BIT(bit))
			edac_cap = EDAC_FLAG_SECDED;
	}

	return edac_cap;
}
static void debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	if (pvt->dram_type == MEM_LRDDR3) {
		u32 dcsm = pvt->csels[chan].csmasks[0];
		/*
		 * It's assumed all LRDIMMs in a DCT are going to be of
		 * same 'type' until proven otherwise. So, use a cs
		 * value of '0' here to get dcsm value.
		 */
		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
	}

	edac_dbg(1, "All DIMMs support ECC:%s\n",
		    (dclr & BIT(19)) ? "yes" : "no");


	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ?  "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ?  "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ?  "yes" : "no",
		 (dclr & BIT(13)) ?  "yes" : "no",
		 (dclr & BIT(14)) ?  "yes" : "no",
		 (dclr & BIT(15)) ?  "yes" : "no");
}
#define CS_EVEN_PRIMARY		BIT(0)
#define CS_ODD_PRIMARY		BIT(1)
#define CS_EVEN_SECONDARY	BIT(2)
#define CS_ODD_SECONDARY	BIT(3)

#define CS_EVEN			(CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
#define CS_ODD			(CS_ODD_PRIMARY | CS_ODD_SECONDARY)
static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
	int cs_mode = 0;

	if (csrow_enabled(2 * dimm, ctrl, pvt))
		cs_mode |= CS_EVEN_PRIMARY;

	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
		cs_mode |= CS_ODD_PRIMARY;

	/* Asymmetric dual-rank DIMM support. */
	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
		cs_mode |= CS_ODD_SECONDARY;

	return cs_mode;
}
static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1, cs0, cs1, cs_mode;

	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);

	for (dimm = 0; dimm < 2; dimm++) {
		cs0 = dimm * 2;
		cs1 = dimm * 2 + 1;

		cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);

		size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
		size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
				cs0,	size0,
				cs1,	size1);
	}
}
static void __dump_misc_regs_df(struct amd64_pvt *pvt)
{
	struct amd64_umc *umc;
	u32 i, tmp, umc_base;

	for_each_umc(i) {
		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);

		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
				    (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");

		if (pvt->dram_type == MEM_LRDDR4) {
			amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
					i, 1 << ((tmp >> 4) & 0x3));
		}

		debug_display_dimm_sizes_df(pvt, i);
	}

	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt));
}
/* Display and decode various NB registers for debug purposes. */
static void __dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	debug_display_dimm_sizes(pvt, 1);

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}
/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	if (pvt->umc)
		__dump_misc_regs_df(pvt);
	else
		__dump_misc_regs(pvt);

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
/*
 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else if (pvt->fam >= 0x17) {
		int umc;

		for_each_umc(umc) {
			pvt->csels[umc].b_cnt = 4;
			pvt->csels[umc].m_cnt = 2;
		}

	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}
static void read_umc_base_mask(struct amd64_pvt *pvt)
{
	u32 umc_base_reg, umc_base_reg_sec;
	u32 umc_mask_reg, umc_mask_reg_sec;
	u32 base_reg, base_reg_sec;
	u32 mask_reg, mask_reg_sec;
	u32 *base, *base_sec;
	u32 *mask, *mask_sec;
	int cs, umc;

	for_each_umc(umc) {
		umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
		umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;

		for_each_chip_select(cs, umc, pvt) {
			base = &pvt->csels[umc].csbases[cs];
			base_sec = &pvt->csels[umc].csbases_sec[cs];

			base_reg = umc_base_reg + (cs * 4);
			base_reg_sec = umc_base_reg_sec + (cs * 4);

			if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *base, base_reg);

			if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
				edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *base_sec, base_reg_sec);
		}

		umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
		umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;

		for_each_chip_select_mask(cs, umc, pvt) {
			mask = &pvt->csels[umc].csmasks[cs];
			mask_sec = &pvt->csels[umc].csmasks_sec[cs];

			mask_reg = umc_mask_reg + (cs * 4);
			mask_reg_sec = umc_mask_reg_sec + (cs * 4);

			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *mask, mask_reg);

			if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
				edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *mask_sec, mask_reg_sec);
		}
	}
}
/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	if (pvt->umc)
		return read_umc_base_mask(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
				 cs, *base1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}
}
static void determine_memory_type(struct amd64_pvt *pvt)
{
	u32 dram_ctrl, dcsm;

	if (pvt->umc) {
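		/* DIMM cfg bit 5 set on either UMC flags LRDIMMs, bit 4 RDIMMs;
		 * otherwise assume unbuffered DDR4. */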
		if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
			pvt->dram_type = MEM_LRDDR4;
		else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
			pvt->dram_type = MEM_RDDR4;
		else
			pvt->dram_type = MEM_DDR4;
		return;
	}

	switch (pvt->fam) {
	case 0xf:
		if (pvt->ext_model >= K8_REV_F)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
		return;

	case 0x10:
		if (pvt->dchr0 & DDR3_MODE)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
		return;

	case 0x15:
		if (pvt->model < 0x60)
			goto ddr3;

		/*
		 * Model 0x60h needs special handling:
		 *
		 * We use a Chip Select value of '0' to obtain dcsm.
		 * Theoretically, it is possible to populate LRDIMMs of different
		 * 'Rank' value on a DCT. But this is not the common case. So,
		 * it's reasonable to assume all DIMMs are going to be of same
		 * 'type' until proven otherwise.
		 */
		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
		dcsm = pvt->csels[0].csmasks[0];

		if (((dram_ctrl >> 8) & 0x7) == 0x2)
			pvt->dram_type = MEM_DDR4;
		else if (pvt->dclr0 & BIT(16))
			pvt->dram_type = MEM_DDR3;
		else if (dcsm & 0x3)
			pvt->dram_type = MEM_LRDDR3;
		else
			pvt->dram_type = MEM_RDDR3;

		return;

	case 0x16:
		goto ddr3;

	default:
		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
		pvt->dram_type = MEM_EMPTY;
	}
	return;

ddr3:
	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}
/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u16 mce_nid = amd_get_nb_id(m->extcpu);
	struct mem_ctl_info *mci;
	u8 start_bit = 1;
	u8 end_bit   = 47;
	u64 addr;

	mci = edac_mc_find(mce_nid);
	if (!mci)
		return 0;

	pvt = mci->pvt_info;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u8 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
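		/*
		 * intlv_en is 1, 3 or 7 here, so intlv_en + 1 is a power of
		 * two and __fls() yields the number of interleave bits.
		 */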
		/* faster log2 */
		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}
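/*
 * Return the first PCI device with the given vendor/device ID that lives in
 * the same PCI domain, on the same bus and in the same slot as @related.
 */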
static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	if (pvt->model == 0x60)
		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
	else if (pvt->model == 0x30)
		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
	else
		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

	/* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

	/* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}
static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}
static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
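		/* e.g. cs_mode 7: diff = 7/3 + 1 = 3, so 32 << (7 - 3) = 512MB */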
		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}
/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}
static int f17_early_channel_count(struct amd64_pvt *pvt)
{
	int i, channels = 0;

	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
	for_each_umc(i)
		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}
static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}
static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i < 4 || i == 6)
		cs_size = -1;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = rank_multiply * (128 << shift);

	return cs_size;
}
static int ddr4_cs_size(unsigned i)
{
	int cs_size = 0;

	if (i == 0)
		cs_size = -1;
	else if (i == 1)
		cs_size = 1024;
	else
		/* Min cs_size = 1G */
		cs_size = 1024 * (1 << (i >> 1));

	return cs_size;
}
static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}
/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}
/* F15h M60h supports DDR4 mapping as well.. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
					unsigned cs_mode, int cs_mask_nr)
{
	int cs_size;
	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];

	WARN_ON(cs_mode > 12);

	if (pvt->dram_type == MEM_DDR4) {
		if (cs_mode > 9)
			return -1;

		cs_size = ddr4_cs_size(cs_mode);
	} else if (pvt->dram_type == MEM_LRDDR3) {
		unsigned rank_multiply = dcsm & 0xf;

		if (rank_multiply == 3)
			rank_multiply = 4;
		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
	} else {
		/* Minimum cs size is 512mb for F15hM60h */
		if (cs_mode == 0x1)
			return -1;

		cs_size = ddr3_cs_size(cs_mode, false);
	}

	return cs_size;
}
/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}
static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
				    unsigned int cs_mode, int csrow_nr)
{
	u32 addr_mask_orig, addr_mask_deinterleaved;
	u32 msb, weight, num_zero_bits;
	int dimm, size = 0;

	/* No Chip Selects are enabled. */
	if (!cs_mode)
		return size;

	/* Requested size of an even CS but none are enabled. */
	if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
		return size;

	/* Requested size of an odd CS but none are enabled. */
	if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
		return size;

	/*
	 * There is one mask per DIMM, and two Chip Selects per DIMM.
	 *	CS0 and CS1 -> DIMM0
	 *	CS2 and CS3 -> DIMM1
	 */
	dimm = csrow_nr >> 1;

	/* Asymmetric dual-rank DIMM support. */
	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
		addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
	else
		addr_mask_orig = pvt->csels[umc].csmasks[dimm];

	/*
	 * The number of zero bits in the mask is equal to the number of bits
	 * in a full mask minus the number of bits in the current mask.
	 *
	 * The MSB is the number of bits in the full mask because BIT[0] is
	 * always 0.
	 */
	msb = fls(addr_mask_orig) - 1;
	weight = hweight_long(addr_mask_orig);
	num_zero_bits = msb - weight;

	/* Take the number of zero bits off from the top of the mask. */
	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
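	/*
	 * Worked example (hypothetical mask): addr_mask_orig = 0x3fffffc,
	 * a 2-way interleaved CS: msb = 25, weight = 24, so one zero bit
	 * is dropped and addr_mask_deinterleaved = GENMASK(24, 1) =
	 * 0x1fffffe, giving ((0x1fffffe >> 2) + 1) kB = 8 GB for this CS.
	 */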
	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);

	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
	size = (addr_mask_deinterleaved >> 2) + 1;

	/* Return size in MBs. */
	return size >> 10;
}
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);
		switch (intlv_addr) {
		case 0x4:
			channel = (sys_addr >> 8) & 0x3;
			break;
		case 0x5:
			channel = (sys_addr >> 9) & 0x3;
			break;
		}
	}
	return channel;
}
/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
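			/* temp is the parity of sys_addr bits [20:16] */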
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		if (intlv_addr & 0x4) {
			u8 shift = intlv_addr & 0x1 ? 9 : 8;

			return (sys_addr >> shift) & 1;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}
/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
}
/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}
/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = edac_mc_find(nid);
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found = csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}
/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (pvt->fam == 0x10) {
		/* only revC3 and revE have that feature */
		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
	}

	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;
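	/* base/limit/size fields below are in 128MB (1 << 27) granularity */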
	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	     (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	u64 chan_addr;
	u32 dct_sel_base;
	u8 channel;
	bool high_range = false;

	u8 node_id    = dram_dst_node(pvt, range);
	u8 intlv_en   = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt) &&
	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = true;

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
	if (intlv_en)
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	   !dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					     (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					     (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				     (chan_addr & 0xfff);
	}

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0)
		*chan_sel = channel;

	return cs_found;
}
1975 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1976 u64 sys_addr, int *chan_sel)
1978 int cs_found = -EINVAL;
1979 int num_dcts_intlv = 0;
1980 u64 chan_addr, chan_offset;
1981 u64 dct_base, dct_limit;
1982 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1983 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1985 u64 dhar_offset = f10_dhar_offset(pvt);
1986 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1987 u8 node_id = dram_dst_node(pvt, range);
1988 u8 intlv_en = dram_intlv_en(pvt, range);
1990 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1991 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
1993 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
1994 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
1996 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1997 range, sys_addr, get_dram_limit(pvt, range));
1999 if (!(get_dram_base(pvt, range) <= sys_addr) &&
2000 !(get_dram_limit(pvt, range) >= sys_addr))
2001 return -EINVAL;
2003 if (dhar_valid(pvt) &&
2004 dhar_base(pvt) <= sys_addr &&
2005 sys_addr < BIT_64(32)) {
2006 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2007 sys_addr);
2008 return -EINVAL;
2011 /* Verify sys_addr is within DCT Range. */
2012 dct_base = (u64) dct_sel_baseaddr(pvt);
2013 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2015 if (!(dct_cont_base_reg & BIT(0)) &&
2016 !(dct_base <= (sys_addr >> 27) &&
2017 dct_limit >= (sys_addr >> 27)))
2018 return -EINVAL;
2020 /* Verify number of dct's that participate in channel interleaving. */
2021 num_dcts_intlv = (int) hweight8(intlv_en);
2023 if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
2024 return -EINVAL;
2026 if (pvt->model >= 0x60)
2027 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2028 else
2029 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2030 num_dcts_intlv, dct_sel);
2032 /* Verify we stay within the MAX number of channels allowed */
2033 if (channel > 3)
2034 return -EINVAL;
2036 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2038 /* Get normalized DCT addr */
2039 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2040 chan_offset = dhar_offset;
2041 else
2042 chan_offset = dct_base << 27;
2044 chan_addr = sys_addr - chan_offset;
2046 /* remove channel interleave */
2047 if (num_dcts_intlv == 2) {
2048 if (intlv_addr == 0x4)
2049 chan_addr = ((chan_addr >> 9) << 8) |
2050 (chan_addr & 0xff);
2051 else if (intlv_addr == 0x5)
2052 chan_addr = ((chan_addr >> 10) << 9) |
2053 (chan_addr & 0x1ff);
2054 else
2055 return -EINVAL;
2057 } else if (num_dcts_intlv == 4) {
2058 if (intlv_addr == 0x4)
2059 chan_addr = ((chan_addr >> 10) << 8) |
2060 (chan_addr & 0xff);
2061 else if (intlv_addr == 0x5)
2062 chan_addr = ((chan_addr >> 11) << 9) |
2063 (chan_addr & 0x1ff);
2064 else
2065 return -EINVAL;
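/*
 * Worked example (illustrative): with num_dcts_intlv == 4 and
 * intlv_addr == 0x4, the two select bits A[9:8] are squeezed out, so a
 * chan_addr of 0x12345 becomes ((0x12345 >> 10) << 8) | (0x12345 & 0xff)
 * = 0x4845.
 */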
2068 if (dct_offset_en) {
2069 amd64_read_pci_cfg(pvt->F1,
2070 DRAM_CONT_HIGH_OFF + (int) channel * 4,
2071 &tmp);
2072 chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
2075 f15h_select_dct(pvt, channel);
2077 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
2079 /*
2080 * Find Chip select:
2081 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
2082 * there is support for 4 DCT's, but only 2 are currently functional.
2083 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
2084 * pvt->csels[1]. So we need to use '1' here to get correct info.
2085 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
2086 */
2087 alias_channel = (channel == 3) ? 1 : channel;
2089 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2091 if (cs_found >= 0)
2092 *chan_sel = alias_channel;
2094 return cs_found;
2097 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2098 u64 sys_addr,
2099 int *chan_sel)
2101 int cs_found = -EINVAL;
2102 unsigned range;
2104 for (range = 0; range < DRAM_RANGES; range++) {
2105 if (!dram_rw(pvt, range))
2106 continue;
2108 if (pvt->fam == 0x15 && pvt->model >= 0x30)
2109 cs_found = f15_m30h_match_to_this_node(pvt, range,
2110 sys_addr,
2111 chan_sel);
2113 else if ((get_dram_base(pvt, range) <= sys_addr) &&
2114 (get_dram_limit(pvt, range) >= sys_addr)) {
2115 cs_found = f1x_match_to_this_node(pvt, range,
2116 sys_addr, chan_sel);
2117 if (cs_found >= 0)
2118 break;
2121 return cs_found;
2124 /*
2125 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2126 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2127 *
2128 * The @sys_addr is usually an error address received from the hardware
2129 * (MCX_ADDR).
2130 */
2131 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2132 struct err_info *err)
2134 struct amd64_pvt *pvt = mci->pvt_info;
2136 error_address_to_page_and_offset(sys_addr, err);
2138 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2139 if (err->csrow < 0) {
2140 err->err_code = ERR_CSROW;
2141 return;
2144 /*
2145 * We need the syndromes for channel detection only when we're
2146 * ganged. Otherwise @chan should already contain the channel at
2147 * this point.
2148 */
2149 if (dct_ganging_enabled(pvt))
2150 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
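/*
 * Illustrative note: error_address_to_page_and_offset() splits sys_addr at
 * PAGE_SHIFT; e.g. with 4 KiB pages, sys_addr 0x12345678 yields page
 * 0x12345 and offset 0x678.
 */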
2153 /*
2154 * Debug routine to display the memory sizes of all logical DIMMs and their
2155 * CSROWs.
2156 */
2157 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2159 int dimm, size0, size1;
2160 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2161 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
2163 if (pvt->fam == 0xf) {
2164 /* K8 families < revF not supported yet */
2165 if (pvt->ext_model < K8_REV_F)
2166 return;
2167 else
2168 WARN_ON(ctrl != 0);
2171 if (pvt->fam == 0x10) {
2172 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2173 : pvt->dbam0;
2174 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2175 pvt->csels[1].csbases :
2176 pvt->csels[0].csbases;
2177 } else if (ctrl) {
2178 dbam = pvt->dbam0;
2179 dcsb = pvt->csels[1].csbases;
2181 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2182 ctrl, dbam);
2184 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2186 /* Dump memory sizes for each DIMM and its CSROWs */
2187 for (dimm = 0; dimm < 4; dimm++) {
2189 size0 = 0;
2190 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
2191 /*
2192 * For F15 M60h, we need a multiplier for the LRDIMM cs_size
2193 * calculation. We pass the dimm value to the dbam_to_cs
2194 * mapper so we can find the multiplier from the
2195 * corresponding DCSM.
2196 */
2197 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2198 DBAM_DIMM(dimm, dbam),
2199 dimm);
2201 size1 = 0;
2202 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
2203 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2204 DBAM_DIMM(dimm, dbam),
2205 dimm);
2207 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
2208 dimm * 2, size0,
2209 dimm * 2 + 1, size1);
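/*
 * Illustrative example: on a node with two 2048 MB chip selects behind
 * DCT0, the loop above might log something like
 *	EDAC amd64: MC: 0:  2048MB 1:  2048MB
 *	EDAC amd64: MC: 2:     0MB 3:     0MB
 */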
2213 static struct amd64_family_type family_types[] = {
2214 [K8_CPUS] = {
2215 .ctl_name = "K8",
2216 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
2217 .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2218 .max_mcs = 2,
2219 .ops = {
2220 .early_channel_count = k8_early_channel_count,
2221 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
2222 .dbam_to_cs = k8_dbam_to_chip_select,
2225 [F10_CPUS] = {
2226 .ctl_name = "F10h",
2227 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
2228 .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2229 .max_mcs = 2,
2230 .ops = {
2231 .early_channel_count = f1x_early_channel_count,
2232 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2233 .dbam_to_cs = f10_dbam_to_chip_select,
2236 [F15_CPUS] = {
2237 .ctl_name = "F15h",
2238 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
2239 .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
2240 .max_mcs = 2,
2241 .ops = {
2242 .early_channel_count = f1x_early_channel_count,
2243 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2244 .dbam_to_cs = f15_dbam_to_chip_select,
2247 [F15_M30H_CPUS] = {
2248 .ctl_name = "F15h_M30h",
2249 .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
2250 .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2251 .max_mcs = 2,
2252 .ops = {
2253 .early_channel_count = f1x_early_channel_count,
2254 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2255 .dbam_to_cs = f16_dbam_to_chip_select,
2258 [F15_M60H_CPUS] = {
2259 .ctl_name = "F15h_M60h",
2260 .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
2261 .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
2262 .max_mcs = 2,
2263 .ops = {
2264 .early_channel_count = f1x_early_channel_count,
2265 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2266 .dbam_to_cs = f15_m60h_dbam_to_chip_select,
2269 [F16_CPUS] = {
2270 .ctl_name = "F16h",
2271 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
2272 .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
2273 .max_mcs = 2,
2274 .ops = {
2275 .early_channel_count = f1x_early_channel_count,
2276 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2277 .dbam_to_cs = f16_dbam_to_chip_select,
2280 [F16_M30H_CPUS] = {
2281 .ctl_name = "F16h_M30h",
2282 .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
2283 .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2284 .max_mcs = 2,
2285 .ops = {
2286 .early_channel_count = f1x_early_channel_count,
2287 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2288 .dbam_to_cs = f16_dbam_to_chip_select,
2291 [F17_CPUS] = {
2292 .ctl_name = "F17h",
2293 .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2294 .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2295 .max_mcs = 2,
2296 .ops = {
2297 .early_channel_count = f17_early_channel_count,
2298 .dbam_to_cs = f17_addr_mask_to_cs_size,
2301 [F17_M10H_CPUS] = {
2302 .ctl_name = "F17h_M10h",
2303 .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
2304 .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
2305 .max_mcs = 2,
2306 .ops = {
2307 .early_channel_count = f17_early_channel_count,
2308 .dbam_to_cs = f17_addr_mask_to_cs_size,
2311 [F17_M30H_CPUS] = {
2312 .ctl_name = "F17h_M30h",
2313 .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
2314 .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
2315 .max_mcs = 8,
2316 .ops = {
2317 .early_channel_count = f17_early_channel_count,
2318 .dbam_to_cs = f17_addr_mask_to_cs_size,
2321 [F17_M60H_CPUS] = {
2322 .ctl_name = "F17h_M60h",
2323 .f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
2324 .f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
2325 .max_mcs = 2,
2326 .ops = {
2327 .early_channel_count = f17_early_channel_count,
2328 .dbam_to_cs = f17_addr_mask_to_cs_size,
2331 [F17_M70H_CPUS] = {
2332 .ctl_name = "F17h_M70h",
2333 .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
2334 .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
2335 .max_mcs = 2,
2336 .ops = {
2337 .early_channel_count = f17_early_channel_count,
2338 .dbam_to_cs = f17_addr_mask_to_cs_size,
2341 [F19_CPUS] = {
2342 .ctl_name = "F19h",
2343 .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
2344 .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
2345 .max_mcs = 8,
2346 .ops = {
2347 .early_channel_count = f17_early_channel_count,
2348 .dbam_to_cs = f17_addr_mask_to_cs_size,
2353 /*
2354 * These are tables of eigenvectors (one per line) which can be used for the
2355 * construction of the syndrome tables. The modified syndrome search algorithm
2356 * uses those to find the symbol in error and thus the DIMM.
2357 *
2358 * Algorithm courtesy of Ross LaFetra from AMD.
2359 */
2360 static const u16 x4_vectors[] = {
2361 0x2f57, 0x1afe, 0x66cc, 0xdd88,
2362 0x11eb, 0x3396, 0x7f4c, 0xeac8,
2363 0x0001, 0x0002, 0x0004, 0x0008,
2364 0x1013, 0x3032, 0x4044, 0x8088,
2365 0x106b, 0x30d6, 0x70fc, 0xe0a8,
2366 0x4857, 0xc4fe, 0x13cc, 0x3288,
2367 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2368 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2369 0x15c1, 0x2a42, 0x89ac, 0x4758,
2370 0x2b03, 0x1602, 0x4f0c, 0xca08,
2371 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2372 0x8ba7, 0x465e, 0x244c, 0x1cc8,
2373 0x2b87, 0x164e, 0x642c, 0xdc18,
2374 0x40b9, 0x80de, 0x1094, 0x20e8,
2375 0x27db, 0x1eb6, 0x9dac, 0x7b58,
2376 0x11c1, 0x2242, 0x84ac, 0x4c58,
2377 0x1be5, 0x2d7a, 0x5e34, 0xa718,
2378 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2379 0x4c97, 0xc87e, 0x11fc, 0x33a8,
2380 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2381 0x16b3, 0x3d62, 0x4f34, 0x8518,
2382 0x1e2f, 0x391a, 0x5cac, 0xf858,
2383 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2384 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2385 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2386 0x4397, 0xc27e, 0x17fc, 0x3ea8,
2387 0x1617, 0x3d3e, 0x6464, 0xb8b8,
2388 0x23ff, 0x12aa, 0xab6c, 0x56d8,
2389 0x2dfb, 0x1ba6, 0x913c, 0x7328,
2390 0x185d, 0x2ca6, 0x7914, 0x9e28,
2391 0x171b, 0x3e36, 0x7d7c, 0xebe8,
2392 0x4199, 0x82ee, 0x19f4, 0x2e58,
2393 0x4807, 0xc40e, 0x130c, 0x3208,
2394 0x1905, 0x2e0a, 0x5804, 0xac08,
2395 0x213f, 0x132a, 0xadfc, 0x5ba8,
2396 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2399 static const u16 x8_vectors[] = {
2400 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2401 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2402 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2403 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2404 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2405 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2406 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2407 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2408 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2409 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2410 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2411 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2412 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2413 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2414 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2415 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2416 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2417 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2418 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2421 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2422 unsigned v_dim)
2424 unsigned int i, err_sym;
2426 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2427 u16 s = syndrome;
2428 unsigned v_idx = err_sym * v_dim;
2429 unsigned v_end = (err_sym + 1) * v_dim;
2431 /* walk over all 16 bits of the syndrome */
2432 for (i = 1; i < (1U << 16); i <<= 1) {
2434 /* if bit is set in that eigenvector... */
2435 if (v_idx < v_end && vectors[v_idx] & i) {
2436 u16 ev_comp = vectors[v_idx++];
2438 /* ... and bit set in the modified syndrome, */
2439 if (s & i) {
2440 /* remove it. */
2441 s ^= ev_comp;
2443 if (!s)
2444 return err_sym;
2447 } else if (s & i)
2448 /* can't get to zero, move to next symbol */
2449 break;
2453 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2454 return -1;
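/*
 * Worked example (illustrative): err_sym 2 of x4_vectors is the identity
 * group {0x0001, 0x0002, 0x0004, 0x0008}. A syndrome of 0x000a
 * (= 0x0002 ^ 0x0008) survives the walks over groups 0 and 1 but is
 * cancelled bit by bit here, so decode_syndrome(0x000a, x4_vectors,
 * ARRAY_SIZE(x4_vectors), 4) returns 2.
 */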
2457 static int map_err_sym_to_channel(int err_sym, int sym_size)
2459 if (sym_size == 4)
2460 switch (err_sym) {
2461 case 0x20:
2462 case 0x21:
2463 return 0;
2465 case 0x22:
2466 case 0x23:
2467 return 1;
2469 default:
2470 return err_sym >> 4;
2473 /* x8 symbols */
2474 else
2475 switch (err_sym) {
2476 /* imaginary bits not in a DIMM */
2477 case 0x10:
2478 WARN(1, "Invalid error symbol: 0x%x\n",
2479      err_sym);
2480 return -1;
2483 case 0x11:
2484 return 0;
2486 case 0x12:
2487 return 1;
2489 default:
2490 return err_sym >> 3;
2493 return -1;
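/*
 * e.g. for x4 symbols, err_sym 0x22 maps to channel 1, while err_sym 2
 * from the example above falls through to the default case and yields
 * channel 2 >> 4 == 0.
 */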
2496 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2498 struct amd64_pvt *pvt = mci->pvt_info;
2499 int err_sym = -1;
2501 if (pvt->ecc_sym_sz == 8)
2502 err_sym = decode_syndrome(syndrome, x8_vectors,
2503 ARRAY_SIZE(x8_vectors),
2504 pvt->ecc_sym_sz);
2505 else if (pvt->ecc_sym_sz == 4)
2506 err_sym = decode_syndrome(syndrome, x4_vectors,
2507 ARRAY_SIZE(x4_vectors),
2508 pvt->ecc_sym_sz);
2509 else {
2510 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2511 return err_sym;
2514 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2517 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
2518 u8 ecc_type)
2520 enum hw_event_mc_err_type err_type;
2521 const char *string;
2523 if (ecc_type == 2)
2524 err_type = HW_EVENT_ERR_CORRECTED;
2525 else if (ecc_type == 1)
2526 err_type = HW_EVENT_ERR_UNCORRECTED;
2527 else if (ecc_type == 3)
2528 err_type = HW_EVENT_ERR_DEFERRED;
2529 else {
2530 WARN(1, "Something is rotten in the state of Denmark.\n");
2531 return;
2534 switch (err->err_code) {
2535 case DECODE_OK:
2536 string = "";
2537 break;
2538 case ERR_NODE:
2539 string = "Failed to map error addr to a node";
2540 break;
2541 case ERR_CSROW:
2542 string = "Failed to map error addr to a csrow";
2543 break;
2544 case ERR_CHANNEL:
2545 string = "Unknown syndrome - possible error reporting race";
2546 break;
2547 case ERR_SYND:
2548 string = "MCA_SYND not valid - unknown syndrome and csrow";
2549 break;
2550 case ERR_NORM_ADDR:
2551 string = "Cannot decode normalized address";
2552 break;
2553 default:
2554 string = "WTF error";
2555 break;
2558 edac_mc_handle_error(err_type, mci, 1,
2559 err->page, err->offset, err->syndrome,
2560 err->csrow, err->channel, -1,
2561 string, "");
2564 static inline void decode_bus_error(int node_id, struct mce *m)
2566 struct mem_ctl_info *mci;
2567 struct amd64_pvt *pvt;
2568 u8 ecc_type = (m->status >> 45) & 0x3;
2569 u8 xec = XEC(m->status, 0x1f);
2570 u16 ec = EC(m->status);
2571 u64 sys_addr;
2572 struct err_info err;
2574 mci = edac_mc_find(node_id);
2575 if (!mci)
2576 return;
2578 pvt = mci->pvt_info;
2580 /* Bail out early if this was an 'observed' error */
2581 if (PP(ec) == NBSL_PP_OBS)
2582 return;
2584 /* Do only ECC errors */
2585 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2586 return;
2588 memset(&err, 0, sizeof(err));
2590 sys_addr = get_error_address(pvt, m);
2592 if (ecc_type == 2)
2593 err.syndrome = extract_syndrome(m->status);
2595 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2597 __log_ecc_error(mci, &err, ecc_type);
2600 /*
2601 * To find the UMC channel represented by this bank we need to match on its
2602 * instance_id. The instance_id of a bank is held in the lower 32 bits of its
2603 * IPID.
2604 *
2605 * Currently, we can derive the channel number by looking at the 6th nibble in
2606 * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
2607 * number.
2608 */
2609 static int find_umc_channel(struct mce *m)
2611 return (m->ipid & GENMASK(31, 0)) >> 20;
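/*
 * e.g. an IPID whose low 32 bits are 0x00150000 decodes to
 * (0x00150000 >> 20) == 1, i.e. UMC channel 1.
 */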
2614 static void decode_umc_error(int node_id, struct mce *m)
2616 u8 ecc_type = (m->status >> 45) & 0x3;
2617 struct mem_ctl_info *mci;
2618 struct amd64_pvt *pvt;
2619 struct err_info err;
2620 u64 sys_addr;
2622 mci = edac_mc_find(node_id);
2623 if (!mci)
2624 return;
2626 pvt = mci->pvt_info;
2628 memset(&err, 0, sizeof(err));
2630 if (m->status & MCI_STATUS_DEFERRED)
2631 ecc_type = 3;
2633 err.channel = find_umc_channel(m);
2635 if (!(m->status & MCI_STATUS_SYNDV)) {
2636 err.err_code = ERR_SYND;
2637 goto log_error;
2640 if (ecc_type == 2) {
2641 u8 length = (m->synd >> 18) & 0x3f;
2643 if (length)
2644 err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
2645 else
2646 err.err_code = ERR_CHANNEL;
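/*
 * Worked example (illustrative): for m->synd = 0x0000beef00400000ULL, the
 * length field (bits 23:18) is 16, so err.syndrome keeps
 * GENMASK(15, 0) of synd[63:32], i.e. 0xbeef.
 */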
2649 err.csrow = m->synd & 0x7;
2651 if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
2652 err.err_code = ERR_NORM_ADDR;
2653 goto log_error;
2656 error_address_to_page_and_offset(sys_addr, &err);
2658 log_error:
2659 __log_ecc_error(mci, &err, ecc_type);
2662 /*
2663 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
2664 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
2665 * Reserve F0 and F6 on systems with a UMC.
2666 */
2667 static int
2668 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2670 if (pvt->umc) {
2671 pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2672 if (!pvt->F0) {
2673 amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
2674 return -ENODEV;
2677 pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2678 if (!pvt->F6) {
2679 pci_dev_put(pvt->F0);
2680 pvt->F0 = NULL;
2682 amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2683 return -ENODEV;
2686 edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
2687 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2688 edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
2690 return 0;
2693 /* Reserve the ADDRESS MAP Device */
2694 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2695 if (!pvt->F1) {
2696 amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
2697 return -ENODEV;
2700 /* Reserve the DCT Device */
2701 pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2702 if (!pvt->F2) {
2703 pci_dev_put(pvt->F1);
2704 pvt->F1 = NULL;
2706 amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2707 return -ENODEV;
2710 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2711 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2712 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2714 return 0;
2717 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2719 if (pvt->umc) {
2720 pci_dev_put(pvt->F0);
2721 pci_dev_put(pvt->F6);
2722 } else {
2723 pci_dev_put(pvt->F1);
2724 pci_dev_put(pvt->F2);
2728 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
2730 pvt->ecc_sym_sz = 4;
2732 if (pvt->umc) {
2733 u8 i;
2735 for_each_umc(i) {
2736 /* Check enabled channels only: */
2737 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
2738 if (pvt->umc[i].ecc_ctrl & BIT(9)) {
2739 pvt->ecc_sym_sz = 16;
2740 return;
2741 } else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
2742 pvt->ecc_sym_sz = 8;
2743 return;
2747 } else if (pvt->fam >= 0x10) {
2748 u32 tmp;
2750 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2751 /* F16h has only DCT0, so no need to read dbam1. */
2752 if (pvt->fam != 0x16)
2753 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2755 /* F10h, revD and later can do x8 ECC too. */
2756 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2757 pvt->ecc_sym_sz = 8;
2761 /*
2762 * Retrieve the hardware registers of the memory controller.
2763 */
2764 static void __read_mc_regs_df(struct amd64_pvt *pvt)
2766 u8 nid = pvt->mc_node_id;
2767 struct amd64_umc *umc;
2768 u32 i, umc_base;
2770 /* Read registers from each UMC */
2771 for_each_umc(i) {
2773 umc_base = get_umc_base(i);
2774 umc = &pvt->umc[i];
2776 amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
2777 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
2778 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
2779 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
2780 amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
2784 /*
2785 * Retrieve the hardware registers of the memory controller (this includes the
2786 * 'Address Map' and 'Misc' device regs)
2787 */
2788 static void read_mc_regs(struct amd64_pvt *pvt)
2790 unsigned int range;
2791 u64 msr_val;
2793 /*
2794 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2795 * those are Read-As-Zero.
2796 */
2797 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2798 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2800 /* Check first whether TOP_MEM2 is enabled: */
2801 rdmsrl(MSR_K8_SYSCFG, msr_val);
2802 if (msr_val & BIT(21)) {
2803 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2804 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2805 } else {
2806 edac_dbg(0, " TOP_MEM2 disabled\n");
2809 if (pvt->umc) {
2810 __read_mc_regs_df(pvt);
2811 amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
2813 goto skip;
2816 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2818 read_dram_ctl_register(pvt);
2820 for (range = 0; range < DRAM_RANGES; range++) {
2821 u8 rw;
2823 /* read settings for this DRAM range */
2824 read_dram_base_limit_regs(pvt, range);
2826 rw = dram_rw(pvt, range);
2827 if (!rw)
2828 continue;
2830 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2831 range,
2832 get_dram_base(pvt, range),
2833 get_dram_limit(pvt, range));
2835 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2836 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2837 (rw & 0x1) ? "R" : "-",
2838 (rw & 0x2) ? "W" : "-",
2839 dram_intlv_sel(pvt, range),
2840 dram_dst_node(pvt, range));
2843 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2844 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2846 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2848 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2849 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2851 if (!dct_ganging_enabled(pvt)) {
2852 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2853 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
2856 skip:
2857 read_dct_base_mask(pvt);
2859 determine_memory_type(pvt);
2860 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
2862 determine_ecc_sym_sz(pvt);
2865 /*
2866 * NOTE: CPU Revision Dependent code
2867 *
2868 * Input:
2869 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2870 * k8 private pointer to -->
2871 * DRAM Bank Address mapping register
2872 * node_id
2873 * DCL register where dual_channel_active is
2874 *
2875 * The DBAM register consists of 4 sets of 4 bits each, with these definitions:
2876 *
2877 * Bits: CSROWs
2878 * 0-3 CSROWs 0 and 1
2879 * 4-7 CSROWs 2 and 3
2880 * 8-11 CSROWs 4 and 5
2881 * 12-15 CSROWs 6 and 7
2882 *
2883 * Values range from: 0 to 15
2884 * The meaning of the values depends on CPU revision and dual-channel state;
2885 * see the relevant BKDG for more info.
2886 *
2887 * The memory controller provides for a total of only 8 CSROWs in its current
2888 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2889 * single channel mode or two (2) DIMMs in dual channel mode.
2890 *
2891 * The following code logic collapses the various tables for CSROW based on CPU
2892 * revision.
2893 *
2894 * Returns:
2895 * The number of PAGE_SIZE pages on the specified CSROW number it
2896 * encompasses.
2897 *
2898 */
2899 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2901 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2902 int csrow_nr = csrow_nr_orig;
2903 u32 cs_mode, nr_pages;
2905 if (!pvt->umc) {
2906 csrow_nr >>= 1;
2907 cs_mode = DBAM_DIMM(csrow_nr, dbam);
2908 } else {
2909 cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
2912 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
2913 nr_pages <<= 20 - PAGE_SHIFT;
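/*
 * dbam_to_cs() reports the chip select size in MB; with 4 KiB pages, the
 * shift above turns e.g. 2048 MB into 2048 << (20 - 12) = 524288 pages.
 */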
2915 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2916 csrow_nr_orig, dct, cs_mode);
2917 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2919 return nr_pages;
2922 static int init_csrows_df(struct mem_ctl_info *mci)
2924 struct amd64_pvt *pvt = mci->pvt_info;
2925 enum edac_type edac_mode = EDAC_NONE;
2926 enum dev_type dev_type = DEV_UNKNOWN;
2927 struct dimm_info *dimm;
2928 int empty = 1;
2929 u8 umc, cs;
2931 if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
2932 edac_mode = EDAC_S16ECD16ED;
2933 dev_type = DEV_X16;
2934 } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
2935 edac_mode = EDAC_S8ECD8ED;
2936 dev_type = DEV_X8;
2937 } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
2938 edac_mode = EDAC_S4ECD4ED;
2939 dev_type = DEV_X4;
2940 } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
2941 edac_mode = EDAC_SECDED;
2944 for_each_umc(umc) {
2945 for_each_chip_select(cs, umc, pvt) {
2946 if (!csrow_enabled(cs, umc, pvt))
2947 continue;
2949 empty = 0;
2950 dimm = mci->csrows[cs]->channels[umc]->dimm;
2952 edac_dbg(1, "MC node: %d, csrow: %d\n",
2953 pvt->mc_node_id, cs);
2955 dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
2956 dimm->mtype = pvt->dram_type;
2957 dimm->edac_mode = edac_mode;
2958 dimm->dtype = dev_type;
2959 dimm->grain = 64;
2963 return empty;
2966 /*
2967 * Initialize the array of csrow attribute instances, based on the values
2968 * from pci config hardware registers.
2969 */
2970 static int init_csrows(struct mem_ctl_info *mci)
2972 struct amd64_pvt *pvt = mci->pvt_info;
2973 enum edac_type edac_mode = EDAC_NONE;
2974 struct csrow_info *csrow;
2975 struct dimm_info *dimm;
2976 int i, j, empty = 1;
2977 int nr_pages = 0;
2978 u32 val;
2980 if (pvt->umc)
2981 return init_csrows_df(mci);
2983 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2985 pvt->nbcfg = val;
2987 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2988 pvt->mc_node_id, val,
2989 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2991 /*
2992 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2993 */
2994 for_each_chip_select(i, 0, pvt) {
2995 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2996 bool row_dct1 = false;
2998 if (pvt->fam != 0xf)
2999 row_dct1 = !!csrow_enabled(i, 1, pvt);
3001 if (!row_dct0 && !row_dct1)
3002 continue;
3004 csrow = mci->csrows[i];
3005 empty = 0;
3007 edac_dbg(1, "MC node: %d, csrow: %d\n",
3008 pvt->mc_node_id, i);
3010 if (row_dct0) {
3011 nr_pages = get_csrow_nr_pages(pvt, 0, i);
3012 csrow->channels[0]->dimm->nr_pages = nr_pages;
3015 /* K8 has only one DCT */
3016 if (pvt->fam != 0xf && row_dct1) {
3017 int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
3019 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
3020 nr_pages += row_dct1_pages;
3023 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3025 /* Determine DIMM ECC mode: */
3026 if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3027 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3028 ? EDAC_S4ECD4ED
3029 : EDAC_SECDED;
3032 for (j = 0; j < pvt->channel_count; j++) {
3033 dimm = csrow->channels[j]->dimm;
3034 dimm->mtype = pvt->dram_type;
3035 dimm->edac_mode = edac_mode;
3036 dimm->grain = 64;
3040 return empty;
3043 /* get all cores on this DCT */
3044 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3046 int cpu;
3048 for_each_online_cpu(cpu)
3049 if (amd_get_nb_id(cpu) == nid)
3050 cpumask_set_cpu(cpu, mask);
3053 /* check MCG_CTL on all the cpus on this node */
3054 static bool nb_mce_bank_enabled_on_node(u16 nid)
3056 cpumask_var_t mask;
3057 int cpu, nbe;
3058 bool ret = false;
3060 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3061 amd64_warn("%s: Error allocating mask\n", __func__);
3062 return false;
3065 get_cpus_on_this_dct_cpumask(mask, nid);
3067 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3069 for_each_cpu(cpu, mask) {
3070 struct msr *reg = per_cpu_ptr(msrs, cpu);
3071 nbe = reg->l & MSR_MCGCTL_NBE;
3073 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
3074 cpu, reg->q,
3075 (nbe ? "enabled" : "disabled"));
3077 if (!nbe)
3078 goto out;
3080 ret = true;
3082 out:
3083 free_cpumask_var(mask);
3084 return ret;
3087 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
3089 cpumask_var_t cmask;
3090 int cpu;
3092 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
3093 amd64_warn("%s: error allocating mask\n", __func__);
3094 return -ENOMEM;
3097 get_cpus_on_this_dct_cpumask(cmask, nid);
3099 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3101 for_each_cpu(cpu, cmask) {
3103 struct msr *reg = per_cpu_ptr(msrs, cpu);
3105 if (on) {
3106 if (reg->l & MSR_MCGCTL_NBE)
3107 s->flags.nb_mce_enable = 1;
3109 reg->l |= MSR_MCGCTL_NBE;
3110 } else {
3111 /*
3112 * Turn off NB MCE reporting only when it was off before.
3113 */
3114 if (!s->flags.nb_mce_enable)
3115 reg->l &= ~MSR_MCGCTL_NBE;
3118 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3120 free_cpumask_var(cmask);
3122 return 0;
3125 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3126 struct pci_dev *F3)
3128 bool ret = true;
3129 u32 value, mask = 0x3; /* UECC/CECC enable */
3131 if (toggle_ecc_err_reporting(s, nid, ON)) {
3132 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
3133 return false;
3136 amd64_read_pci_cfg(F3, NBCTL, &value);
3138 s->old_nbctl = value & mask;
3139 s->nbctl_valid = true;
3141 value |= mask;
3142 amd64_write_pci_cfg(F3, NBCTL, value);
3144 amd64_read_pci_cfg(F3, NBCFG, &value);
3146 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3147 nid, value, !!(value & NBCFG_ECC_ENABLE));
3149 if (!(value & NBCFG_ECC_ENABLE)) {
3150 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3152 s->flags.nb_ecc_prev = 0;
3154 /* Attempt to turn on DRAM ECC Enable */
3155 value |= NBCFG_ECC_ENABLE;
3156 amd64_write_pci_cfg(F3, NBCFG, value);
3158 amd64_read_pci_cfg(F3, NBCFG, &value);
3160 if (!(value & NBCFG_ECC_ENABLE)) {
3161 amd64_warn("Hardware rejected DRAM ECC enable, "
3162 	   "check memory DIMM configuration.\n");
3163 ret = false;
3164 } else {
3165 amd64_info("Hardware accepted DRAM ECC Enable\n");
3167 } else {
3168 s->flags.nb_ecc_prev = 1;
3171 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3172 nid, value, !!(value & NBCFG_ECC_ENABLE));
3174 return ret;
3177 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3178 struct pci_dev *F3)
3180 u32 value, mask = 0x3; /* UECC/CECC enable */
3182 if (!s->nbctl_valid)
3183 return;
3185 amd64_read_pci_cfg(F3, NBCTL, &value);
3186 value &= ~mask;
3187 value |= s->old_nbctl;
3189 amd64_write_pci_cfg(F3, NBCTL, value);
3191 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
3192 if (!s->flags.nb_ecc_prev) {
3193 amd64_read_pci_cfg(F3, NBCFG, &value);
3194 value &= ~NBCFG_ECC_ENABLE;
3195 amd64_write_pci_cfg(F3, NBCFG, value);
3198 /* restore the NB Enable MCGCTL bit */
3199 if (toggle_ecc_err_reporting(s, nid, OFF))
3200 amd64_warn("Error restoring NB MCGCTL settings!\n");
3203 static bool ecc_enabled(struct amd64_pvt *pvt)
3205 u16 nid = pvt->mc_node_id;
3206 bool nb_mce_en = false;
3207 u8 ecc_en = 0, i;
3208 u32 value;
3210 if (boot_cpu_data.x86 >= 0x17) {
3211 u8 umc_en_mask = 0, ecc_en_mask = 0;
3212 struct amd64_umc *umc;
3214 for_each_umc(i) {
3215 umc = &pvt->umc[i];
3217 /* Only check enabled UMCs. */
3218 if (!(umc->sdp_ctrl & UMC_SDP_INIT))
3219 continue;
3221 umc_en_mask |= BIT(i);
3223 if (umc->umc_cap_hi & UMC_ECC_ENABLED)
3224 ecc_en_mask |= BIT(i);
3227 /* Check whether at least one UMC is enabled: */
3228 if (umc_en_mask)
3229 ecc_en = umc_en_mask == ecc_en_mask;
3230 else
3231 edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3233 /* Assume UMC MCA banks are enabled. */
3234 nb_mce_en = true;
3235 } else {
3236 amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
3238 ecc_en = !!(value & NBCFG_ECC_ENABLE);
3240 nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3241 if (!nb_mce_en)
3242 edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3243 MSR_IA32_MCG_CTL, nid);
3246 amd64_info("Node %d: DRAM ECC %s.\n",
3247 nid, (ecc_en ? "enabled" : "disabled"));
3249 if (!ecc_en || !nb_mce_en)
3250 return false;
3251 else
3252 return true;
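/*
 * Illustrative trace: with two UMCs initialized (umc_en_mask == 0x3) but
 * ECC active on only one of them (ecc_en_mask == 0x1), ecc_en above
 * evaluates false and the node is reported as "DRAM ECC disabled".
 */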
3255 static inline void
3256 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3258 u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3260 for_each_umc(i) {
3261 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3262 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3263 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3265 dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3266 dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3270 /* Set chipkill only if ECC is enabled: */
3271 if (ecc_en) {
3272 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3274 if (!cpk_en)
3275 return;
3277 if (dev_x4)
3278 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3279 else if (dev_x16)
3280 mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3281 else
3282 mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3286 static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
3288 struct amd64_pvt *pvt = mci->pvt_info;
3290 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3291 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3293 if (pvt->umc) {
3294 f17h_determine_edac_ctl_cap(mci, pvt);
3295 } else {
3296 if (pvt->nbcap & NBCAP_SECDED)
3297 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3299 if (pvt->nbcap & NBCAP_CHIPKILL)
3300 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3303 mci->edac_cap = determine_edac_cap(pvt);
3304 mci->mod_name = EDAC_MOD_STR;
3305 mci->ctl_name = fam_type->ctl_name;
3306 mci->dev_name = pci_name(pvt->F3);
3307 mci->ctl_page_to_phys = NULL;
3309 /* memory scrubber interface */
3310 mci->set_sdram_scrub_rate = set_scrub_rate;
3311 mci->get_sdram_scrub_rate = get_scrub_rate;
3314 /*
3315 * returns a pointer to the family descriptor on success, NULL otherwise.
3316 */
3317 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3319 pvt->ext_model = boot_cpu_data.x86_model >> 4;
3320 pvt->stepping = boot_cpu_data.x86_stepping;
3321 pvt->model = boot_cpu_data.x86_model;
3322 pvt->fam = boot_cpu_data.x86;
3324 switch (pvt->fam) {
3325 case 0xf:
3326 fam_type = &family_types[K8_CPUS];
3327 pvt->ops = &family_types[K8_CPUS].ops;
3328 break;
3330 case 0x10:
3331 fam_type = &family_types[F10_CPUS];
3332 pvt->ops = &family_types[F10_CPUS].ops;
3333 break;
3335 case 0x15:
3336 if (pvt->model == 0x30) {
3337 fam_type = &family_types[F15_M30H_CPUS];
3338 pvt->ops = &family_types[F15_M30H_CPUS].ops;
3339 break;
3340 } else if (pvt->model == 0x60) {
3341 fam_type = &family_types[F15_M60H_CPUS];
3342 pvt->ops = &family_types[F15_M60H_CPUS].ops;
3343 break;
3346 fam_type = &family_types[F15_CPUS];
3347 pvt->ops = &family_types[F15_CPUS].ops;
3348 break;
3350 case 0x16:
3351 if (pvt->model == 0x30) {
3352 fam_type = &family_types[F16_M30H_CPUS];
3353 pvt->ops = &family_types[F16_M30H_CPUS].ops;
3354 break;
3356 fam_type = &family_types[F16_CPUS];
3357 pvt->ops = &family_types[F16_CPUS].ops;
3358 break;
3360 case 0x17:
3361 if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
3362 fam_type = &family_types[F17_M10H_CPUS];
3363 pvt->ops = &family_types[F17_M10H_CPUS].ops;
3364 break;
3365 } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
3366 fam_type = &family_types[F17_M30H_CPUS];
3367 pvt->ops = &family_types[F17_M30H_CPUS].ops;
3368 break;
3369 } else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
3370 fam_type = &family_types[F17_M60H_CPUS];
3371 pvt->ops = &family_types[F17_M60H_CPUS].ops;
3372 break;
3373 } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
3374 fam_type = &family_types[F17_M70H_CPUS];
3375 pvt->ops = &family_types[F17_M70H_CPUS].ops;
3376 break;
3378 /* fall through */
3379 case 0x18:
3380 fam_type = &family_types[F17_CPUS];
3381 pvt->ops = &family_types[F17_CPUS].ops;
3383 if (pvt->fam == 0x18)
3384 family_types[F17_CPUS].ctl_name = "F18h";
3385 break;
3387 case 0x19:
3388 fam_type = &family_types[F19_CPUS];
3389 pvt->ops = &family_types[F19_CPUS].ops;
3390 family_types[F19_CPUS].ctl_name = "F19h";
3391 break;
3393 default:
3394 amd64_err("Unsupported family!\n");
3395 return NULL;
3398 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
3399 (pvt->fam == 0xf ?
3400 (pvt->ext_model >= K8_REV_F ? "revF or later "
3401 : "revE or earlier ")
3402 : ""), pvt->mc_node_id);
3403 return fam_type;
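/*
 * e.g. a family 0x17, model 0x31 part falls in the 0x30..0x3f window
 * above and selects family_types[F17_M30H_CPUS], with max_mcs == 8.
 */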
3406 static const struct attribute_group *amd64_edac_attr_groups[] = {
3407 #ifdef CONFIG_EDAC_DEBUG
3408 &amd64_edac_dbg_group,
3409 #endif
3410 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
3411 &amd64_edac_inj_group,
3412 #endif
3413 NULL
3416 static int hw_info_get(struct amd64_pvt *pvt)
3418 u16 pci_id1, pci_id2;
3419 int ret;
3421 if (pvt->fam >= 0x17) {
3422 pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3423 if (!pvt->umc)
3424 return -ENOMEM;
3426 pci_id1 = fam_type->f0_id;
3427 pci_id2 = fam_type->f6_id;
3428 } else {
3429 pci_id1 = fam_type->f1_id;
3430 pci_id2 = fam_type->f2_id;
3433 ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
3434 if (ret)
3435 return ret;
3437 read_mc_regs(pvt);
3439 return 0;
3442 static void hw_info_put(struct amd64_pvt *pvt)
3444 if (pvt->F0 || pvt->F1)
3445 free_mc_sibling_devs(pvt);
3447 kfree(pvt->umc);
3450 static int init_one_instance(struct amd64_pvt *pvt)
3452 struct mem_ctl_info *mci = NULL;
3453 struct edac_mc_layer layers[2];
3454 int ret = -EINVAL;
3456 /*
3457 * We need to determine how many memory channels there are. Then use
3458 * that information for calculating the size of the dynamic instance
3459 * tables in the 'mci' structure.
3460 */
3461 pvt->channel_count = pvt->ops->early_channel_count(pvt);
3462 if (pvt->channel_count < 0)
3463 return ret;
3465 ret = -ENOMEM;
3466 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3467 layers[0].size = pvt->csels[0].b_cnt;
3468 layers[0].is_virt_csrow = true;
3469 layers[1].type = EDAC_MC_LAYER_CHANNEL;
3471 /*
3472 * Always allocate two channels since we can have setups with DIMMs on
3473 * only one channel. Also, this simplifies handling later for the price
3474 * of a couple of KBs tops.
3475 */
3476 layers[1].size = fam_type->max_mcs;
3477 layers[1].is_virt_csrow = false;
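/*
 * edac_mc_alloc() sizes its dimm array as the product of the layer sizes,
 * e.g. 8 chip selects x 2 channels gives 16 dimm slots per node.
 */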
3479 mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
3480 if (!mci)
3481 return ret;
3483 mci->pvt_info = pvt;
3484 mci->pdev = &pvt->F3->dev;
3486 setup_mci_misc_attrs(mci);
3488 if (init_csrows(mci))
3489 mci->edac_cap = EDAC_FLAG_NONE;
3491 ret = -ENODEV;
3492 if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
3493 edac_dbg(1, "failed edac_mc_add_mc()\n");
3494 edac_mc_free(mci);
3495 return ret;
3498 return 0;
3501 static bool instance_has_memory(struct amd64_pvt *pvt)
3503 bool cs_enabled = false;
3504 int cs = 0, dct = 0;
3506 for (dct = 0; dct < fam_type->max_mcs; dct++) {
3507 for_each_chip_select(cs, dct, pvt)
3508 cs_enabled |= csrow_enabled(cs, dct, pvt);
3511 return cs_enabled;
3514 static int probe_one_instance(unsigned int nid)
3516 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3517 struct amd64_pvt *pvt = NULL;
3518 struct ecc_settings *s;
3519 int ret;
3521 ret = -ENOMEM;
3522 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
3523 if (!s)
3524 goto err_out;
3526 ecc_stngs[nid] = s;
3528 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3529 if (!pvt)
3530 goto err_settings;
3532 pvt->mc_node_id = nid;
3533 pvt->F3 = F3;
3535 fam_type = per_family_init(pvt);
3536 if (!fam_type)
3537 goto err_enable;
3539 ret = hw_info_get(pvt);
3540 if (ret < 0)
3541 goto err_enable;
3543 ret = 0;
3544 if (!instance_has_memory(pvt)) {
3545 amd64_info("Node %d: No DIMMs detected.\n", nid);
3546 goto err_enable;
3549 if (!ecc_enabled(pvt)) {
3550 ret = -ENODEV;
3552 if (!ecc_enable_override)
3553 goto err_enable;
3555 if (boot_cpu_data.x86 >= 0x17) {
3556 amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.\n");
3557 goto err_enable;
3558 } else
3559 amd64_warn("Forcing ECC on!\n");
3561 if (!enable_ecc_error_reporting(s, nid, F3))
3562 goto err_enable;
3565 ret = init_one_instance(pvt);
3566 if (ret < 0) {
3567 amd64_err("Error probing instance: %d\n", nid);
3569 if (boot_cpu_data.x86 < 0x17)
3570 restore_ecc_error_reporting(s, nid, F3);
3572 goto err_enable;
3575 dump_misc_regs(pvt);
3577 return ret;
3579 err_enable:
3580 hw_info_put(pvt);
3581 kfree(pvt);
3583 err_settings:
3584 kfree(s);
3585 ecc_stngs[nid] = NULL;
3587 err_out:
3588 return ret;
3591 static void remove_one_instance(unsigned int nid)
3593 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3594 struct ecc_settings *s = ecc_stngs[nid];
3595 struct mem_ctl_info *mci;
3596 struct amd64_pvt *pvt;
3598 /* Remove from EDAC CORE tracking list */
3599 mci = edac_mc_del_mc(&F3->dev);
3600 if (!mci)
3601 return;
3603 pvt = mci->pvt_info;
3605 restore_ecc_error_reporting(s, nid, F3);
3607 kfree(ecc_stngs[nid]);
3608 ecc_stngs[nid] = NULL;
3610 /* Free the EDAC CORE resources */
3611 mci->pvt_info = NULL;
3613 hw_info_put(pvt);
3614 kfree(pvt);
3615 edac_mc_free(mci);
3618 static void setup_pci_device(void)
3620 struct mem_ctl_info *mci;
3621 struct amd64_pvt *pvt;
3623 if (pci_ctl)
3624 return;
3626 mci = edac_mc_find(0);
3627 if (!mci)
3628 return;
3630 pvt = mci->pvt_info;
3631 if (pvt->umc)
3632 pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
3633 else
3634 pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
3635 if (!pci_ctl) {
3636 pr_warn("%s(): Unable to create PCI control\n", __func__);
3637 pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
3641 static const struct x86_cpu_id amd64_cpuids[] = {
3642 X86_MATCH_VENDOR_FAM(AMD, 0x0F, NULL),
3643 X86_MATCH_VENDOR_FAM(AMD, 0x10, NULL),
3644 X86_MATCH_VENDOR_FAM(AMD, 0x15, NULL),
3645 X86_MATCH_VENDOR_FAM(AMD, 0x16, NULL),
3646 X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
3647 X86_MATCH_VENDOR_FAM(HYGON, 0x18, NULL),
3648 X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL),
3651 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
3653 static int __init amd64_edac_init(void)
3655 const char *owner;
3656 int err = -ENODEV;
3657 int i;
3659 owner = edac_get_owner();
3660 if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3661 return -EBUSY;
3663 if (!x86_match_cpu(amd64_cpuids))
3664 return -ENODEV;
3666 if (amd_cache_northbridges() < 0)
3667 return -ENODEV;
3669 opstate_init();
3671 err = -ENOMEM;
3672 ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
3673 if (!ecc_stngs)
3674 goto err_free;
3676 msrs = msrs_alloc();
3677 if (!msrs)
3678 goto err_free;
3680 for (i = 0; i < amd_nb_num(); i++) {
3681 err = probe_one_instance(i);
3682 if (err) {
3683 /* unwind properly */
3684 while (--i >= 0)
3685 remove_one_instance(i);
3687 goto err_pci;
3691 if (!edac_has_mcs()) {
3692 err = -ENODEV;
3693 goto err_pci;
3696 /* register stuff with EDAC MCE */
3697 if (boot_cpu_data.x86 >= 0x17)
3698 amd_register_ecc_decoder(decode_umc_error);
3699 else
3700 amd_register_ecc_decoder(decode_bus_error);
3702 setup_pci_device();
3704 #ifdef CONFIG_X86_32
3705 amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
3706 #endif
3708 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
3710 return 0;
3712 err_pci:
3713 msrs_free(msrs);
3714 msrs = NULL;
3716 err_free:
3717 kfree(ecc_stngs);
3718 ecc_stngs = NULL;
3720 return err;
3723 static void __exit amd64_edac_exit(void)
3725 int i;
3727 if (pci_ctl)
3728 edac_pci_release_generic_ctl(pci_ctl);
3730 /* unregister from EDAC MCE */
3731 if (boot_cpu_data.x86 >= 0x17)
3732 amd_unregister_ecc_decoder(decode_umc_error);
3733 else
3734 amd_unregister_ecc_decoder(decode_bus_error);
3736 for (i = 0; i < amd_nb_num(); i++)
3737 remove_one_instance(i);
3739 kfree(ecc_stngs);
3740 ecc_stngs = NULL;
3742 msrs_free(msrs);
3743 msrs = NULL;
3746 module_init(amd64_edac_init);
3747 module_exit(amd64_edac_exit);
3749 MODULE_LICENSE("GPL");
3750 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3751 "Dave Peterson, Thayne Harbaugh");
3752 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3753 EDAC_AMD64_VERSION);
3755 module_param(edac_op_state, int, 0444);
3756 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");