#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/* count successfully initialized driver instances for setup_pci_device() */
static atomic_t drv_instances = ATOMIC_INIT(0);

static struct ecc_settings **ecc_stngs;
/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * higher' bandwidth value.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	/* ... intermediate scrub rates elided ... */
	{ 0x00, 0UL},		/* scrubbing off */
};
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}
/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	switch (pvt->fam) {
	case 0xf:
		if (dct || offset >= 0x100)
			return -EINVAL;
		break;

	case 0x10:
		if (dct) {
			/*
			 * Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C; reads to F2x1xx
			 * return 0. (cf. Section 2.8.1 F10h BKDG)
			 */
			if (dct_ganging_enabled(pvt))
				return 0;
		}
		break;

	case 0x15:
		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * We should select which DCT we access using F1x10C[DctCfgSel]
		 */
		dct = (dct && pvt->model == 0x30) ? 3 : dct;
		f15h_select_dct(pvt, dct);
		break;
	}

	return amd64_read_pci_cfg(pvt->F2, offset, val);
}
/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If requested is too big, then use last maximum value found.
 */
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater or equal than the setting requested
	 * and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	if (pvt->fam == 0x15 && pvt->model == 0x60) {
		f15h_select_dct(pvt, 0);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
		f15h_select_dct(pvt, 1);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
	} else {
		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
	}

	return scrubrates[i].bandwidth;
}
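
/*
 * Illustrative note (editorial example, not from the BKDG): the search loop
 * above settles on the first recommended table entry whose bandwidth is
 * <= new_bw, so the caller gets back the actual bandwidth that was programmed
 * (the table value) rather than the value it requested.
 */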
static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0x15) {
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			min_scrubrate = 0x6;
	}
	return __set_scrub_rate(pvt, bw, min_scrubrate);
}
static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int i, retval = -EINVAL;

	if (pvt->fam == 0x15) {
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
	} else {
		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	}

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}
/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers.  Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}
/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift	= 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		   (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15, 5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5) << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
	}

	*base = (csbase & base_bits) << addr_shift;

	*mask = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}
#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}
/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
 * info is invalid.  Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater.  In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
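
/*
 * Illustrative example (editorial, not from a specific system): a DHAR base of
 * 0xc0000000 yields hole_base = 0xc0000000 and hole_size = 4GB - 0xc0000000 =
 * 0x40000000 (1GB); hole_offset is then what gets subtracted from a relocated
 * SysAddr above 4GB to form the DramAddr, see sys_addr_to_dram_addr() below.
 */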
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
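
/*
 * Illustrative example: intlv_en == 0x01 interleaves across 2 nodes and uses
 * 1 SysAddr bit, 0x03 uses 2 bits (4 nodes) and 0x07 uses 3 bits (8 nodes),
 * matching intlv_shift_table[] above.
 */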
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}
/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}
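
/*
 * Illustrative example: with 4K pages (PAGE_SHIFT == 12), an error address of
 * 0x12345678 maps to page 0x12345 and page offset 0x678.
 */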
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error.  mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
	u8 bit;
	unsigned long edac_cap = EDAC_FLAG_NONE;

	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}

static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	if (pvt->dram_type == MEM_LRDDR3) {
		u32 dcsm = pvt->csels[chan].csmasks[0];
		/*
		 * It's assumed all LRDIMMs in a DCT are going to be of
		 * same 'type' until proven otherwise. So, use a cs
		 * value of '0' here to get dcsm value.
		 */
		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
	}

	edac_dbg(1, "All DIMMs support ECC:%s\n",
		 (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ? "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ? "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ? "yes" : "no",
		 (dclr & BIT(13)) ? "yes" : "no",
		 (dclr & BIT(14)) ? "yes" : "no",
		 (dclr & BIT(15)) ? "yes" : "no");
}
/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	debug_display_dimm_sizes(pvt, 1);

	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}
/*
 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}
/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
				 cs, *base1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
			edac_dbg(0, "  DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
			edac_dbg(0, "  DCSM1[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}
}
static void determine_memory_type(struct amd64_pvt *pvt)
{
	u32 dram_ctrl, dcsm;

	switch (pvt->fam) {
	case 0xf:
		if (pvt->ext_model >= K8_REV_F)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
		return;

	case 0x10:
		if (pvt->dchr0 & DDR3_MODE)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
		return;

	case 0x15:
		if (pvt->model < 0x60)
			goto ddr3;

		/*
		 * Model 0x60h needs special handling:
		 *
		 * We use a Chip Select value of '0' to obtain dcsm.
		 * Theoretically, it is possible to populate LRDIMMs of different
		 * 'Rank' value on a DCT. But this is not the common case. So,
		 * it's reasonable to assume all DIMMs are going to be of same
		 * 'type' until proven otherwise.
		 */
		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
		dcsm = pvt->csels[0].csmasks[0];

		if (((dram_ctrl >> 8) & 0x7) == 0x2)
			pvt->dram_type = MEM_DDR4;
		else if (pvt->dclr0 & BIT(16))
			pvt->dram_type = MEM_DDR3;
		else if (dcsm & 0x3)
			pvt->dram_type = MEM_LRDDR3;
		else
			pvt->dram_type = MEM_RDDR3;
		return;

	case 0x16:
		goto ddr3;

	default:
		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
		pvt->dram_type = MEM_EMPTY;
	}
	return;

ddr3:
	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}
/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	return (flag) ? 2 : 1;
}
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u16 mce_nid = amd_get_nb_id(m->extcpu);
	struct mem_ctl_info *mci;
	u8 start_bit = 1;
	u8 end_bit   = 47;
	u64 addr;

	mci = edac_mc_find(mce_nid);
	if (!mci)
		return 0;

	pvt = mci->pvt_info;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u8 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}
static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	if (pvt->model == 0x60)
		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
	else if (pvt->model == 0x30)
		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
	else
		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

	/* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

	/* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory.  This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}
static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}
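
/*
 * Worked example for the revD..revE branch above (illustrative): cs_mode = 8
 * gives diff = 8/3 + (8 > 5) = 2 + 1 = 3, so the chip select size is
 * 32 << (8 - 3) = 1024MB.
 */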
/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}
static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i < 4 || i == 6)
		cs_size = -1;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = rank_multiply * (128 << shift);

	return cs_size;
}

static int ddr4_cs_size(unsigned i)
{
	int cs_size = 0;

	if (i == 0)
		cs_size = -1;
	else if (i == 1)
		cs_size = 1024;
	else
		/* Min cs_size = 1G */
		cs_size = 1024 * (1 << (i >> 1));

	return cs_size;
}
static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

/* F15h M60h supports DDR4 mapping as well.. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
					unsigned cs_mode, int cs_mask_nr)
{
	int cs_size;
	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];

	WARN_ON(cs_mode > 12);

	if (pvt->dram_type == MEM_DDR4) {
		cs_size = ddr4_cs_size(cs_mode);
	} else if (pvt->dram_type == MEM_LRDDR3) {
		unsigned rank_multiply = dcsm & 0xf;

		if (rank_multiply == 3)
			rank_multiply = 4;
		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
	} else {
		/* Minimum cs size is 512mb for F15hM60h*/
		cs_size = ddr3_cs_size(cs_mode, false);
	}

	return cs_size;
}

/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}
1376 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1377 * 2.10.12 Memory Interleaving Modes).
1379 static u8
f15_m30h_determine_channel(struct amd64_pvt
*pvt
, u64 sys_addr
,
1380 u8 intlv_en
, int num_dcts_intlv
,
1387 return (u8
)(dct_sel
);
1389 if (num_dcts_intlv
== 2) {
1390 select
= (sys_addr
>> 8) & 0x3;
1391 channel
= select
? 0x3 : 0;
1392 } else if (num_dcts_intlv
== 4) {
1393 u8 intlv_addr
= dct_sel_interleave_addr(pvt
);
1394 switch (intlv_addr
) {
1396 channel
= (sys_addr
>> 8) & 0x3;
1399 channel
= (sys_addr
>> 9) & 0x3;
1407 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1408 * Interleaving Modes.
1410 static u8
f1x_determine_channel(struct amd64_pvt
*pvt
, u64 sys_addr
,
1411 bool hi_range_sel
, u8 intlv_en
)
1413 u8 dct_sel_high
= (pvt
->dct_sel_lo
>> 1) & 1;
1415 if (dct_ganging_enabled(pvt
))
1419 return dct_sel_high
;
1422 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1424 if (dct_interleave_enabled(pvt
)) {
1425 u8 intlv_addr
= dct_sel_interleave_addr(pvt
);
1427 /* return DCT select function: 0=DCT0, 1=DCT1 */
1429 return sys_addr
>> 6 & 1;
1431 if (intlv_addr
& 0x2) {
1432 u8 shift
= intlv_addr
& 0x1 ? 9 : 6;
1433 u32 temp
= hweight_long((u32
) ((sys_addr
>> 16) & 0x1F)) % 2;
1435 return ((sys_addr
>> shift
) & 1) ^ temp
;
1438 return (sys_addr
>> (12 + hweight8(intlv_en
))) & 1;
1441 if (dct_high_range_enabled(pvt
))
1442 return ~dct_sel_high
& 1;
1447 /* Convert the sys_addr to the normalized DCT address */
1448 static u64
f1x_get_norm_dct_addr(struct amd64_pvt
*pvt
, u8 range
,
1449 u64 sys_addr
, bool hi_rng
,
1450 u32 dct_sel_base_addr
)
1453 u64 dram_base
= get_dram_base(pvt
, range
);
1454 u64 hole_off
= f10_dhar_offset(pvt
);
1455 u64 dct_sel_base_off
= (pvt
->dct_sel_hi
& 0xFFFFFC00) << 16;
1460 * base address of high range is below 4Gb
1461 * (bits [47:27] at [31:11])
1462 * DRAM address space on this DCT is hoisted above 4Gb &&
1465 * remove hole offset from sys_addr
1467 * remove high range offset from sys_addr
1469 if ((!(dct_sel_base_addr
>> 16) ||
1470 dct_sel_base_addr
< dhar_base(pvt
)) &&
1472 (sys_addr
>= BIT_64(32)))
1473 chan_off
= hole_off
;
1475 chan_off
= dct_sel_base_off
;
1479 * we have a valid hole &&
1484 * remove dram base to normalize to DCT address
1486 if (dhar_valid(pvt
) && (sys_addr
>= BIT_64(32)))
1487 chan_off
= hole_off
;
1489 chan_off
= dram_base
;
1492 return (sys_addr
& GENMASK_ULL(47,6)) - (chan_off
& GENMASK_ULL(47,23));
1496 * checks if the csrow passed in is marked as SPARED, if so returns the new
1499 static int f10_process_possible_spare(struct amd64_pvt
*pvt
, u8 dct
, int csrow
)
1503 if (online_spare_swap_done(pvt
, dct
) &&
1504 csrow
== online_spare_bad_dramcs(pvt
, dct
)) {
1506 for_each_chip_select(tmp_cs
, dct
, pvt
) {
1507 if (chip_select_base(tmp_cs
, dct
, pvt
) & 0x2) {
/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = edac_mc_find(nid);
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found = csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}
/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (pvt->fam == 0x10) {
		/* only revC3 and revE have that feature */
		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
	}

	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	     (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}
1602 /* For a given @dram_range, check if @sys_addr falls within it. */
1603 static int f1x_match_to_this_node(struct amd64_pvt
*pvt
, unsigned range
,
1604 u64 sys_addr
, int *chan_sel
)
1606 int cs_found
= -EINVAL
;
1610 bool high_range
= false;
1612 u8 node_id
= dram_dst_node(pvt
, range
);
1613 u8 intlv_en
= dram_intlv_en(pvt
, range
);
1614 u32 intlv_sel
= dram_intlv_sel(pvt
, range
);
1616 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1617 range
, sys_addr
, get_dram_limit(pvt
, range
));
1619 if (dhar_valid(pvt
) &&
1620 dhar_base(pvt
) <= sys_addr
&&
1621 sys_addr
< BIT_64(32)) {
1622 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1627 if (intlv_en
&& (intlv_sel
!= ((sys_addr
>> 12) & intlv_en
)))
1630 sys_addr
= f1x_swap_interleaved_region(pvt
, sys_addr
);
1632 dct_sel_base
= dct_sel_baseaddr(pvt
);
1635 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1636 * select between DCT0 and DCT1.
1638 if (dct_high_range_enabled(pvt
) &&
1639 !dct_ganging_enabled(pvt
) &&
1640 ((sys_addr
>> 27) >= (dct_sel_base
>> 11)))
1643 channel
= f1x_determine_channel(pvt
, sys_addr
, high_range
, intlv_en
);
1645 chan_addr
= f1x_get_norm_dct_addr(pvt
, range
, sys_addr
,
1646 high_range
, dct_sel_base
);
1648 /* Remove node interleaving, see F1x120 */
1650 chan_addr
= ((chan_addr
>> (12 + hweight8(intlv_en
))) << 12) |
1651 (chan_addr
& 0xfff);
1653 /* remove channel interleave */
1654 if (dct_interleave_enabled(pvt
) &&
1655 !dct_high_range_enabled(pvt
) &&
1656 !dct_ganging_enabled(pvt
)) {
1658 if (dct_sel_interleave_addr(pvt
) != 1) {
1659 if (dct_sel_interleave_addr(pvt
) == 0x3)
1661 chan_addr
= ((chan_addr
>> 10) << 9) |
1662 (chan_addr
& 0x1ff);
1664 /* A[6] or hash 6 */
1665 chan_addr
= ((chan_addr
>> 7) << 6) |
1669 chan_addr
= ((chan_addr
>> 13) << 12) |
1670 (chan_addr
& 0xfff);
1673 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr
);
1675 cs_found
= f1x_lookup_addr_in_dct(chan_addr
, node_id
, channel
);
1678 *chan_sel
= channel
;
1683 static int f15_m30h_match_to_this_node(struct amd64_pvt
*pvt
, unsigned range
,
1684 u64 sys_addr
, int *chan_sel
)
1686 int cs_found
= -EINVAL
;
1687 int num_dcts_intlv
= 0;
1688 u64 chan_addr
, chan_offset
;
1689 u64 dct_base
, dct_limit
;
1690 u32 dct_cont_base_reg
, dct_cont_limit_reg
, tmp
;
1691 u8 channel
, alias_channel
, leg_mmio_hole
, dct_sel
, dct_offset_en
;
1693 u64 dhar_offset
= f10_dhar_offset(pvt
);
1694 u8 intlv_addr
= dct_sel_interleave_addr(pvt
);
1695 u8 node_id
= dram_dst_node(pvt
, range
);
1696 u8 intlv_en
= dram_intlv_en(pvt
, range
);
1698 amd64_read_pci_cfg(pvt
->F1
, DRAM_CONT_BASE
, &dct_cont_base_reg
);
1699 amd64_read_pci_cfg(pvt
->F1
, DRAM_CONT_LIMIT
, &dct_cont_limit_reg
);
1701 dct_offset_en
= (u8
) ((dct_cont_base_reg
>> 3) & BIT(0));
1702 dct_sel
= (u8
) ((dct_cont_base_reg
>> 4) & 0x7);
1704 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1705 range
, sys_addr
, get_dram_limit(pvt
, range
));
1707 if (!(get_dram_base(pvt
, range
) <= sys_addr
) &&
1708 !(get_dram_limit(pvt
, range
) >= sys_addr
))
1711 if (dhar_valid(pvt
) &&
1712 dhar_base(pvt
) <= sys_addr
&&
1713 sys_addr
< BIT_64(32)) {
1714 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1719 /* Verify sys_addr is within DCT Range. */
1720 dct_base
= (u64
) dct_sel_baseaddr(pvt
);
1721 dct_limit
= (dct_cont_limit_reg
>> 11) & 0x1FFF;
1723 if (!(dct_cont_base_reg
& BIT(0)) &&
1724 !(dct_base
<= (sys_addr
>> 27) &&
1725 dct_limit
>= (sys_addr
>> 27)))
1728 /* Verify number of dct's that participate in channel interleaving. */
1729 num_dcts_intlv
= (int) hweight8(intlv_en
);
1731 if (!(num_dcts_intlv
% 2 == 0) || (num_dcts_intlv
> 4))
1734 channel
= f15_m30h_determine_channel(pvt
, sys_addr
, intlv_en
,
1735 num_dcts_intlv
, dct_sel
);
1737 /* Verify we stay within the MAX number of channels allowed */
1741 leg_mmio_hole
= (u8
) (dct_cont_base_reg
>> 1 & BIT(0));
1743 /* Get normalized DCT addr */
1744 if (leg_mmio_hole
&& (sys_addr
>= BIT_64(32)))
1745 chan_offset
= dhar_offset
;
1747 chan_offset
= dct_base
<< 27;
1749 chan_addr
= sys_addr
- chan_offset
;
1751 /* remove channel interleave */
1752 if (num_dcts_intlv
== 2) {
1753 if (intlv_addr
== 0x4)
1754 chan_addr
= ((chan_addr
>> 9) << 8) |
1756 else if (intlv_addr
== 0x5)
1757 chan_addr
= ((chan_addr
>> 10) << 9) |
1758 (chan_addr
& 0x1ff);
1762 } else if (num_dcts_intlv
== 4) {
1763 if (intlv_addr
== 0x4)
1764 chan_addr
= ((chan_addr
>> 10) << 8) |
1766 else if (intlv_addr
== 0x5)
1767 chan_addr
= ((chan_addr
>> 11) << 9) |
1768 (chan_addr
& 0x1ff);
1773 if (dct_offset_en
) {
1774 amd64_read_pci_cfg(pvt
->F1
,
1775 DRAM_CONT_HIGH_OFF
+ (int) channel
* 4,
1777 chan_addr
+= (u64
) ((tmp
>> 11) & 0xfff) << 27;
1780 f15h_select_dct(pvt
, channel
);
1782 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr
);
1786 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
1787 * there is support for 4 DCT's, but only 2 are currently functional.
1788 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
1789 * pvt->csels[1]. So we need to use '1' here to get correct info.
1790 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
1792 alias_channel
= (channel
== 3) ? 1 : channel
;
1794 cs_found
= f1x_lookup_addr_in_dct(chan_addr
, node_id
, alias_channel
);
1797 *chan_sel
= alias_channel
;
1802 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt
*pvt
,
1806 int cs_found
= -EINVAL
;
1809 for (range
= 0; range
< DRAM_RANGES
; range
++) {
1810 if (!dram_rw(pvt
, range
))
1813 if (pvt
->fam
== 0x15 && pvt
->model
>= 0x30)
1814 cs_found
= f15_m30h_match_to_this_node(pvt
, range
,
1818 else if ((get_dram_base(pvt
, range
) <= sys_addr
) &&
1819 (get_dram_limit(pvt
, range
) >= sys_addr
)) {
1820 cs_found
= f1x_match_to_this_node(pvt
, range
,
1821 sys_addr
, chan_sel
);
/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware.
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt))
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
}
1859 * debug routine to display the memory sizes of all logical DIMMs and its
1862 static void debug_display_dimm_sizes(struct amd64_pvt
*pvt
, u8 ctrl
)
1864 int dimm
, size0
, size1
;
1865 u32
*dcsb
= ctrl
? pvt
->csels
[1].csbases
: pvt
->csels
[0].csbases
;
1866 u32 dbam
= ctrl
? pvt
->dbam1
: pvt
->dbam0
;
1868 if (pvt
->fam
== 0xf) {
1869 /* K8 families < revF not supported yet */
1870 if (pvt
->ext_model
< K8_REV_F
)
1876 if (pvt
->fam
== 0x10) {
1877 dbam
= (ctrl
&& !dct_ganging_enabled(pvt
)) ? pvt
->dbam1
1879 dcsb
= (ctrl
&& !dct_ganging_enabled(pvt
)) ?
1880 pvt
->csels
[1].csbases
:
1881 pvt
->csels
[0].csbases
;
1884 dcsb
= pvt
->csels
[1].csbases
;
1886 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1889 edac_printk(KERN_DEBUG
, EDAC_MC
, "DCT%d chip selects:\n", ctrl
);
1891 /* Dump memory sizes for DIMM and its CSROWs */
1892 for (dimm
= 0; dimm
< 4; dimm
++) {
1895 if (dcsb
[dimm
*2] & DCSB_CS_ENABLE
)
1896 /* For f15m60h, need multiplier for LRDIMM cs_size
1897 * calculation. We pass 'dimm' value to the dbam_to_cs
1898 * mapper so we can find the multiplier from the
1899 * corresponding DCSM.
1901 size0
= pvt
->ops
->dbam_to_cs(pvt
, ctrl
,
1902 DBAM_DIMM(dimm
, dbam
),
1906 if (dcsb
[dimm
*2 + 1] & DCSB_CS_ENABLE
)
1907 size1
= pvt
->ops
->dbam_to_cs(pvt
, ctrl
,
1908 DBAM_DIMM(dimm
, dbam
),
1911 amd64_info(EDAC_MC
": %d: %5dMB %d: %5dMB\n",
1913 dimm
* 2 + 1, size1
);
1917 static struct amd64_family_type family_types
[] = {
1920 .f1_id
= PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP
,
1921 .f3_id
= PCI_DEVICE_ID_AMD_K8_NB_MISC
,
1923 .early_channel_count
= k8_early_channel_count
,
1924 .map_sysaddr_to_csrow
= k8_map_sysaddr_to_csrow
,
1925 .dbam_to_cs
= k8_dbam_to_chip_select
,
1930 .f1_id
= PCI_DEVICE_ID_AMD_10H_NB_MAP
,
1931 .f3_id
= PCI_DEVICE_ID_AMD_10H_NB_MISC
,
1933 .early_channel_count
= f1x_early_channel_count
,
1934 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
1935 .dbam_to_cs
= f10_dbam_to_chip_select
,
1940 .f1_id
= PCI_DEVICE_ID_AMD_15H_NB_F1
,
1941 .f3_id
= PCI_DEVICE_ID_AMD_15H_NB_F3
,
1943 .early_channel_count
= f1x_early_channel_count
,
1944 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
1945 .dbam_to_cs
= f15_dbam_to_chip_select
,
1949 .ctl_name
= "F15h_M30h",
1950 .f1_id
= PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
,
1951 .f3_id
= PCI_DEVICE_ID_AMD_15H_M30H_NB_F3
,
1953 .early_channel_count
= f1x_early_channel_count
,
1954 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
1955 .dbam_to_cs
= f16_dbam_to_chip_select
,
1959 .ctl_name
= "F15h_M60h",
1960 .f1_id
= PCI_DEVICE_ID_AMD_15H_M60H_NB_F1
,
1961 .f3_id
= PCI_DEVICE_ID_AMD_15H_M60H_NB_F3
,
1963 .early_channel_count
= f1x_early_channel_count
,
1964 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
1965 .dbam_to_cs
= f15_m60h_dbam_to_chip_select
,
1970 .f1_id
= PCI_DEVICE_ID_AMD_16H_NB_F1
,
1971 .f3_id
= PCI_DEVICE_ID_AMD_16H_NB_F3
,
1973 .early_channel_count
= f1x_early_channel_count
,
1974 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
1975 .dbam_to_cs
= f16_dbam_to_chip_select
,
1979 .ctl_name
= "F16h_M30h",
1980 .f1_id
= PCI_DEVICE_ID_AMD_16H_M30H_NB_F1
,
1981 .f3_id
= PCI_DEVICE_ID_AMD_16H_M30H_NB_F3
,
1983 .early_channel_count
= f1x_early_channel_count
,
1984 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
1985 .dbam_to_cs
= f16_dbam_to_chip_select
,
1991 * These are tables of eigenvectors (one per line) which can be used for the
1992 * construction of the syndrome tables. The modified syndrome search algorithm
1993 * uses those to find the symbol in error and thus the DIMM.
1995 * Algorithm courtesy of Ross LaFetra from AMD.
1997 static const u16 x4_vectors
[] = {
1998 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1999 0x11eb, 0x3396, 0x7f4c, 0xeac8,
2000 0x0001, 0x0002, 0x0004, 0x0008,
2001 0x1013, 0x3032, 0x4044, 0x8088,
2002 0x106b, 0x30d6, 0x70fc, 0xe0a8,
2003 0x4857, 0xc4fe, 0x13cc, 0x3288,
2004 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2005 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2006 0x15c1, 0x2a42, 0x89ac, 0x4758,
2007 0x2b03, 0x1602, 0x4f0c, 0xca08,
2008 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2009 0x8ba7, 0x465e, 0x244c, 0x1cc8,
2010 0x2b87, 0x164e, 0x642c, 0xdc18,
2011 0x40b9, 0x80de, 0x1094, 0x20e8,
2012 0x27db, 0x1eb6, 0x9dac, 0x7b58,
2013 0x11c1, 0x2242, 0x84ac, 0x4c58,
2014 0x1be5, 0x2d7a, 0x5e34, 0xa718,
2015 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2016 0x4c97, 0xc87e, 0x11fc, 0x33a8,
2017 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2018 0x16b3, 0x3d62, 0x4f34, 0x8518,
2019 0x1e2f, 0x391a, 0x5cac, 0xf858,
2020 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2021 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2022 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2023 0x4397, 0xc27e, 0x17fc, 0x3ea8,
2024 0x1617, 0x3d3e, 0x6464, 0xb8b8,
2025 0x23ff, 0x12aa, 0xab6c, 0x56d8,
2026 0x2dfb, 0x1ba6, 0x913c, 0x7328,
2027 0x185d, 0x2ca6, 0x7914, 0x9e28,
2028 0x171b, 0x3e36, 0x7d7c, 0xebe8,
2029 0x4199, 0x82ee, 0x19f4, 0x2e58,
2030 0x4807, 0xc40e, 0x130c, 0x3208,
2031 0x1905, 0x2e0a, 0x5804, 0xac08,
2032 0x213f, 0x132a, 0xadfc, 0x5ba8,
2033 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2036 static const u16 x8_vectors
[] = {
2037 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2038 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2039 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2040 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2041 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2042 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2043 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2044 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2045 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2046 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2047 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2048 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2049 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2050 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2051 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2052 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2053 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2054 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2055 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2058 static int decode_syndrome(u16 syndrome
, const u16
*vectors
, unsigned num_vecs
,
2061 unsigned int i
, err_sym
;
2063 for (err_sym
= 0; err_sym
< num_vecs
/ v_dim
; err_sym
++) {
2065 unsigned v_idx
= err_sym
* v_dim
;
2066 unsigned v_end
= (err_sym
+ 1) * v_dim
;
2068 /* walk over all 16 bits of the syndrome */
2069 for (i
= 1; i
< (1U << 16); i
<<= 1) {
2071 /* if bit is set in that eigenvector... */
2072 if (v_idx
< v_end
&& vectors
[v_idx
] & i
) {
2073 u16 ev_comp
= vectors
[v_idx
++];
2075 /* ... and bit set in the modified syndrome, */
2085 /* can't get to zero, move to next symbol */
2090 edac_dbg(0, "syndrome(%x) not found\n", syndrome
);
2094 static int map_err_sym_to_channel(int err_sym
, int sym_size
)
2107 return err_sym
>> 4;
2113 /* imaginary bits not in a DIMM */
2115 WARN(1, KERN_ERR
"Invalid error symbol: 0x%x\n",
2127 return err_sym
>> 3;
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->ecc_sym_sz == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
	else if (pvt->ecc_sym_sz == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{
	enum hw_event_mc_err_type err_type;
	const char *string;

	if (ecc_type == 2)
		err_type = HW_EVENT_ERR_CORRECTED;
	else if (ecc_type == 1)
		err_type = HW_EVENT_ERR_UNCORRECTED;
	else {
		WARN(1, "Something is rotten in the state of Denmark.\n");
		return;
	}

	switch (err->err_code) {
	case ERR_NODE:
		string = "Failed to map error addr to a node";
		break;
	case ERR_CSROW:
		string = "Failed to map error addr to a csrow";
		break;
	case ERR_CHANNEL:
		string = "unknown syndrome - possible error reporting race";
		break;
	default:
		string = "WTF error";
		break;
	}

	edac_mc_handle_error(err_type, mci, 1,
			     err->page, err->offset, err->syndrome,
			     err->csrow, err->channel, -1,
			     string, "");
}
static inline void decode_bus_error(int node_id, struct mce *m)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 ecc_type = (m->status >> 45) & 0x3;
	u8 xec = XEC(m->status, 0x1f);
	u16 ec = EC(m->status);
	u64 sys_addr;
	struct err_info err;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	memset(&err, 0, sizeof(err));

	sys_addr = get_error_address(pvt, m);

	if (ecc_type == 2)
		err.syndrome = extract_syndrome(m->status);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);

	__log_bus_error(mci, &err, ecc_type);
}
/*
 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
 */
static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
{
	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
	if (!pvt->F1) {
		amd64_err("error address map device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f1_id);
		return -ENODEV;
	}

	/* Reserve the MISC Device */
	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
	if (!pvt->F3) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		amd64_err("error F3 device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f3_id);

		return -ENODEV;
	}
	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

	return 0;
}

static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
	pci_dev_put(pvt->F1);
	pci_dev_put(pvt->F3);
}
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)
{
	unsigned range;
	u64 msr_val;
	u32 tmp;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* check first whether TOP_MEM2 is enabled */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & (1U << 21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else
		edac_dbg(0, "  TOP_MEM2 disabled\n");

	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);

	read_dram_ctl_register(pvt);

	for (range = 0; range < DRAM_RANGES; range++) {
		u8 rw;

		/* read settings for this DRAM range */
		read_dram_base_limit_regs(pvt, range);

		rw = dram_rw(pvt, range);
		if (!rw)
			continue;

		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
			 get_dram_limit(pvt, range));

		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
			 (rw & 0x1) ? "R" : "-",
			 (rw & 0x2) ? "W" : "-",
			 dram_intlv_sel(pvt, range),
			 dram_dst_node(pvt, range));
	}

	read_dct_base_mask(pvt);

	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
	}

	pvt->ecc_sym_sz = 4;
	determine_memory_type(pvt);
	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);

	if (pvt->fam >= 0x10) {
		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
		/* F16h has only DCT0, so no need to read dbam1 */
		if (pvt->fam != 0x16)
			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);

		/* F10h, revD and later can do x8 ECC too */
		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
			pvt->ecc_sym_sz = 8;
	}

	dump_misc_regs(pvt);
}

/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each, with the following
 * meaning:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
 *
 * The memory controller provides for a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages that the specified CSROW encompasses
 *
 */
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
	u32 cs_mode, nr_pages;
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;

	/*
	 * The math on this doesn't look right on the surface because x/2*4 can
	 * be simplified to x*2 but this expression makes use of the fact that
	 * it is integral math where 1/2=0. This intermediate value becomes the
	 * number of bits to shift the DBAM register to extract the proper CSROW
	 * field.
	 */
	cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);

	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2))
							   << (20 - PAGE_SHIFT);

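	/*
	 * Worked example (assuming a 4 KiB PAGE_SIZE, i.e. PAGE_SHIFT == 12,
	 * and dbam_to_cs() reporting the chip select size in MB, which is what
	 * the (20 - PAGE_SHIFT) shift presumes): for csrow_nr == 5 we get
	 * csrow_nr / 2 == 2, so DBAM_DIMM() picks bits 8-11 of DBAM (the
	 * encoding for CSROWs 4 and 5 per the table above); a 2048 MB chip
	 * select then yields 2048 << (20 - 12) == 524288 pages.
	 */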
	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
		 csrow_nr, dct, cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}

/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	enum edac_type edac_mode;
	int i, j, empty = 1;
	int nr_pages = 0;
	u32 val;

	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);

	pvt->nbcfg = val;

	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		 pvt->mc_node_id, val,
		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));

	/*
	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
	 */
	for_each_chip_select(i, 0, pvt) {
		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
		bool row_dct1 = false;

		if (pvt->fam != 0xf)
			row_dct1 = !!csrow_enabled(i, 1, pvt);

		if (!row_dct0 && !row_dct1)
			continue;

		csrow = mci->csrows[i];
		empty = 0;

		edac_dbg(1, "MC node: %d, csrow: %d\n",
			 pvt->mc_node_id, i);

		if (row_dct0) {
			nr_pages = get_csrow_nr_pages(pvt, 0, i);
			csrow->channels[0]->dimm->nr_pages = nr_pages;
		}

		/* K8 has only one DCT */
		if (pvt->fam != 0xf && row_dct1) {
			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);

			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
			nr_pages += row_dct1_pages;
		}

		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);

		/*
		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
		 */
		if (pvt->nbcfg & NBCFG_ECC_ENABLE)
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
				    EDAC_S4ECD4ED : EDAC_SECDED;
		else
			edac_mode = EDAC_NONE;

		for (j = 0; j < pvt->channel_count; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
		}
	}

	return empty;
}

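/*
 * Background: EDAC_S4ECD4ED is the chipkill (x4 symbol correction) mode
 * advertised by NBCFG[ChipKill], while EDAC_SECDED is plain single-error-
 * correct/double-error-detect ECC. The per-DIMM edac_mode set above is what
 * the EDAC core exposes to userspace for each csrow/channel.
 */
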
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}

/* check MCG_CTL on all the cpus on this node */
static bool nb_mce_bank_enabled_on_node(u16 nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & MSR_MCGCTL_NBE;

		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 cpu, reg->q,
			 (nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}

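/*
 * Note: MSR_MCGCTL_NBE is bit 4 of MSR_IA32_MCG_CTL and gates machine-check
 * reporting for the northbridge bank, which is where DRAM ECC errors are
 * signalled on these CPUs. The helpers above and below read and toggle that
 * bit on every core of the node through the pre-allocated per-CPU 'msrs'
 * array.
 */
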
static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}

static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}

static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}

/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows us to force-enable hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";

static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, NBCFG, &value);

	ecc_en = !!(value & NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}

	return true;
}

static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = set_scrub_rate;
	mci->get_sdram_scrub_rate = get_scrub_rate;
}

/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
	struct amd64_family_type *fam_type = NULL;

	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
	pvt->stepping	= boot_cpu_data.x86_mask;
	pvt->model	= boot_cpu_data.x86_model;
	pvt->fam	= boot_cpu_data.x86;

	switch (pvt->fam) {
	case 0xf:
		fam_type	= &family_types[K8_CPUS];
		pvt->ops	= &family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &family_types[F10_CPUS];
		pvt->ops	= &family_types[F10_CPUS].ops;
		break;

	case 0x15:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F15_M30H_CPUS];
			pvt->ops = &family_types[F15_M30H_CPUS].ops;
			break;
		} else if (pvt->model == 0x60) {
			fam_type = &family_types[F15_M60H_CPUS];
			pvt->ops = &family_types[F15_M60H_CPUS].ops;
			break;
		}

		fam_type	= &family_types[F15_CPUS];
		pvt->ops	= &family_types[F15_CPUS].ops;
		break;

	case 0x16:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F16_M30H_CPUS];
			pvt->ops = &family_types[F16_M30H_CPUS].ops;
			break;
		}
		fam_type	= &family_types[F16_CPUS];
		pvt->ops	= &family_types[F16_CPUS].ops;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (pvt->fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	return fam_type;
}

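/*
 * Note: family_types[] (defined earlier in this file) supplies, per CPU
 * family, the F1/F3 PCI device IDs consumed by reserve_mc_sibling_devs() and
 * the low-level ops (early_channel_count, map_sysaddr_to_csrow, dbam_to_cs)
 * that the code above and below dispatches through pvt->ops.
 */
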
static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
	&amd64_edac_dbg_group,
#endif
#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
	&amd64_edac_inj_group,
#endif
	NULL
};

static int init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	int err = 0, ret;
	u16 nid = amd_pci_dev_to_node_id(F2);

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F2 = F2;

	ret = -EINVAL;
	fam_type = per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;

	/*
	 * Always allocate two channels since we can have setups with DIMMs on
	 * only one channel. Also, this simplifies handling later for the price
	 * of a couple of KBs tops.
	 */
	layers[1].size = 2;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F2->dev;

	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(decode_bus_error);

	atomic_inc(&drv_instances);

	return 0;

err_add_mc:
	edac_mc_free(mci);

err_siblings:
	free_mc_sibling_devs(pvt);

err_free:
	kfree(pvt);

err_ret:
	return ret;
}

static int probe_one_instance(struct pci_dev *pdev,
			      const struct pci_device_id *mc_type)
{
	u16 nid = amd_pci_dev_to_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		edac_dbg(0, "ret=%d\n", ret);
		return -EIO;
	}

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = init_one_instance(pdev);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		restore_ecc_error_reporting(s, nid, F3);
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}

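/*
 * Probe flow note: if the BIOS left DRAM ECC disabled, probing bails out
 * unless 'ecc_enable_override' is set, in which case the pre-override
 * NBCTL/NBCFG/MCGCTL state is recorded in this node's ecc_settings so that
 * remove_one_instance() below can undo it via restore_ecc_error_reporting().
 */
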
static void remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u16 nid = amd_pci_dev_to_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];

	mci = find_mci_by_dev(&pdev->dev);
	WARN_ON(!mci);

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}

/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * queries this table to see if this driver handles a given device it found.
 */
static const struct pci_device_id amd64_pci_table[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F2) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F2) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F2) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F2) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F2) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);

static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= probe_one_instance,
	.remove		= remove_one_instance,
	.id_table	= amd64_pci_table,
	.driver.probe_type = PROBE_FORCE_SYNCHRONOUS,
};

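/*
 * Note: probe_type is forced to synchronous probing so that all instances
 * have been probed by the time pci_register_driver() returns;
 * amd64_edac_init() below relies on this when it checks drv_instances right
 * after registration.
 */
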
static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (pci_ctl)
		return;

	mci = edac_mc_find(0);
	if (!mci)
		return;

	pvt = mci->pvt_info;
	pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
	if (!pci_ctl) {
		pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
	}
}

static int __init amd64_edac_init(void)
{
	int err = -ENODEV;

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!ecc_stngs)
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	err = -ENODEV;
	if (!atomic_read(&drv_instances))
		goto err_no_instances;

	setup_pci_device();

#ifdef CONFIG_X86_32
	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif

	return 0;

err_no_instances:
	pci_unregister_driver(&amd64_pci_driver);

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(ecc_stngs);
	ecc_stngs = NULL;

err_ret:
	return err;
}

static void __exit amd64_edac_exit(void)
{
	if (pci_ctl)
		edac_pci_release_generic_ctl(pci_ctl);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	msrs_free(msrs);
	msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");