#include "amd64_edac.h"
#include <asm/amd_nb.h>
static struct edac_pci_ctl_info *pci_ctl;
static int report_gart_errors;
module_param(report_gart_errors, int, 0644);
/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);
static struct msr __percpu *msrs;
/*
 * count successfully initialized driver instances for setup_pci_device()
 */
static atomic_t drv_instances = ATOMIC_INIT(0);
/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;
/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 *FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x00, 0UL},		/* scrubbing off */
};
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only
 *
 * F10h: each DCT has its own set of regs
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 *
 * F16h: has only 1 DCT
 */
static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
			       const char *func)
{
	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}
static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}
/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model >= 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}
static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	u8 dct = 0;

	/* For F15 M30h, the second dct is DCT 3, refer to BKDG Section 2.10 */
	if (addr >= 0x140 && addr <= 0x1a0) {
		dct = (pvt->model >= 0x30) ? 3 : 1;
		addr -= 0x100;
	}

	f15h_select_dct(pvt, dct);

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}
/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If requested is too big, then use last maximum value found.
 */
static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater or equal than the setting requested
	 * and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);

	return scrubrates[i].bandwidth;
}
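
/*
 * Worked example (illustrative, using only the entries shown in the abridged
 * scrubrates[] table above): a request of new_bw = 1600000000 bytes/sec stops
 * at the 0x01 entry (its bandwidth is <= new_bw), so scrubval 0x01 is
 * programmed into F3x58[4:0] and 1600000000 is returned as the rate actually
 * set. A request smaller than every listed bandwidth falls through to the
 * terminating { 0x00, 0UL } entry, i.e. scrubbing is switched off.
 */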
static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0x15 && pvt->model < 0x10)
		f15h_select_dct(pvt, 0);

	return __set_scrub_rate(pvt->F3, bw, min_scrubrate);
}
static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int i, retval = -EINVAL;

	if (pvt->fam == 0x15 && pvt->model < 0x10)
		f15h_select_dct(pvt, 0);

	amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}
/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers.  Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}
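
/*
 * Example of the interleave decode above (illustrative values): with
 * intlv_en == 0x03, two SysAddr bits select among four nodes, so
 * bits = (sys_addr >> 12) & 0x3. A sys_addr of 0x2000 yields bits == 0x2,
 * and the node whose DRAM Base register has (IntlvSel & 0x3) == 0x2 claims
 * the address.
 */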
/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift	= 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15, 5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5) << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
	}

	*base  = (csbase & base_bits) << addr_shift;

	*mask  = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}
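
/*
 * Illustrative example for the F16h branch above (values assumed): a csbase
 * with only bit 5 set contributes bit 11 to *base (shift by 6), while a
 * csbase with bit 19 set contributes bit 27 (shift by 8); mask bits outside
 * the two shifted windows stay set in *mask, so they always participate in
 * the base/mask comparison.
 */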
#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}
/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
 * info is invalid.  Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater.  In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
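
/*
 * Worked example (illustrative numbers): if the DHAR reports a hole base of
 * x = 0xc0000000, then *hole_base = 0xc0000000 and
 * *hole_size = 0x100000000 - 0xc0000000 = 0x40000000 (1GB). DRAM that would
 * have sat in [0xc0000000, 0xffffffff] is hoisted to start at 0x100000000,
 * and *hole_offset is the amount by which a SysAddr in that relocated window
 * must be reduced to recover the DramAddr.
 */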
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
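
/*
 * E.g. intlv_en == 0x1 interleaves over 2 nodes (1 bit), 0x3 over 4 nodes
 * (2 bits) and 0x7 over 8 nodes (3 bits); all other encodings are invalid
 * and map to 0 in the table above.
 */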
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}
/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}
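
/*
 * E.g. with the usual 4K pages (PAGE_SHIFT == 12), an error_address of
 * 0x12345678 splits into page 0x12345 and offset 0x678.
 */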
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error.  mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
	u8 bit;
	unsigned long edac_cap = EDAC_FLAG_NONE;

	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}
static void debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
		 (dclr & BIT(16)) ?  "un" : "",
		 (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ?  "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ?  "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ?  "yes" : "no",
		 (dclr & BIT(13)) ?  "yes" : "no",
		 (dclr & BIT(14)) ?  "yes" : "no",
		 (dclr & BIT(15)) ?  "yes" : "no");
}
/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	debug_display_dimm_sizes(pvt, 1);

	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}
/*
 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model >= 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}
/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);

		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
				 cs, *base1, reg1);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);

		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask1, reg1);
	}
}
static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs)
{
	enum mem_type type;

	/* F15h supports only DDR3 */
	if (pvt->fam >= 0x15)
		type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
	else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) {
		if (pvt->dchr0 & DDR3_MODE)
			type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
		else
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
	} else {
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
	}

	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

	return type;
}
/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	return (flag) ? 2 : 1;
}
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u64 addr;
	u8 start_bit = 1;
	u8 end_bit   = 47;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		struct amd64_pvt *pvt;
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u16 mce_nid;
		u8 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		mce_nid	= amd_get_nb_id(m->extcpu);
		pvt	= mcis[mce_nid]->pvt_info;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

							/* faster log2 */
		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}
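
/*
 * Note on the check above (illustrative): MC4_ADDR values pointing into the
 * CC6 save area all have the form 0xfdf7XXXXXX, i.e. bits [47:24] equal
 * 0x00fdf7, which is what the GENMASK_ULL(47, 24) comparison keys on before
 * the real address is rebuilt from the local-node base/limit registers.
 */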
static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
					: PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory.  This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}
static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}
static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}
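
/*
 * Worked example of the "obfuscated" mapping above: for cs_mode = 8,
 * diff = 8/3 + (8 > 5) = 2 + 1 = 3, so the result is 32 << (8 - 3) = 1024MB,
 * matching the table row for cs_mode 8.
 */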
/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 *
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels = 2;
				break;
			}
		}
	}

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}
static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}
static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}
/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}
/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{

	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4)
		channel = (sys_addr >> 8) & 0x7;

	return channel;
}
/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}
/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
}
/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}
/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = mcis[nid];
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found = csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}
/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (pvt->fam == 0x10) {
		/* only revC3 and revE have that feature */
		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
	}

	amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	      (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	u64 chan_addr;
	u32 dct_sel_base;
	u8 channel;
	bool high_range = false;

	u8 node_id    = dram_dst_node(pvt, range);
	u8 intlv_en   = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt) &&
	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = true;

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
	if (intlv_en)
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	   !dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					     (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					     (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				     (chan_addr & 0xfff);
	}

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0)
		*chan_sel = channel;

	return cs_found;
}
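
/*
 * Example of the channel-interleave removal above (illustrative): with
 * DctSelIntLvAddr == 1 the two DCTs interleave on address bit 12, so that
 * bit is squeezed back out of the normalized address:
 * ((chan_addr >> 13) << 12) | (chan_addr & 0xfff) drops bit 12 and shifts
 * the upper bits down by one.
 */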
static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
					u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	int num_dcts_intlv = 0;
	u64 chan_addr, chan_offset;
	u64 dct_base, dct_limit;
	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;

	u64 dhar_offset		= f10_dhar_offset(pvt);
	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
	u8 node_id		= dram_dst_node(pvt, range);
	u8 intlv_en		= dram_intlv_en(pvt, range);

	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);

	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
	    !(get_dram_limit(pvt, range) >= sys_addr))
		return -EINVAL;

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	/* Verify sys_addr is within DCT Range. */
	dct_base  = (u64) dct_sel_baseaddr(pvt);
	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;

	if (!(dct_cont_base_reg & BIT(0)) &&
	    !(dct_base <= (sys_addr >> 27) &&
	      dct_limit >= (sys_addr >> 27)))
		return -EINVAL;

	/* Verify number of dct's that participate in channel interleaving. */
	num_dcts_intlv = (int) hweight8(intlv_en);

	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
		return -EINVAL;

	channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
					     num_dcts_intlv, dct_sel);

	/* Verify we stay within the MAX number of channels allowed */
	if (channel > 3)
		return -EINVAL;

	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));

	/* Get normalized DCT addr */
	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
		chan_offset = dhar_offset;
	else
		chan_offset = dct_base << 27;

	chan_addr = sys_addr - chan_offset;

	/* remove channel interleave */
	if (num_dcts_intlv == 2) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 9) << 8) |
						(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 10) << 9) |
						(chan_addr & 0x1ff);
		else
			return -EINVAL;

	} else if (num_dcts_intlv == 4) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 10) << 8) |
							(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 11) << 9) |
							(chan_addr & 0x1ff);
		else
			return -EINVAL;
	}

	if (dct_offset_en) {
		amd64_read_pci_cfg(pvt->F1,
				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
				   &tmp);
		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
	}

	f15h_select_dct(pvt, channel);

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	/*
	 * Find Chip select:
	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
	 * there is support for 4 DCT's, but only 2 are currently functional.
	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
	 * pvt->csels[1]. So we need to use '1' here to get correct info.
	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
	 */
	alias_channel = (channel == 3) ? 1 : channel;

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);

	if (cs_found >= 0)
		*chan_sel = alias_channel;

	return cs_found;
}
static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
					u64 sys_addr,
					int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;

	for (range = 0; range < DRAM_RANGES; range++) {
		if (!dram_rw(pvt, range))
			continue;

		if (pvt->fam == 0x15 && pvt->model >= 0x30)
			cs_found = f15_m30h_match_to_this_node(pvt, range,
							       sys_addr,
							       chan_sel);

		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
			 (get_dram_limit(pvt, range) >= sys_addr)) {
			cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}
/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt))
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
}
/*
 * debug routine to display the memory sizes of all logical DIMMs and its
 * CSROWs
 */
static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1;
	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;

	if (pvt->fam == 0xf) {
		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
	}

	dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
						   : pvt->csels[0].csbases;

	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		 ctrl, dbam);

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam));

		size1 = 0;
		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam));

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   dimm * 2,     size0,
			   dimm * 2 + 1, size1);
	}
}
static struct amd64_family_type family_types[] = {
	[K8_CPUS] = {
		.ctl_name = "K8",
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
			.read_dct_pci_cfg	= k8_read_dct_pci_cfg,
		}
	},
	[F10_CPUS] = {
		.ctl_name = "F10h",
		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
		}
	},
	[F15_CPUS] = {
		.ctl_name = "F15h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
		.f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_dbam_to_chip_select,
			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
		}
	},
	[F15_M30H_CPUS] = {
		.ctl_name = "F15h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
		.f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
		}
	},
	[F16_CPUS] = {
		.ctl_name = "F16h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
		.f3_id = PCI_DEVICE_ID_AMD_16H_NB_F3,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
		}
	},
};
/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static const u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};

static const u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		unsigned v_idx = err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
	return -1;
}
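
/*
 * Sketch of the search above (illustrative): each candidate symbol k owns the
 * v_dim consecutive eigenvectors vectors[k*v_dim .. (k+1)*v_dim - 1]. Walking
 * the 16 syndrome bit positions, the loop XORs in an eigenvector whenever it
 * and the running syndrome share the current bit; if the running value
 * cancels to zero, k is the symbol in error, otherwise the next symbol's
 * eigenvectors are tried.
 */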
static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
			     err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
}
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->ecc_sym_sz == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
	else if (pvt->ecc_sym_sz == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{
	enum hw_event_mc_err_type err_type;
	const char *string;

	if (ecc_type == 2)
		err_type = HW_EVENT_ERR_CORRECTED;
	else if (ecc_type == 1)
		err_type = HW_EVENT_ERR_UNCORRECTED;
	else {
		WARN(1, "Something is rotten in the state of Denmark.\n");
		return;
	}

	switch (err->err_code) {
	case DECODE_OK:
		string = "";
		break;
	case ERR_NODE:
		string = "Failed to map error addr to a node";
		break;
	case ERR_CSROW:
		string = "Failed to map error addr to a csrow";
		break;
	case ERR_CHANNEL:
		string = "unknown syndrome - possible error reporting race";
		break;
	default:
		string = "WTF error";
		break;
	}

	edac_mc_handle_error(err_type, mci, 1,
			     err->page, err->offset, err->syndrome,
			     err->csrow, err->channel, -1,
			     string, "");
}
static inline void decode_bus_error(int node_id, struct mce *m)
{
	struct mem_ctl_info *mci = mcis[node_id];
	struct amd64_pvt *pvt = mci->pvt_info;
	u8 ecc_type = (m->status >> 45) & 0x3;
	u8 xec = XEC(m->status, 0x1f);
	u16 ec = EC(m->status);
	u64 sys_addr;
	struct err_info err;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	memset(&err, 0, sizeof(err));

	sys_addr = get_error_address(pvt, m);

	if (ecc_type == 2)
		err.syndrome = extract_syndrome(m->status);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);

	__log_bus_error(mci, &err, ecc_type);
}
/*
 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
 */
static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
{
	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
	if (!pvt->F1) {
		amd64_err("error address map device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f1_id);
		return -ENODEV;
	}

	/* Reserve the MISC Device */
	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
	if (!pvt->F3) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		amd64_err("error F3 device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f3_id);

		return -ENODEV;
	}
	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

	return 0;
}
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
	pci_dev_put(pvt->F1);
	pci_dev_put(pvt->F3);
}
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)
{
	unsigned range;
	u64 msr_val;
	u32 tmp;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* check first whether TOP_MEM2 is enabled */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & (1U << 21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else
		edac_dbg(0, "  TOP_MEM2 disabled\n");

	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);

	read_dram_ctl_register(pvt);

	for (range = 0; range < DRAM_RANGES; range++) {
		u8 rw;

		/* read settings for this DRAM range */
		read_dram_base_limit_regs(pvt, range);

		rw = dram_rw(pvt, range);
		if (!rw)
			continue;

		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
			 get_dram_limit(pvt, range));

		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
			 (rw & 0x1) ? "R" : "-",
			 (rw & 0x2) ? "W" : "-",
			 dram_intlv_sel(pvt, range),
			 dram_dst_node(pvt, range));
	}

	read_dct_base_mask(pvt);

	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
	amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
	amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
		amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
	}

	pvt->ecc_sym_sz = 4;

	if (pvt->fam >= 0x10) {
		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
		if (pvt->fam != 0x16)
			/* F16h has only DCT0 */
			amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);

		/* F10h, revD and later can do x8 ECC too */
		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
			pvt->ecc_sym_sz = 8;
	}
	dump_misc_regs(pvt);
}
/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each definitions:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state,
 * see relevant BKDG more info.
 *
 * The memory controller provides for total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 *
 */
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
	u32 cs_mode, nr_pages;
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;

	/*
	 * The math on this doesn't look right on the surface because x/2*4 can
	 * be simplified to x*2 but this expression makes use of the fact that
	 * it is integral math where 1/2=0. This intermediate value becomes the
	 * number of bits to shift the DBAM register to extract the proper CSROW
	 * field.
	 */
	cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);

	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);

	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
		 csrow_nr, dct, cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}
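
/*
 * Worked example (illustrative numbers): for csrow_nr = 3, csrow_nr / 2 = 1
 * selects DBAM bits [7:4] (the CSROW 2/3 field). If dbam_to_cs() resolves
 * that cs_mode to a 512MB chip select, nr_pages = 512 << (20 - PAGE_SHIFT),
 * i.e. 131072 pages with 4K pages.
 */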
2218 * Initialize the array of csrow attribute instances, based on the values
2219 * from pci config hardware registers.
2221 static int init_csrows(struct mem_ctl_info
*mci
)
2223 struct amd64_pvt
*pvt
= mci
->pvt_info
;
2224 struct csrow_info
*csrow
;
2225 struct dimm_info
*dimm
;
2226 enum edac_type edac_mode
;
2227 enum mem_type mtype
;
2228 int i
, j
, empty
= 1;
2232 amd64_read_pci_cfg(pvt
->F3
, NBCFG
, &val
);
2236 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2237 pvt
->mc_node_id
, val
,
2238 !!(val
& NBCFG_CHIPKILL
), !!(val
& NBCFG_ECC_ENABLE
));
	/*
	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
	 */
	for_each_chip_select(i, 0, pvt) {
		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
		bool row_dct1 = false;

		if (pvt->fam != 0xf)
			row_dct1 = !!csrow_enabled(i, 1, pvt);

		if (!row_dct0 && !row_dct1)
			continue;

		csrow = mci->csrows[i];
		empty = 0;

		edac_dbg(1, "MC node: %d, csrow: %d\n",
			 pvt->mc_node_id, i);

		if (row_dct0) {
			nr_pages = get_csrow_nr_pages(pvt, 0, i);
			csrow->channels[0]->dimm->nr_pages = nr_pages;
		}

		/* K8 has only one DCT */
		if (pvt->fam != 0xf && row_dct1) {
			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);

			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
			nr_pages += row_dct1_pages;
		}
		mtype = determine_memory_type(pvt, i);

		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);

		/*
		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
		 */
		if (pvt->nbcfg & NBCFG_ECC_ENABLE)
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
				    EDAC_S4ECD4ED : EDAC_SECDED;
		else
			edac_mode = EDAC_NONE;

		for (j = 0; j < pvt->channel_count; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->mtype = mtype;
			dimm->edac_mode = edac_mode;
		}
	}

	return empty;
}
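
/*
 * The helpers below touch MCG_CTL on every core of a node through the
 * per-CPU 'msrs' buffer (allocated in amd64_edac_init()), since the NB MCE
 * bank enable is a per-core MSR setting.
 */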
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}
/* check MCG_CTL on all the cpus on this node */
static bool nb_mce_bank_enabled_on_node(u16 nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
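
	/*
	 * MSR_MCGCTL_NBE is bit 4 of MSR_IA32_MCG_CTL, the same bit the
	 * notice in ecc_enabled() asks the user to set; the bank counts as
	 * enabled only if every core on the node has it set.
	 */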
	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & MSR_MCGCTL_NBE;

		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 cpu, reg->q,
			 (nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}
static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
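
	/*
	 * Classic read-modify-write: the loop below edits each core's
	 * MCG_CTL snapshot in 'msrs' and wrmsr_on_cpus() writes them all
	 * back afterwards.
	 */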
	for_each_cpu(cpu, cmask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}
static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;
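
	/*
	 * Only the two reporting-enable bits are saved here, so that
	 * restore_ecc_error_reporting() can OR them back in and leave the
	 * rest of NBCTL exactly as the BIOS programmed it.
	 */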
	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}
static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows force-enabling hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, NBCFG, &value);

	ecc_en = !!(value & NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}
	return true;
}
static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int rc;

	rc = amd64_create_sysfs_dbg_files(mci);
	if (rc < 0)
		return rc;

	if (pvt->fam >= 0x10) {
		rc = amd64_create_sysfs_inject_files(mci);
		if (rc < 0)
			return rc;
	}

	return 0;
}
static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	amd64_remove_sysfs_dbg_files(mci);

	if (pvt->fam >= 0x10)
		amd64_remove_sysfs_inject_files(mci);
}
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
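
	/*
	 * edac_ctl_cap describes what the controller could do (from NBCAP);
	 * edac_cap below reflects the mode actually in operation.
	 */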
	mci->edac_cap		= determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = set_scrub_rate;
	mci->get_sdram_scrub_rate = get_scrub_rate;
}
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
	struct amd64_family_type *fam_type = NULL;

	pvt->ext_model	= boot_cpu_data.x86_model >> 4;
	pvt->stepping	= boot_cpu_data.x86_mask;
	pvt->model	= boot_cpu_data.x86_model;
	pvt->fam	= boot_cpu_data.x86;
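
	/*
	 * The upper nibble of x86_model is the extended model; for K8 it is
	 * what separates revF and later parts in the message below.
	 */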
	switch (pvt->fam) {
	case 0xf:
		fam_type	= &family_types[K8_CPUS];
		pvt->ops	= &family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &family_types[F10_CPUS];
		pvt->ops	= &family_types[F10_CPUS].ops;
		break;

	case 0x15:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F15_M30H_CPUS];
			pvt->ops = &family_types[F15_M30H_CPUS].ops;
			break;
		}

		fam_type	= &family_types[F15_CPUS];
		pvt->ops	= &family_types[F15_CPUS].ops;
		break;

	case 0x16:
		fam_type	= &family_types[F16_CPUS];
		pvt->ops	= &family_types[F16_CPUS].ops;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (pvt->fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	return fam_type;
}
static int init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	int err = 0, ret;
	u16 nid = amd_get_node_id(F2);

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F2 = F2;

	ret = -EINVAL;
	fam_type = per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;
	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;

	/*
	 * Always allocate two channels since we can have setups with DIMMs on
	 * only one channel. Also, this simplifies handling later for the price
	 * of a couple of KBs tops.
	 */
	layers[1].size = 2;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		goto err_siblings;
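
	/*
	 * The two layers describe a chip-select x channel grid;
	 * edac_mc_alloc() sizes the csrow/channel/dimm tables in 'mci'
	 * from them.
	 */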
	mci->pvt_info = pvt;
	mci->pdev = &pvt->F2->dev;

	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}
	if (set_mc_sysfs_attrs(mci)) {
		edac_dbg(1, "failed set_mc_sysfs_attrs()\n");
		goto err_add_sysfs;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(decode_bus_error);

	mcis[nid] = mci;

	atomic_inc(&drv_instances);

	return 0;

err_add_sysfs:
	edac_mc_del_mc(mci->pdev);
err_add_mc:
	edac_mc_free(mci);

err_siblings:
	free_mc_sibling_devs(pvt);

err_free:
	kfree(pvt);

err_ret:
	return ret;
}
static int probe_one_instance(struct pci_dev *pdev,
			      const struct pci_device_id *mc_type)
{
	u16 nid = amd_get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		edac_dbg(0, "ret=%d\n", ret);
		return -EIO;
	}

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = 0;

		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = init_one_instance(pdev);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		restore_ecc_error_reporting(s, nid, F3);
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}
static void remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u16 nid = amd_get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];

	mci = find_mci_by_dev(&pdev->dev);
	WARN_ON(!mci);

	del_mc_sysfs_attrs(mci);
	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[nid] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * queries this table to see if this driver is for a given device found.
 */
static const struct pci_device_id amd64_pci_table[] = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_15H_NB_F2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_16H_NB_F2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
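
/*
 * Each F2 device matched above belongs to one node's memory controller, so
 * the driver below gets probed once per northbridge.
 */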
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= probe_one_instance,
	.remove		= remove_one_instance,
	.id_table	= amd64_pci_table,
};
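
/*
 * Only one EDAC PCI control is created for the whole driver: it is attached
 * to the first initialized instance's F2 device and carries the PCI error
 * reporting.
 */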
static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (pci_ctl)
		return;

	mci = mcis[0];
	if (!mci)
		return;

	pvt = mci->pvt_info;
	pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
	if (!pci_ctl) {
		pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
	}
}
static int __init amd64_edac_init(void)
{
	int err = -ENODEV;

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!(mcis && ecc_stngs))
		goto err_free;
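
	/*
	 * Both tables have one slot per northbridge, indexed by node id; see
	 * probe_one_instance() and remove_one_instance().
	 */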
	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	err = -ENODEV;
	if (!atomic_read(&drv_instances))
		goto err_no_instances;

	setup_pci_device();
	return 0;

err_no_instances:
	pci_unregister_driver(&amd64_pci_driver);

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(mcis);
	mcis = NULL;

	kfree(ecc_stngs);
	ecc_stngs = NULL;

err_ret:
	return err;
}
static void __exit amd64_edac_exit(void)
{
	if (pci_ctl)
		edac_pci_release_generic_ctl(pci_ctl);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	kfree(mcis);
	mcis = NULL;

	msrs_free(msrs);
	msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");