// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Pondicherry2 memory controller.
 *
 * Copyright (c) 2016, Intel Corporation.
 *
 * [Derived from sb_edac.c]
 *
 * Translation of system physical addresses to DIMM addresses
 * is a two stage process:
 *
 * First the Pondicherry 2 memory controller handles slice and channel interleaving
 * in "sys2pmi()". This is (almost) completely common between platforms.
 *
 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
 */
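
/*
 * Illustrative flow (editor's sketch mirroring get_memory_error_data()
 * below; not additional driver logic):
 *
 *	sys2pmi(sys_addr, &pmiidx, &pmiaddr, msg);	// stage 1: slice/channel
 *	pmiaddr >>= ops->pmiaddr_shift;
 *	pmiidx >>= ops->pmiidx_shift;
 *	ops->pmi2mem(mci, pmiaddr, pmiidx, &daddr, msg);	// stage 2: dunit
 *	// daddr now holds chan/dimm/rank/bank/row/col
 */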
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_mc.h"
#include "edac_module.h"
#include "pnd2_edac.h"

#define EDAC_MOD_STR		"pnd2_edac"

#define APL_NUM_CHANNELS	4
#define DNV_NUM_CHANNELS	2
#define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */
enum type {
	APL,
	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};

struct dram_addr {
	int chan;
	int dimm;
	int rank;
	int bank;
	int row;
	int col;
};

struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];
	u64 tolm, tohm;
};
/*
 * System address space is divided into multiple regions with
 * different interleave rules in each. The as0/as1 regions
 * have no interleaving at all. The as2 region is interleaved
 * between two channels. The mot region is magic and may overlap
 * other regions, with its interleave rules taking precedence.
 * Addresses not in any of these regions are interleaved across
 * all four channels.
 */
static struct region {
	u64	base;
	u64	limit;
	u8	enabled;
} mot, as0, as1, as2;
static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;
	int dimms_per_channel;
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg);
} *ops;

static struct mem_ctl_info *pnd2_mci;

#define PND2_MSG_SIZE	256

/* Debug macros */
#define pnd2_printk(level, fmt, arg...)			\
	edac_printk(level, "pnd2", fmt, ##arg)

#define pnd2_mc_printk(mci, level, fmt, arg...)	\
	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)

#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
#define SELECTOR_DISABLED (-1)
#define _4GB (1ul << 32)

#define PMI_ADDRESS_WIDTH	31
#define PND_MAX_PHYS_BIT	39

#define APL_ASYMSHIFT		28
#define DNV_ASYMSHIFT		31
#define CH_HASH_MASK_LSB	6
#define SLICE_HASH_MASK_LSB	6
#define MOT_SLC_INTLV_BIT	12
#define LOG2_PMI_ADDR_GRANULARITY	5
#define MOT_SHIFT	24

#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s)	((u64)(val) << (s))
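
/*
 * Worked examples (hypothetical values, for illustration only):
 *	GET_BITFIELD(0xabcd, 4, 7) == 0xc	(extracts bits 7:4)
 *	U64_LSHIFT(0x3, 20)        == 0x300000
 */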
/*
 * On Apollo Lake we access memory controller registers via a
 * side-band mailbox style interface in a hidden PCI device
 * configuration space.
 */
static struct pci_bus	*p2sb_bus;
#define P2SB_DEVFN	PCI_DEVFN(0xd, 0)
#define P2SB_ADDR_OFF	0xd0
#define P2SB_DATA_OFF	0xd4
#define P2SB_STAT_OFF	0xd8
#define P2SB_ROUT_OFF	0xda
#define P2SB_EADD_OFF	0xdc
#define P2SB_HIDE_OFF	0xe1

#define P2SB_BUSY	1

#define P2SB_READ(size, off, ptr) \
	pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
#define P2SB_WRITE(size, off, val) \
	pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
static bool p2sb_is_busy(u16 *status)
{
	P2SB_READ(word, P2SB_STAT_OFF, status);

	return !!(*status & P2SB_BUSY);
}

static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
	int retries = 0xff, ret;
	u16 status;
	u8 hidden;

	/* Unhide the P2SB device, if it's hidden */
	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

	if (p2sb_is_busy(&status)) {
		ret = -EAGAIN;
		goto out;
	}

	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	while (p2sb_is_busy(&status)) {
		if (retries-- == 0) {
			ret = -EBUSY;
			goto out;
		}
	}

	P2SB_READ(dword, P2SB_DATA_OFF, data);
	ret = (status >> 1) & 0x3;
out:
	/* Hide the P2SB device, if it was hidden before */
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);

	return ret;
}
static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
		/* fall through */
	case 4:
		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
			    sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}
static u64 get_mem_ctrl_hub_base_addr(void)
{
	struct b_cr_mchbar_lo_pci lo;
	struct b_cr_mchbar_hi_pci hi;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
		pci_dev_put(pdev);
	} else {
		return 0;
	}

	if (!lo.enable) {
		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
		return 0;
	}

	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}
static u64 get_sideband_reg_base_addr(void)
{
	struct pci_dev *pdev;
	u32 hi, lo;
	u8 hidden;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
	if (pdev) {
		/* Unhide the P2SB device, if it's hidden */
		pci_read_config_byte(pdev, 0xe1, &hidden);
		if (hidden)
			pci_write_config_byte(pdev, 0xe1, 0);

		pci_read_config_dword(pdev, 0x10, &lo);
		pci_read_config_dword(pdev, 0x14, &hi);
		lo &= 0xfffffff0;

		/* Hide the P2SB device, if it was hidden before */
		if (hidden)
			pci_write_config_byte(pdev, 0xe1, hidden);

		pci_dev_put(pdev);
		return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
	} else {
		return 0xfd000000;
	}
}
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	char *base;
	u64 addr;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			addr = get_mem_ctrl_hub_base_addr();
			if (!addr)
				return -ENODEV;
		} else {
			/* MMIO via sideband register base address */
			addr = get_sideband_reg_base_addr();
			if (!addr)
				return -ENODEV;
			addr += (port << 16);
		}

		base = ioremap((resource_size_t)addr, 0x10000);
		if (!base)
			return -ENODEV;

		if (sz == 8)
			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
		*(u32 *)data = *(u32 *)(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
		 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}
#define RD_REGP(regp, regname, port)	\
	ops->rd_reg(port,		\
		regname##_offset,	\
		regname##_r_opcode,	\
		regp, sizeof(struct regname),	\
		#regname)

#define RD_REG(regp, regname)		\
	ops->rd_reg(regname ## _port,	\
		regname##_offset,	\
		regname##_r_opcode,	\
		regp, sizeof(struct regname),	\
		#regname)
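
/*
 * For illustration, RD_REG(&tolud, b_cr_tolud_pci) expands to:
 *	ops->rd_reg(b_cr_tolud_pci_port, b_cr_tolud_pci_offset,
 *		    b_cr_tolud_pci_r_opcode, &tolud,
 *		    sizeof(struct b_cr_tolud_pci), "b_cr_tolud_pci");
 * with the _port/_offset/_r_opcode constants coming from pnd2_edac.h.
 */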
static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

static u8 sym_chan_mask;
static u8 asym_chan_mask;
static u8 chan_mask;

static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;
static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
	rp->enabled = 1;
	rp->base = base;
	rp->limit = limit;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}

static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
static bool in_region(struct region *rp, u64 addr)
{
	if (!rp->enabled)
		return false;

	return rp->base <= addr && addr <= rp->limit;
}

static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
	int mask = 0;

	if (!p->slice_0_mem_disabled)
		mask |= p->sym_slice0_channel_enabled;

	if (!p->slice_1_disabled)
		mask |= p->sym_slice1_channel_enabled << 2;

	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
			 struct b_cr_asym_mem_region0_mchbar *as0,
			 struct b_cr_asym_mem_region1_mchbar *as1,
			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
	int mask = 0;

	if (as2way->asym_2way_interleave_enable)
		mask = intlv[as2way->asym_2way_intlv_mode];
	if (as0->slice0_asym_enable)
		mask |= (1 << as0->slice0_asym_channel_select);
	if (as1->slice1_asym_enable)
		mask |= (4 << as1->slice1_asym_channel_select);
	if (p->slice_0_mem_disabled)
		mask &= 0xc;
	if (p->slice_1_disabled)
		mask &= 0x3;
	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}
static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;

/* Apollo Lake dunit */
/*
 * Validated on board with just two DIMMs in the [0] and [2] positions
 * in this array. The other port numbers match the documentation, but
 * caution is advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
		  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
		  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}

static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
		  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
		  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}
static int apl_get_registers(void)
{
	int ret = -ENODEV;
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	/*
	 * RD_REGP() will fail for unpopulated or non-existent
	 * DIMM slots. Return success if we find at least one DIMM.
	 */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			ret = 0;

	return ret;
}

static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
		    RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
		    RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
		    RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
		    RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
		    RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
		    RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
		    RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}
/*
 * Read all the h/w config registers once here (they don't
 * change at run time). Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
	    RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
	    RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
	    RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
	    RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
	    RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
	    RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
	    RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
			  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
			  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
			       U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
			       U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
		     !chash.slice_0_mem_disabled &&
		     (chash.sym_slice0_channel_enabled != 0) &&
		     (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
		       !chash.enable_pmi_dual_data_mode &&
		       ((chash.sym_slice0_channel_enabled == 3) ||
		       (chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}
/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
	return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
}
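
/*
 * Example (hypothetical layout): with top_lm = 0x8000_0000 (2 GiB of
 * low memory), the MMIO gap spans [2 GiB, 4 GiB), so system address
 * 0x1_2000_0000 maps to the contiguous address
 * 0x1_2000_0000 - (0x1_0000_0000 - 0x8000_0000) = 0xa000_0000.
 */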
/* Squeeze out one address bit, shift upper part down to fill gap */
static void remove_addr_bit(u64 *addr, int bitidx)
{
	u64	mask;

	if (bitidx == -1)
		return;

	mask = (1ull << bitidx) - 1;
	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}
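
/*
 * Example: remove_addr_bit(&a, 12) with a = 0x3abc squeezes out bit 12:
 * bits 11:0 (0xabc) stay put, higher bits shift down one, giving 0x1abc.
 */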
/* XOR all the bits from addr specified in mask */
static int hash_by_mask(u64 addr, u64 mask)
{
	u64 result = addr & mask;

	result = (result >> 32) ^ result;
	result = (result >> 16) ^ result;
	result = (result >> 8) ^ result;
	result = (result >> 4) ^ result;
	result = (result >> 2) ^ result;
	result = (result >> 1) ^ result;

	return (int)result & 1;
}
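
/*
 * Example: hash_by_mask(0x5000, 0x3000) selects bits 13:12 of the
 * address (binary 01), XOR-reduces them and returns 1 (odd parity).
 */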
/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
					 MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric regions enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	   (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
						     MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower) */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}
/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n))	/* column */
#define B(n) (0x20 | (n))	/* bank */
#define R(n) (0x40 | (n))	/* row */
#define RS   (0x80)		/* rank */

/* addrdec values */
#define AMAP_1KB	0
#define AMAP_2KB	1
#define AMAP_4KB	2
#define AMAP_RSVD	3

/* dden values */
#define DEN_4Gb	0
#define DEN_8Gb	2

/* dwid values */
#define X8	0
#define X16	1
static struct dimm_geometry {
	u8	addrdec;
	u8	dden;
	u8	dwid;
	u8	rowbits, colbits;
	u16	bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	}
};
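
/*
 * Reading the table above (editor's example): in the first entry
 * (AMAP_1KB / DEN_4Gb / X16), PMI address bit 0 maps to C(2), i.e.
 * column bit 2, bit 5 maps to B(0) (bank bit 0), and bit 23 maps to
 * RS (rank select). apl_pmi2mem() below walks bits[] in this way.
 */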
static int bank_hash(u64 pmiaddr, int idx, int shft)
{
	int bhash = 0;

	switch (idx) {
	case 0:
		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
		break;
	case 1:
		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
		bhash ^= ((pmiaddr >> 22) & 1) << 1;
		break;
	case 2:
		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
		break;
	}

	return bhash;
}

static int rank_hash(u64 pmiaddr)
{
	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
}
/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int	bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}

			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}
/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
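
/*
 * Example: dnv_get_bit(0x2000, 13, 1) picks bit 13 of the PMI address
 * (here 1) and returns it shifted to bit position 1, i.e. 0x2.
 */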
static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}
static int check_channel(int ch)
{
	if (drp0[ch].dramtype != 0) {
		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
		return 1;
	} else if (drp0[ch].eccen == 0) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int apl_check_ecc_active(void)
{
	int	i, ret = 0;

	/* Check dramtype and ECC mode for each present DIMM */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (chan_mask & BIT(i))
			ret += check_channel(i);
	return ret ? -EINVAL : 0;
}

#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)

static int check_unit(int ch)
{
	struct d_cr_drp *d = &drp[ch];

	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int dnv_check_ecc_active(void)
{
	int	i, ret = 0;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		ret += check_unit(i);
	return ret ? -EINVAL : 0;
}
static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
				 struct dram_addr *daddr, char *msg)
{
	u64	pmiaddr;
	u32	pmiidx;
	int	ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	pmiaddr >>= ops->pmiaddr_shift;
	/* pmi channel idx to dimm channel idx */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}
static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
			    HW_EVENT_ERR_CORRECTED;

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
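	/*
	 * Example (hypothetical MCi_STATUS low word): errcode = 0x0091
	 * matches the mask (0x0091 & 0xef80 == 0x80) and decodes as a
	 * "memory read error" (mmm = 001) on channel 1 (cccc = 0001).
	 */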
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		"%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
			     m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}
static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt	*pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64	capacity;
	int	i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		pvt->dimm_geom[i] = g;
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
			   (1ul << dimms[g].colbits);
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}
static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};

static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int	i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64	capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}
static int pnd2_register_mci(struct mem_ctl_info **ppmci)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct pnd2_pvt *pvt;
	int rc;

	rc = ops->check_ecc();
	if (rc < 0)
		return rc;

	/* Allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = ops->channels;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = ops->dimms_per_channel;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci)
		return -ENOMEM;

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	mci->mod_name = EDAC_MOD_STR;
	mci->dev_name = ops->name;
	mci->ctl_name = "Pondicherry2";

	/* Get dimm basic config and the memory layout */
	ops->get_dimm_config(mci);

	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return -EINVAL;
	}

	*ppmci = mci;

	return 0;
}
static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
	if (unlikely(!mci || !mci->pvt_info)) {
		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(NULL);
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	edac_mc_free(mci);
}
/*
 * Callback function registered with core kernel mce code.
 * Called once for each logged error.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct dram_addr daddr;
	char *type;

	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	mci = pnd2_mci;
	if (!mci)
		return NOTIFY_DONE;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
	 * Bit 12 has a special meaning.
	 */
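	/*
	 * Example (hypothetical status): a status low word of 0x009f
	 * gives (0x009f & 0xefff) >> 7 == 1, so it is treated as a
	 * memory error and decoded below (the debugfs test path uses
	 * the same 0x9f pattern).
	 */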
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
		       mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
		       mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

	pnd2_mce_output_error(mci, mce, &daddr);

	/* Advise mcelog that the error was handled */
	return NOTIFY_STOP;
}

static struct notifier_block pnd2_mce_dec = {
	.notifier_call	= pnd2_mce_check_error,
};
#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};

static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	struct mce m;

	*(u64 *)data = val;
	m.mcgstatus = 0;
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
		 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
				 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void)	{}
static void teardown_pnd2_debug(void)	{}
#endif /* CONFIG_EDAC_DEBUG */
static int pnd2_probe(void)
{
	int rc;

	edac_dbg(2, "\n");
	rc = get_registers();
	if (rc)
		return rc;

	return pnd2_register_mci(&pnd2_mci);
}

static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}
static struct dunit_ops apl_ops = {
	.name			= "pnd2/apl",
	.type			= APL,
	.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
	.pmiidx_shift		= 0,
	.channels		= APL_NUM_CHANNELS,
	.dimms_per_channel	= 1,
	.rd_reg			= apl_rd_reg,
	.get_registers		= apl_get_registers,
	.check_ecc		= apl_check_ecc_active,
	.mk_region		= apl_mk_region,
	.get_dimm_config	= apl_get_dimm_config,
	.pmi2mem		= apl_pmi2mem,
};

static struct dunit_ops dnv_ops = {
	.name			= "pnd2/dnv",
	.type			= DNV,
	.pmiaddr_shift		= 0,
	.pmiidx_shift		= 1,
	.channels		= DNV_NUM_CHANNELS,
	.dimms_per_channel	= 2,
	.rd_reg			= dnv_rd_reg,
	.get_registers		= dnv_get_registers,
	.check_ecc		= dnv_check_ecc_active,
	.mk_region		= dnv_mk_region,
	.get_dimm_config	= dnv_get_dimm_config,
	.pmi2mem		= dnv_pmi2mem,
};

static const struct x86_cpu_id pnd2_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X, 0, (kernel_ulong_t)&dnv_ops },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
static int __init pnd2_init(void)
{
	const struct x86_cpu_id *id;
	const char *owner;
	int rc;

	edac_dbg(2, "\n");

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	id = x86_match_cpu(pnd2_cpuids);
	if (!id)
		return -ENODEV;

	ops = (struct dunit_ops *)id->driver_data;

	if (ops->type == APL) {
		p2sb_bus = pci_find_bus(0, 0);
		if (!p2sb_bus)
			return -ENODEV;
	}

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = pnd2_probe();
	if (rc < 0) {
		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
		return rc;
	}

	if (!pnd2_mci)
		return -ENODEV;

	mce_register_decode_chain(&pnd2_mce_dec);
	setup_pnd2_debug();

	return 0;
}
static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}

module_init(pnd2_init);
module_exit(pnd2_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");