/*
 * Driver for Pondicherry2 memory controller.
 *
 * Copyright (c) 2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * [Derived from sb_edac.c]
 *
 * Translation of system physical addresses to DIMM addresses
 * is a two stage process:
 *
 * First the Pondicherry 2 memory controller handles slice and channel interleaving
 * in "sys2pmi()". This is (almost) completely common between platforms.
 *
 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
 */
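/*
 * In outline (an illustrative sketch of the flow implemented below, not an
 * additional interface):
 *
 *   system address
 *        |  sys2pmi()       strip the MMIO gap, undo slice/channel interleave
 *        v
 *   (pmiidx, pmiaddr)
 *        |  ops->pmi2mem()  platform specific dunit decode
 *        v
 *   (channel, DIMM, rank, bank, row, column)
 */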
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>

#include "edac_module.h"
#include "pnd2_edac.h"
#define EDAC_MOD_STR		"pnd2_edac"

#define APL_NUM_CHANNELS	4
#define DNV_NUM_CHANNELS	2
#define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */
enum type {
    APL,
    DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};

struct pnd2_pvt {
    int dimm_geom[APL_NUM_CHANNELS];
};
/*
 * System address space is divided into multiple regions with
 * different interleave rules in each. The as0/as1 regions
 * have no interleaving at all. The as2 region is interleaved
 * between two channels. The mot region is magic and may overlap
 * other regions, with its interleave rules taking precedence.
 * Addresses not in any of these regions are interleaved across
 * all four channels.
 */
static struct region {
    u64 base;
    u64 limit;
    int enabled;
} mot, as0, as1, as2;
static struct dunit_ops {
    char *name;
    enum type type;
    int pmiaddr_shift;
    int pmiidx_shift;
    int channels;
    int dimms_per_channel;
    int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
    int (*get_registers)(void);
    int (*check_ecc)(void);
    void (*mk_region)(char *name, struct region *rp, void *asym);
    void (*get_dimm_config)(struct mem_ctl_info *mci);
    int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
               struct dram_addr *daddr, char *msg);
} *ops;

static struct mem_ctl_info *pnd2_mci;
#define PND2_MSG_SIZE	256

/* Debug macros */
#define pnd2_printk(level, fmt, arg...)			\
    edac_printk(level, "pnd2", fmt, ##arg)

#define pnd2_mc_printk(mci, level, fmt, arg...)	\
    edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)

#define MOT_CHAN_INTLV_BIT_1SLC_2CH	12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH	13
#define SELECTOR_DISABLED		(-1)
#define _4GB				(1ul << 32)

#define PMI_ADDRESS_WIDTH		31
#define PND_MAX_PHYS_BIT		39

#define APL_ASYMSHIFT			28
#define DNV_ASYMSHIFT			31
#define CH_HASH_MASK_LSB		6
#define SLICE_HASH_MASK_LSB		6
#define MOT_SLC_INTLV_BIT		12
#define LOG2_PMI_ADDR_GRANULARITY	5
#define MOT_SHIFT			24

#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s)	((u64)(val) << (s))
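/*
 * Example (illustrative): GET_BITFIELD(0x12345678, 4, 11) extracts bits
 * 11..4 and right-justifies them, giving 0x67. U64_LSHIFT(0x5, 32) widens
 * the value to u64 before shifting, giving 0x500000000 without risking a
 * 32-bit overflow.
 */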
/*
 * On Apollo Lake we access memory controller registers via a
 * side-band mailbox style interface in a hidden PCI device
 * configuration space.
 */
static struct pci_bus	*p2sb_bus;
#define P2SB_DEVFN	PCI_DEVFN(0xd, 0)
#define P2SB_ADDR_OFF	0xd0
#define P2SB_DATA_OFF	0xd4
#define P2SB_STAT_OFF	0xd8
#define P2SB_ROUT_OFF	0xda
#define P2SB_EADD_OFF	0xdc
#define P2SB_HIDE_OFF	0xe1
#define P2SB_BUSY	1

#define P2SB_READ(size, off, ptr) \
    pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
#define P2SB_WRITE(size, off, val) \
    pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
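/*
 * For example, P2SB_READ(word, P2SB_STAT_OFF, &status) expands to
 * pci_bus_read_config_word(p2sb_bus, PCI_DEVFN(0xd, 0), 0xd8, &status),
 * i.e. a config space access to device 0xd function 0 even while that
 * device is hidden from normal PCI enumeration.
 */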
static bool p2sb_is_busy(u16 *status)
{
    P2SB_READ(word, P2SB_STAT_OFF, status);

    return !!(*status & P2SB_BUSY);
}
static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
    int retries = 0xff, ret;
    u16 status;
    u8 hidden;

    /* Unhide the P2SB device, if it's hidden */
    P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
    if (hidden)
        P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

    if (p2sb_is_busy(&status)) {
        ret = -EAGAIN;
        goto out;
    }

    P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
    P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
    P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
    P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
    P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

    while (p2sb_is_busy(&status)) {
        if (retries-- == 0) {
            ret = -EBUSY;
            goto out;
        }
    }

    P2SB_READ(dword, P2SB_DATA_OFF, data);
    ret = (status >> 1) & 0x3;
out:
    /* Hide the P2SB device, if it was hidden before */
    if (hidden)
        P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);

    return ret;
}
static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
    int ret = 0;

    edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
    switch (sz) {
    case 8:
        ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
        /* fall through */
    case 4:
        ret |= _apl_rd_reg(port, off, op, (u32 *)data);
        pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
                sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
        break;
    }

    return ret;
}
static u64 get_mem_ctrl_hub_base_addr(void)
{
    struct b_cr_mchbar_lo_pci lo;
    struct b_cr_mchbar_hi_pci hi;
    struct pci_dev *pdev;

    pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
    if (pdev) {
        pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
        pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
        pci_dev_put(pdev);
    } else {
        return 0;
    }

    if (!lo.enable) {
        edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
        return 0;
    }

    return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}
static u64 get_sideband_reg_base_addr(void)
{
    struct pci_dev *pdev;
    u32 hi, lo;
    u8 hidden;

    pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
    if (pdev) {
        /* Unhide the P2SB device, if it's hidden */
        pci_read_config_byte(pdev, 0xe1, &hidden);
        if (hidden)
            pci_write_config_byte(pdev, 0xe1, 0);

        pci_read_config_dword(pdev, 0x10, &lo);
        pci_read_config_dword(pdev, 0x14, &hi);

        /* Hide the P2SB device, if it was hidden before */
        if (hidden)
            pci_write_config_byte(pdev, 0xe1, hidden);

        pci_dev_put(pdev);
        return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
    }

    return 0;
}
#define DNV_MCHBAR_SIZE		0x8000
#define DNV_SB_PORT_SIZE	0x10000
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
    struct pci_dev *pdev;
    char *base;
    u64 addr;
    unsigned long size;

    if (op == 4) {
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
        if (!pdev)
            return -ENODEV;

        pci_read_config_dword(pdev, off, data);
        pci_dev_put(pdev);
    } else {
        /* MMIO via memory controller hub base address */
        if (op == 0 && port == 0x4c) {
            addr = get_mem_ctrl_hub_base_addr();
            if (!addr)
                return -ENODEV;
            size = DNV_MCHBAR_SIZE;
        } else {
            /* MMIO via sideband register base address */
            addr = get_sideband_reg_base_addr();
            if (!addr)
                return -ENODEV;
            addr += (port << 16);
            size = DNV_SB_PORT_SIZE;
        }

        base = ioremap((resource_size_t)addr, size);
        if (!base)
            return -ENODEV;

        if (sz == 8)
            *(u32 *)(data + 4) = *(u32 *)(base + off + 4);
        *(u32 *)data = *(u32 *)(base + off);

        iounmap(base);
    }

    edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
         sz == 8 ? *(u32 *)(data + 4) : 0, *(u32 *)data);

    return 0;
}
#define RD_REGP(regp, regname, port)	\
    ops->rd_reg(port,			\
        regname##_offset,		\
        regname##_r_opcode,		\
        regp, sizeof(struct regname),	\
        #regname)

#define RD_REG(regp, regname)		\
    ops->rd_reg(regname ## _port,	\
        regname##_offset,		\
        regname##_r_opcode,		\
        regp, sizeof(struct regname),	\
        #regname)
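/*
 * For example, RD_REG(&tolud, b_cr_tolud_pci) expands to
 * ops->rd_reg(b_cr_tolud_pci_port, b_cr_tolud_pci_offset,
 * b_cr_tolud_pci_r_opcode, &tolud, sizeof(struct b_cr_tolud_pci),
 * "b_cr_tolud_pci"), with the per-register constants supplied by
 * pnd2_edac.h.
 */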
static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

static u8 sym_chan_mask;
static u8 asym_chan_mask;
static u8 chan_mask;

static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;
static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
    rp->base = base;
    rp->limit = limit;
    rp->enabled = 1;
    edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}
static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
    if (mask == 0) {
        pr_info(FW_BUG "MOT mask cannot be zero\n");
        return;
    }
    if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
        pr_info(FW_BUG "MOT mask not power of two\n");
        return;
    }
    if (base & ~mask) {
        pr_info(FW_BUG "MOT region base/mask alignment error\n");
        return;
    }
    rp->base = base;
    rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
    rp->enabled = 1;
    edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
static bool in_region(struct region *rp, u64 addr)
{
    if (!rp->enabled)
        return false;

    return rp->base <= addr && addr <= rp->limit;
}
static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
    int mask = 0;

    if (!p->slice_0_mem_disabled)
        mask |= p->sym_slice0_channel_enabled;

    if (!p->slice_1_disabled)
        mask |= p->sym_slice1_channel_enabled << 2;

    if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
        mask &= 0x5;

    return mask;
}
static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
             struct b_cr_asym_mem_region0_mchbar *as0,
             struct b_cr_asym_mem_region1_mchbar *as1,
             struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
    const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
    int mask = 0;

    if (as2way->asym_2way_interleave_enable)
        mask = intlv[as2way->asym_2way_intlv_mode];
    if (as0->slice0_asym_enable)
        mask |= (1 << as0->slice0_asym_channel_select);
    if (as1->slice1_asym_enable)
        mask |= (4 << as1->slice1_asym_channel_select);
    if (p->slice_0_mem_disabled)
        mask &= 0xc;
    if (p->slice_1_disabled)
        mask &= 0x3;
    if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
        mask &= 0x5;

    return mask;
}
static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;
/* Apollo Lake dunit */
/*
 * Validated on a board with just two DIMMs in the [0] and [2] positions
 * in this array. The other port numbers match the documentation, but
 * caution is advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
static void apl_mk_region(char *name, struct region *rp, void *asym)
{
    struct b_cr_asym_mem_region0_mchbar *a = asym;

    mk_region(name, rp,
          U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
          U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
          GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}
static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
    struct b_cr_asym_mem_region_denverton *a = asym;

    mk_region(name, rp,
          U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
          U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
          GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}
static int apl_get_registers(void)
{
    int ret = -ENODEV;
    int i;

    if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
        return -ENODEV;

    /*
     * RD_REGP() will fail for unpopulated or non-existent
     * DIMM slots. Return success if we find at least one DIMM.
     */
    for (i = 0; i < APL_NUM_CHANNELS; i++)
        if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
            ret = 0;

    return ret;
}
static int dnv_get_registers(void)
{
    int i;

    if (RD_REG(&dsch, d_cr_dsch))
        return -ENODEV;

    for (i = 0; i < DNV_NUM_CHANNELS; i++)
        if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
            RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
            RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
            RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
            RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
            RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
            RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
            RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
            return -ENODEV;

    return 0;
}
/*
 * Read all the h/w config registers once here (they don't
 * change at run time). Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
    const int intlv[] = { 10, 11, 12, 12 };

    if (RD_REG(&tolud, b_cr_tolud_pci) ||
        RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
        RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
        RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
        RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
        RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
        RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
        RD_REG(&chash, b_cr_slice_channel_hash))
        return -ENODEV;

    if (ops->get_registers())
        return -ENODEV;

    if (ops->type == DNV) {
        /* PMI channel idx (always 0) for asymmetric region */
        asym0.slice0_asym_channel_select = 0;
        asym1.slice1_asym_channel_select = 0;
        /* PMI channel bitmap (always 1) for symmetric region */
        chash.sym_slice0_channel_enabled = 0x1;
        chash.sym_slice1_channel_enabled = 0x1;
    }

    if (asym0.slice0_asym_enable)
        ops->mk_region("as0", &as0, &asym0);

    if (asym1.slice1_asym_enable)
        ops->mk_region("as1", &as1, &asym1);

    if (asym_2way.asym_2way_interleave_enable) {
        mk_region("as2way", &as2,
              U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
              U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
              GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
    }

    if (mot_base.imr_en) {
        mk_region_mask("mot", &mot,
                   U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
                   U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
    }

    top_lm = U64_LSHIFT(tolud.tolud, 20);
    top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

    two_slices = !chash.slice_1_disabled &&
             !chash.slice_0_mem_disabled &&
             (chash.sym_slice0_channel_enabled != 0) &&
             (chash.sym_slice1_channel_enabled != 0);
    two_channels = !chash.ch_1_disabled &&
               !chash.enable_pmi_dual_data_mode &&
               ((chash.sym_slice0_channel_enabled == 3) ||
               (chash.sym_slice1_channel_enabled == 3));

    sym_chan_mask = gen_sym_mask(&chash);
    asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
    chan_mask = sym_chan_mask | asym_chan_mask;

    if (two_slices && !two_channels) {
        if (chash.hvm_mode)
            slice_selector = 29;
        else
            slice_selector = intlv[chash.interleave_mode];
    } else if (!two_slices && two_channels) {
        if (chash.hvm_mode)
            chan_selector = 29;
        else
            chan_selector = intlv[chash.interleave_mode];
    } else if (two_slices && two_channels) {
        if (chash.hvm_mode) {
            slice_selector = 29;
            chan_selector = 30;
        } else {
            slice_selector = intlv[chash.interleave_mode];
            chan_selector = intlv[chash.interleave_mode] + 1;
        }
    }

    if (two_slices) {
        if (!chash.hvm_mode)
            slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;

        if (!two_channels)
            slice_hash_mask |= BIT_ULL(slice_selector);
    }

    if (two_channels) {
        if (!chash.hvm_mode)
            chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;

        if (!two_slices)
            chan_hash_mask |= BIT_ULL(chan_selector);
    }

    return 0;
}
/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
    return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
}
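/*
 * Worked example (illustrative): with top_lm = 2 GiB the MMIO gap spans
 * [2 GiB, 4 GiB), so a system address of 5 GiB maps to
 * 5 GiB - (4 GiB - 2 GiB) = 3 GiB of contiguous DRAM.
 */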
/* Squeeze out one address bit, shift upper part down to fill gap */
static void remove_addr_bit(u64 *addr, int bitidx)
{
    u64 mask;

    if (bitidx == -1)
        return;

    mask = (1ull << bitidx) - 1;
    *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}
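/*
 * Worked example (illustrative): removing bit index 2 from
 * addr = 0b101101 keeps the low bits 0b01, shifts the rest down, and
 * yields 0b10101, i.e. the original bits with bit 2 squeezed out.
 */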
/* XOR all the bits from addr specified in mask */
static int hash_by_mask(u64 addr, u64 mask)
{
    u64 result = addr & mask;

    result = (result >> 32) ^ result;
    result = (result >> 16) ^ result;
    result = (result >> 8) ^ result;
    result = (result >> 4) ^ result;
    result = (result >> 2) ^ result;
    result = (result >> 1) ^ result;

    return (int)result & 1;
}
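/*
 * hash_by_mask() is a parity fold: the result is the XOR of all the addr
 * bits selected by mask. E.g. (addr & mask) == 0b11000100 has three bits
 * set, so the hash is 1.
 */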
/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
    u64 contig_addr, contig_base, contig_offset, contig_base_adj;
    int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
                     MOT_CHAN_INTLV_BIT_1SLC_2CH;
    int slice_intlv_bit_rm = SELECTOR_DISABLED;
    int chan_intlv_bit_rm = SELECTOR_DISABLED;
    /* Determine if address is in the MOT region. */
    bool mot_hit = in_region(&mot, addr);
    /* Calculate the number of symmetric regions enabled. */
    int sym_channels = hweight8(sym_chan_mask);

    /*
     * The amount we need to shift the asym base can be determined by the
     * number of enabled symmetric channels.
     * NOTE: This can only work because symmetric memory is not supposed
     * to do a 3-way interleave.
     */
    int sym_chan_shift = sym_channels >> 1;

    /* Give up if address is out of range, or in MMIO gap */
    if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
       (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
        snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
        return -EINVAL;
    }

    /* Get a contiguous memory address (remove the MMIO gap) */
    contig_addr = remove_mmio_gap(addr);

    if (in_region(&as0, addr)) {
        *pmiidx = asym0.slice0_asym_channel_select;

        contig_base = remove_mmio_gap(as0.base);
        contig_offset = contig_addr - contig_base;
        contig_base_adj = (contig_base >> sym_chan_shift) *
                  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
        contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
    } else if (in_region(&as1, addr)) {
        *pmiidx = 2u + asym1.slice1_asym_channel_select;

        contig_base = remove_mmio_gap(as1.base);
        contig_offset = contig_addr - contig_base;
        contig_base_adj = (contig_base >> sym_chan_shift) *
                  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
        contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
    } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
        bool channel1;

        mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
        *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
        channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
            hash_by_mask(contig_addr, chan_hash_mask);
        *pmiidx |= (u32)channel1;

        contig_base = remove_mmio_gap(as2.base);
        chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
        contig_offset = contig_addr - contig_base;
        remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
        contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
    } else {
        /* Otherwise we're in normal, boring symmetric mode. */
        *pmiidx = 0u;

        if (two_slices) {
            bool slice1;

            if (mot_hit) {
                slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
                slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
            } else {
                slice_intlv_bit_rm = slice_selector;
                slice1 = hash_by_mask(addr, slice_hash_mask);
            }

            *pmiidx = (u32)slice1 << 1;
        }

        if (two_channels) {
            bool channel1;

            mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
                            MOT_CHAN_INTLV_BIT_1SLC_2CH;

            if (mot_hit) {
                chan_intlv_bit_rm = mot_intlv_bit;
                channel1 = (addr >> mot_intlv_bit) & 1;
            } else {
                chan_intlv_bit_rm = chan_selector;
                channel1 = hash_by_mask(contig_addr, chan_hash_mask);
            }

            *pmiidx |= (u32)channel1;
        }
    }

    /* Remove the chan_selector bit first */
    remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
    /* Remove the slice bit (we remove it second because it must be lower) */
    remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
    *pmiaddr = contig_addr;

    return 0;
}
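/*
 * E.g. in plain symmetric mode with two slices and two channels, the
 * slice bit becomes bit 1 of *pmiidx, the channel bit becomes bit 0, and
 * both interleave bits are squeezed out of *pmiaddr above.
 */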
/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n))	/* column */
#define B(n) (0x20 | (n))	/* bank */
#define R(n) (0x40 | (n))	/* row */
#define RS   (0x80)		/* rank */
static struct dimm_geometry {
    u8  addrdec;
    u8  dden;
    u8  dwid;
    u8  rowbits, colbits;
    u16 bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
    {
        .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
        .rowbits = 15, .colbits = 10,
        .bits = {
            C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
            R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
            R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
            0,     0,     0,     0
        }
    },
    {
        .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
        .rowbits = 16, .colbits = 10,
        .bits = {
            C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
            R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
            R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
            R(15), 0,     0,     0
        }
    },
    {
        .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
        .rowbits = 16, .colbits = 10,
        .bits = {
            C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
            R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
            R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
            R(15), 0,     0,     0
        }
    },
    {
        .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
        .rowbits = 16, .colbits = 11,
        .bits = {
            C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
            R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
            R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
            R(14), R(15), 0,     0
        }
    },
    {
        .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
        .rowbits = 15, .colbits = 10,
        .bits = {
            C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
            R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
            R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
            0,     0,     0,     0
        }
    },
    {
        .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
        .rowbits = 16, .colbits = 10,
        .bits = {
            C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
            R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
            R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
            R(15), 0,     0,     0
        }
    },
    {
        .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
        .rowbits = 16, .colbits = 10,
        .bits = {
            C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
            R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
            R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
            R(15), 0,     0,     0
        }
    },
    {
        .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
        .rowbits = 16, .colbits = 11,
        .bits = {
            C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
            R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
            R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
            R(14), R(15), 0,     0
        }
    },
    {
        .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
        .rowbits = 15, .colbits = 10,
        .bits = {
            C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
            B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
            R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
            0,     0,     0,     0
        }
    },
    {
        .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
        .rowbits = 16, .colbits = 10,
        .bits = {
            C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
            B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
            R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
            R(15), 0,     0,     0
        }
    },
    {
        .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
        .rowbits = 16, .colbits = 10,
        .bits = {
            C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
            B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
            R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
            R(15), 0,     0,     0
        }
    },
    {
        .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
        .rowbits = 16, .colbits = 11,
        .bits = {
            C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
            B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
            R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
            R(14), R(15), 0,     0
        }
    }
};
static int bank_hash(u64 pmiaddr, int idx, int shft)
{
    int bhash = 0;

    switch (idx) {
    case 0:
        bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
        break;
    case 1:
        bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
        bhash ^= ((pmiaddr >> 22) & 1) << 1;
        break;
    case 2:
        bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
        break;
    }

    return bhash;
}

static int rank_hash(u64 pmiaddr)
{
    return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
}
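/*
 * Both hashes are simple parity computations: rank_hash() XORs PMI
 * address bits 16 and 10, and bank_hash() folds higher-order address
 * bits into individual bank address bits, offset by the address decode
 * mode (shft).
 */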
/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
               struct dram_addr *daddr, char *msg)
{
    struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
    struct pnd2_pvt *pvt = mci->pvt_info;
    int g = pvt->dimm_geom[pmiidx];
    struct dimm_geometry *d = &dimms[g];
    int column = 0, bank = 0, row = 0, rank = 0;
    int i, idx, type, skiprs = 0;

    for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
        int bit = (pmiaddr >> i) & 1;

        if (i + skiprs >= PMI_ADDRESS_WIDTH) {
            snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
            return -EINVAL;
        }

        type = d->bits[i + skiprs] & ~0xf;
        idx = d->bits[i + skiprs] & 0xf;

        /*
         * On single rank DIMMs ignore the rank select bit
         * and shift remainder of "bits[]" down one place.
         */
        if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
            skiprs = 1;
            type = d->bits[i + skiprs] & ~0xf;
            idx = d->bits[i + skiprs] & 0xf;
        }

        switch (type) {
        case C(0):
            column |= (bit << idx);
            break;
        case B(0):
            bank |= (bit << idx);
            if (cr_drp0->bahen)
                bank ^= bank_hash(pmiaddr, idx, d->addrdec);
            break;
        case R(0):
            row |= (bit << idx);
            break;
        case RS:
            rank = bit;
            if (cr_drp0->rsien)
                rank ^= rank_hash(pmiaddr);
            break;
        default:
            if (bit) {
                snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
                return -EINVAL;
            }
            break;
        }
    }

    daddr->col = column;
    daddr->bank = bank;
    daddr->row = row;
    daddr->rank = rank;
    daddr->dimm = 0;

    return 0;
}
/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
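/*
 * E.g. dnv_get_bit(pmiaddr, 19, 2) extracts PMI address bit 19 and
 * repositions it as bit 2 of the result, so scattered address bits can
 * be OR-ed together into a contiguous field.
 */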
static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
               struct dram_addr *daddr, char *msg)
{
    /* Rank 0 or 1 */
    daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
    /* Rank 2 or 3 */
    daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

    /*
     * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
     * flip them if DIMM1 is larger than DIMM0.
     */
    daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

    daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
    daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
    daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
    if (dsch.ddr4en)
        daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
    if (dmap1[pmiidx].bxor) {
        if (dsch.ddr4en) {
            daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
            daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
            if (dsch.chan_width == 0)
                /* 64/72 bit dram channel width */
                daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
            else
                /* 32/40 bit dram channel width */
                daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
            daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
        } else {
            daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
            daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
            if (dsch.chan_width == 0)
                daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
            else
                daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
        }
    }

    daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
    daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
    daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
    daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
    daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
    daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
    daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
    daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
    daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
    daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
    daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
    daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
    daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
    daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
    if (dmap4[pmiidx].row14 != 31)
        daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
    if (dmap4[pmiidx].row15 != 31)
        daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
    if (dmap4[pmiidx].row16 != 31)
        daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
    if (dmap4[pmiidx].row17 != 31)
        daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

    daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
    daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
    daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
    daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
    daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
    daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
    daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
    if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
        daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

    return 0;
}
)
1082 if (drp0
[ch
].dramtype
!= 0) {
1083 pnd2_printk(KERN_INFO
, "Unsupported DIMM in channel %d\n", ch
);
1085 } else if (drp0
[ch
].eccen
== 0) {
1086 pnd2_printk(KERN_INFO
, "ECC disabled on channel %d\n", ch
);
1092 static int apl_check_ecc_active(void)
1096 /* Check dramtype and ECC mode for each present DIMM */
1097 for (i
= 0; i
< APL_NUM_CHANNELS
; i
++)
1098 if (chan_mask
& BIT(i
))
1099 ret
+= check_channel(i
);
1100 return ret
? -EINVAL
: 0;
1103 #define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
1105 static int check_unit(int ch
)
1107 struct d_cr_drp
*d
= &drp
[ch
];
1109 if (DIMMS_PRESENT(d
) && !ecc_ctrl
[ch
].eccen
) {
1110 pnd2_printk(KERN_INFO
, "ECC disabled on channel %d\n", ch
);
1116 static int dnv_check_ecc_active(void)
1120 for (i
= 0; i
< DNV_NUM_CHANNELS
; i
++)
1121 ret
+= check_unit(i
);
1122 return ret
? -EINVAL
: 0;
static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
                 struct dram_addr *daddr, char *msg)
{
    u64 pmiaddr;
    u32 pmiidx;
    int ret;

    ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
    if (ret)
        return ret;

    pmiaddr >>= ops->pmiaddr_shift;
    /* pmi channel idx to dimm channel idx */
    pmiidx >>= ops->pmiidx_shift;
    daddr->chan = pmiidx;

    ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
    if (ret)
        return ret;

    edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
         addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

    return 0;
}
static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
                  struct dram_addr *daddr)
{
    enum hw_event_mc_err_type tp_event;
    char *optype, msg[PND2_MSG_SIZE];
    bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
    bool overflow = m->status & MCI_STATUS_OVER;
    bool uc_err = m->status & MCI_STATUS_UC;
    bool recov = m->status & MCI_STATUS_S;
    u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
    u32 mscod = GET_BITFIELD(m->status, 16, 31);
    u32 errcode = GET_BITFIELD(m->status, 0, 15);
    u32 optypenum = GET_BITFIELD(m->status, 4, 6);
    int rc;

    tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
                HW_EVENT_ERR_CORRECTED;

    /*
     * According to Table 15-9 of the Intel Architecture spec vol 3A,
     * memory errors should fit in this mask:
     *   000f 0000 1mmm cccc (binary)
     * where:
     *   f = Correction Report Filtering Bit. If 1, subsequent errors
     *       won't be shown
     *   mmm = error type
     *   cccc = channel
     * If the mask doesn't match, report an error to the parsing logic.
     */
    if (!((errcode & 0xef80) == 0x80)) {
        optype = "Can't parse: it is not a mem";
    } else {
        switch (optypenum) {
        case 0:
            optype = "generic undef request error";
            break;
        case 1:
            optype = "memory read error";
            break;
        case 2:
            optype = "memory write error";
            break;
        case 3:
            optype = "addr/cmd error";
            break;
        case 4:
            optype = "memory scrubbing error";
            break;
        default:
            optype = "reserved";
            break;
        }
    }

    /* Only decode errors with a valid address (ADDRV) */
    if (!(m->status & MCI_STATUS_ADDRV))
        return;

    rc = get_memory_error_data(mci, m->addr, daddr, msg);
    if (rc)
        goto address_error;

    snprintf(msg, sizeof(msg),
         "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
         overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
         errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

    edac_dbg(0, "%s\n", msg);

    /* Call the helper to output message */
    edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
                 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

    return;

address_error:
    edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}
static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
    struct pnd2_pvt *pvt = mci->pvt_info;
    struct dimm_info *dimm;
    struct d_cr_drp0 *d;
    u64 capacity;
    int i, g;

    for (i = 0; i < APL_NUM_CHANNELS; i++) {
        if (!(chan_mask & BIT(i)))
            continue;

        dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
        if (!dimm) {
            edac_dbg(0, "No allocated DIMM for channel %d\n", i);
            continue;
        }

        d = &drp0[i];
        for (g = 0; g < ARRAY_SIZE(dimms); g++)
            if (dimms[g].addrdec == d->addrdec &&
                dimms[g].dden == d->dden &&
                dimms[g].dwid == d->dwid)
                break;

        if (g == ARRAY_SIZE(dimms)) {
            edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
            continue;
        }

        pvt->dimm_geom[i] = g;
        /*
         * Each decoded (rank, bank, row, col) location holds 8 bytes on
         * a 64-bit channel, so ">> (20 - 3)" converts the count to MiB.
         */
        capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
               (1ul << dimms[g].colbits);
        edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
        dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
        dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
        dimm->mtype = MEM_DDR3;
        dimm->edac_mode = EDAC_SECDED;
        snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
    }
}
[] = {
1274 DEV_X8
, DEV_X4
, DEV_X16
, DEV_UNKNOWN
1277 static void dnv_get_dimm_config(struct mem_ctl_info
*mci
)
1279 int i
, j
, ranks_of_dimm
[DNV_MAX_DIMMS
], banks
, rowbits
, colbits
, memtype
;
1280 struct dimm_info
*dimm
;
1293 for (i
= 0; i
< DNV_NUM_CHANNELS
; i
++) {
1294 if (dmap4
[i
].row14
== 31)
1296 else if (dmap4
[i
].row15
== 31)
1298 else if (dmap4
[i
].row16
== 31)
1300 else if (dmap4
[i
].row17
== 31)
1305 if (memtype
== MEM_DDR3
) {
1306 if (dmap1
[i
].ca11
!= 0x3f)
1313 /* DIMM0 is present if rank0 and/or rank1 is enabled */
1314 ranks_of_dimm
[0] = d
->rken0
+ d
->rken1
;
1315 /* DIMM1 is present if rank2 and/or rank3 is enabled */
1316 ranks_of_dimm
[1] = d
->rken2
+ d
->rken3
;
1318 for (j
= 0; j
< DNV_MAX_DIMMS
; j
++) {
1319 if (!ranks_of_dimm
[j
])
1322 dimm
= EDAC_DIMM_PTR(mci
->layers
, mci
->dimms
, mci
->n_layers
, i
, j
, 0);
1324 edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i
, j
);
1328 capacity
= ranks_of_dimm
[j
] * banks
* (1ul << rowbits
) * (1ul << colbits
);
1329 edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i
, j
, capacity
>> (20 - 3));
1330 dimm
->nr_pages
= MiB_TO_PAGES(capacity
>> (20 - 3));
1332 dimm
->dtype
= dnv_dtypes
[j
? d
->dimmdwid0
: d
->dimmdwid1
];
1333 dimm
->mtype
= memtype
;
1334 dimm
->edac_mode
= EDAC_SECDED
;
1335 snprintf(dimm
->label
, sizeof(dimm
->label
), "Chan#%d_DIMM#%d", i
, j
);
static int pnd2_register_mci(struct mem_ctl_info **ppmci)
{
    struct edac_mc_layer layers[2];
    struct mem_ctl_info *mci;
    struct pnd2_pvt *pvt;
    int rc;

    rc = ops->check_ecc();
    if (rc < 0)
        return rc;

    /* Allocate a new MC control structure */
    layers[0].type = EDAC_MC_LAYER_CHANNEL;
    layers[0].size = ops->channels;
    layers[0].is_virt_csrow = false;
    layers[1].type = EDAC_MC_LAYER_SLOT;
    layers[1].size = ops->dimms_per_channel;
    layers[1].is_virt_csrow = true;
    mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
    if (!mci)
        return -ENOMEM;

    pvt = mci->pvt_info;
    memset(pvt, 0, sizeof(*pvt));

    mci->mod_name = EDAC_MOD_STR;
    mci->dev_name = ops->name;
    mci->ctl_name = "Pondicherry2";

    /* Get dimm basic config and the memory layout */
    ops->get_dimm_config(mci);

    if (edac_mc_add_mc(mci)) {
        edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
        edac_mc_free(mci);
        return -EINVAL;
    }

    *ppmci = mci;

    return 0;
}
*mci
)
1385 if (unlikely(!mci
|| !mci
->pvt_info
)) {
1386 pnd2_printk(KERN_ERR
, "Couldn't find mci handler\n");
1390 /* Remove MC sysfs nodes */
1391 edac_mc_del_mc(NULL
);
1392 edac_dbg(1, "%s: free mci struct\n", mci
->ctl_name
);
/*
 * Callback function registered with core kernel mce code.
 * Called once for each logged error.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
    struct mce *mce = (struct mce *)data;
    struct mem_ctl_info *mci;
    struct dram_addr daddr;
    char *type;

    if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
        return NOTIFY_DONE;

    mci = pnd2_mci;
    if (!mci)
        return NOTIFY_DONE;

    /*
     * Just let mcelog handle it if the error is
     * outside the memory controller. A memory error
     * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0.
     * bit 12 has a special meaning.
     */
    if ((mce->status & 0xefff) >> 7 != 1)
        return NOTIFY_DONE;

    if (mce->mcgstatus & MCG_STATUS_MCIP)
        type = "Exception";
    else
        type = "Event";

    pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
    pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
               mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
    pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
    pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
    pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
    pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
               mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

    pnd2_mce_output_error(mci, mce, &daddr);

    /* Advise mcelog that the error was handled */
    return NOTIFY_STOP;
}
static struct notifier_block pnd2_mce_dec = {
    .notifier_call = pnd2_mce_check_error,
};
#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
    .data = pnd2_result,
    .size = 0,
};

static int debugfs_u64_set(void *data, u64 val)
{
    struct dram_addr daddr;
    struct mce m;

    *(u64 *)data = val;
    m.mcgstatus = 0;
    /* ADDRV + MemRd + Unknown channel */
    m.status = MCI_STATUS_ADDRV + 0x9f;
    m.addr = val;
    pnd2_mce_output_error(pnd2_mci, &m, &daddr);
    snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
         "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
         m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
    pnd2_blob.size = strlen(pnd2_blob.data);

    return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_pnd2_debug(void)
{
    pnd2_test = edac_debugfs_create_dir("pnd2_test");
    edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
                 &pnd2_fake_addr, &fops_u64_wo);
    debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

static void teardown_pnd2_debug(void)
{
    debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
#endif /* CONFIG_EDAC_DEBUG */
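/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and
 * CONFIG_EDAC_DEBUG is enabled): writing a physical address to
 * pnd2_debug_addr runs it through the decode pipeline and reading
 * pnd2_debug_results returns the decoded location, e.g.:
 *
 *   echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
 *   cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results
 */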
static int pnd2_probe(void)
{
    int rc;

    rc = get_registers();
    if (rc)
        return rc;

    return pnd2_register_mci(&pnd2_mci);
}

static void pnd2_remove(void)
{
    pnd2_unregister_mci(pnd2_mci);
}
static struct dunit_ops apl_ops = {
    .name			= "pnd2/apl",
    .type			= APL,
    .pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
    .pmiidx_shift		= 0,
    .channels		= APL_NUM_CHANNELS,
    .dimms_per_channel	= 1,
    .rd_reg			= apl_rd_reg,
    .get_registers		= apl_get_registers,
    .check_ecc		= apl_check_ecc_active,
    .mk_region		= apl_mk_region,
    .get_dimm_config	= apl_get_dimm_config,
    .pmi2mem		= apl_pmi2mem,
};
static struct dunit_ops dnv_ops = {
    .name			= "pnd2/dnv",
    .type			= DNV,
    .pmiaddr_shift		= 0,
    .pmiidx_shift		= 1,
    .channels		= DNV_NUM_CHANNELS,
    .dimms_per_channel	= 2,
    .rd_reg			= dnv_rd_reg,
    .get_registers		= dnv_get_registers,
    .check_ecc		= dnv_check_ecc_active,
    .mk_region		= dnv_mk_region,
    .get_dimm_config	= dnv_get_dimm_config,
    .pmi2mem		= dnv_pmi2mem,
};
static const struct x86_cpu_id pnd2_cpuids[] = {
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X, 0, (kernel_ulong_t)&dnv_ops },
    { }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
static int __init pnd2_init(void)
{
    const struct x86_cpu_id *id;
    const char *owner;
    int rc;

    owner = edac_get_owner();
    if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
        return -EBUSY;

    id = x86_match_cpu(pnd2_cpuids);
    if (!id)
        return -ENODEV;

    ops = (struct dunit_ops *)id->driver_data;

    if (ops->type == APL) {
        p2sb_bus = pci_find_bus(0, 0);
        if (!p2sb_bus)
            return -ENODEV;
    }

    /* Ensure that the OPSTATE is set correctly for POLL or NMI */
    opstate_init();

    rc = pnd2_probe();
    if (rc < 0) {
        pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
        return rc;
    }

    if (!pnd2_mci)
        return -ENODEV;

    mce_register_decode_chain(&pnd2_mce_dec);
    setup_pnd2_debug();

    return 0;
}
static void __exit pnd2_exit(void)
{
    teardown_pnd2_debug();
    mce_unregister_decode_chain(&pnd2_mce_dec);
    pnd2_remove();
}

module_init(pnd2_init);
module_exit(pnd2_exit);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");