// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Address Translation Library
 *
 * map.c : Functions to read and decode DRAM address maps
 *
 * Copyright (c) 2023, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
 */

#include "internal.h"
static int df2_get_intlv_mode(struct addr_ctx *ctx)
{
	ctx->map.intlv_mode = FIELD_GET(DF2_INTLV_NUM_CHAN, ctx->map.base);

	if (ctx->map.intlv_mode == 8)
		ctx->map.intlv_mode = DF2_2CHAN_HASH;

	if (ctx->map.intlv_mode != NONE &&
	    ctx->map.intlv_mode != NOHASH_2CHAN &&
	    ctx->map.intlv_mode != DF2_2CHAN_HASH)
		return -EINVAL;

	return 0;
}
static int df3_get_intlv_mode(struct addr_ctx *ctx)
{
	ctx->map.intlv_mode = FIELD_GET(DF3_INTLV_NUM_CHAN, ctx->map.base);
	return 0;
}
static int df3p5_get_intlv_mode(struct addr_ctx *ctx)
{
	ctx->map.intlv_mode = FIELD_GET(DF3p5_INTLV_NUM_CHAN, ctx->map.base);

	if (ctx->map.intlv_mode == DF3_6CHAN)
		return -EINVAL;

	return 0;
}
static int df4_get_intlv_mode(struct addr_ctx *ctx)
{
	ctx->map.intlv_mode = FIELD_GET(DF4_INTLV_NUM_CHAN, ctx->map.intlv);

	if (ctx->map.intlv_mode == DF3_COD4_2CHAN_HASH ||
	    ctx->map.intlv_mode == DF3_COD2_4CHAN_HASH ||
	    ctx->map.intlv_mode == DF3_COD1_8CHAN_HASH ||
	    ctx->map.intlv_mode == DF3_6CHAN)
		return -EINVAL;

	return 0;
}
static int df4p5_get_intlv_mode(struct addr_ctx *ctx)
{
	ctx->map.intlv_mode = FIELD_GET(DF4p5_INTLV_NUM_CHAN, ctx->map.intlv);

	if (ctx->map.intlv_mode <= NOHASH_32CHAN)
		return 0;

	if (ctx->map.intlv_mode >= MI3_HASH_8CHAN &&
	    ctx->map.intlv_mode <= MI3_HASH_32CHAN)
		return 0;

	/*
	 * Modes matching the ranges above are returned as-is.
	 *
	 * All other modes are "fixed up" by adding 20h to make a unique value.
	 */
	ctx->map.intlv_mode += 0x20;

	return 0;
}
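/* Dispatch to the interleave mode decoder matching the Data Fabric revision. */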
static int get_intlv_mode(struct addr_ctx *ctx)
{
	int ret;

	switch (df_cfg.rev) {
	case DF2:
		ret = df2_get_intlv_mode(ctx);
		break;
	case DF3:
		ret = df3_get_intlv_mode(ctx);
		break;
	case DF3p5:
		ret = df3p5_get_intlv_mode(ctx);
		break;
	case DF4:
		ret = df4_get_intlv_mode(ctx);
		break;
	case DF4p5:
		ret = df4p5_get_intlv_mode(ctx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		atl_debug_on_bad_df_rev();

	return ret;
}
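/*
 * The DramOffset register holds the normalized offset in units of
 * 1 << DF_DRAM_BASE_LIMIT_LSB (1 << MI300_DRAM_LIMIT_LSB on heterogeneous
 * DF 4.5 systems), so the extracted field is shifted back up to a byte
 * address.
 */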
static u64 get_hi_addr_offset(u32 reg_dram_offset)
{
	u8 shift = DF_DRAM_BASE_LIMIT_LSB;
	u64 hi_addr_offset;

	switch (df_cfg.rev) {
	case DF2:
		hi_addr_offset = FIELD_GET(DF2_HI_ADDR_OFFSET, reg_dram_offset);
		break;
	case DF3:
	case DF3p5:
		hi_addr_offset = FIELD_GET(DF3_HI_ADDR_OFFSET, reg_dram_offset);
		break;
	case DF4:
	case DF4p5:
		hi_addr_offset = FIELD_GET(DF4_HI_ADDR_OFFSET, reg_dram_offset);
		break;
	default:
		hi_addr_offset = 0;
		atl_debug_on_bad_df_rev();
	}

	if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
		shift = MI300_DRAM_LIMIT_LSB;

	return hi_addr_offset << shift;
}
/*
 * Returns: 0 if offset is disabled.
 *	    1 if offset is enabled.
 *	    -EINVAL on error.
 */
static int get_dram_offset(struct addr_ctx *ctx, u64 *norm_offset)
{
	u32 reg_dram_offset;
	u8 map_num;

	/* Should not be called for map 0. */
	if (!ctx->map.num) {
		atl_debug(ctx, "Trying to find DRAM offset for map 0");
		return -EINVAL;
	}

	/*
	 * DramOffset registers don't exist for map 0, so the base register
	 * actually refers to map 1.
	 * Adjust the map_num for the register offsets.
	 */
	map_num = ctx->map.num - 1;

	if (df_cfg.rev >= DF4) {
		/* Read D18F7x140 (DramOffset) */
		if (df_indirect_read_instance(ctx->node_id, 7, 0x140 + (4 * map_num),
					      ctx->inst_id, &reg_dram_offset))
			return -EINVAL;
	} else {
		/* Read D18F0x1B4 (DramOffset) */
		if (df_indirect_read_instance(ctx->node_id, 0, 0x1B4 + (4 * map_num),
					      ctx->inst_id, &reg_dram_offset))
			return -EINVAL;
	}

	if (!FIELD_GET(DF_HI_ADDR_OFFSET_EN, reg_dram_offset))
		return 0;

	*norm_offset = get_hi_addr_offset(reg_dram_offset);

	return 1;
}
static int df3_6ch_get_dram_addr_map(struct addr_ctx *ctx)
{
	u16 dst_fabric_id = FIELD_GET(DF3_DST_FABRIC_ID, ctx->map.limit);
	u8 i, j, shift = 4, mask = 0xF;
	u32 reg, offset = 0x60;
	u16 dst_node_id;

	/* Get Socket 1 register. */
	if (dst_fabric_id & df_cfg.socket_id_mask)
		offset = 0x68;

	/* Read D18F0x06{0,8} (DF::Skt0CsTargetRemap0)/(DF::Skt0CsTargetRemap1) */
	if (df_indirect_read_broadcast(ctx->node_id, 0, offset, &reg))
		return -EINVAL;

	/* Save 8 remap entries. */
	for (i = 0, j = 0; i < 8; i++, j++)
		ctx->map.remap_array[i] = (reg >> (j * shift)) & mask;

	dst_node_id = dst_fabric_id & df_cfg.node_id_mask;
	dst_node_id >>= df_cfg.node_id_shift;

	/* Read D18F2x090 (DF::Np2ChannelConfig) */
	if (df_indirect_read_broadcast(dst_node_id, 2, 0x90, &reg))
		return -EINVAL;

	ctx->map.np2_bits = FIELD_GET(DF_LOG2_ADDR_64K_SPACE0, reg);

	return 0;
}
static int df2_get_dram_addr_map(struct addr_ctx *ctx)
{
	/* Read D18F0x110 (DramBaseAddress). */
	if (df_indirect_read_instance(ctx->node_id, 0, 0x110 + (8 * ctx->map.num),
				      ctx->inst_id, &ctx->map.base))
		return -EINVAL;

	/* Read D18F0x114 (DramLimitAddress). */
	if (df_indirect_read_instance(ctx->node_id, 0, 0x114 + (8 * ctx->map.num),
				      ctx->inst_id, &ctx->map.limit))
		return -EINVAL;

	return 0;
}
static int df3_get_dram_addr_map(struct addr_ctx *ctx)
{
	if (df2_get_dram_addr_map(ctx))
		return -EINVAL;

	/* Read D18F0x3F8 (DfGlobalCtl). */
	if (df_indirect_read_instance(ctx->node_id, 0, 0x3F8,
				      ctx->inst_id, &ctx->map.ctl))
		return -EINVAL;

	return 0;
}
static int df4_get_dram_addr_map(struct addr_ctx *ctx)
{
	u8 remap_sel, i, j, shift = 4, mask = 0xF;
	u32 remap_reg;

	/* Read D18F7xE00 (DramBaseAddress). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0xE00 + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.base))
		return -EINVAL;

	/* Read D18F7xE04 (DramLimitAddress). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0xE04 + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.limit))
		return -EINVAL;

	/* Read D18F7xE08 (DramAddressCtl). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0xE08 + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.ctl))
		return -EINVAL;

	/* Read D18F7xE0C (DramAddressIntlv). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0xE0C + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.intlv))
		return -EINVAL;

	/* Check if Remap Enable bit is valid. */
	if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl))
		return 0;

	/* Fill with bogus values, because '0' is a valid value. */
	memset(&ctx->map.remap_array, 0xFF, sizeof(ctx->map.remap_array));

	/* Get Remap registers. */
	remap_sel = FIELD_GET(DF4_REMAP_SEL, ctx->map.ctl);

	/* Read D18F7x180 (CsTargetRemap0A). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x180 + (8 * remap_sel),
				      ctx->inst_id, &remap_reg))
		return -EINVAL;

	/* Save first 8 remap entries. */
	for (i = 0, j = 0; i < 8; i++, j++)
		ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;

	/* Read D18F7x184 (CsTargetRemap0B). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x184 + (8 * remap_sel),
				      ctx->inst_id, &remap_reg))
		return -EINVAL;

	/* Save next 8 remap entries. */
	for (i = 8, j = 0; i < 16; i++, j++)
		ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;

	return 0;
}
static int df4p5_get_dram_addr_map(struct addr_ctx *ctx)
{
	u8 remap_sel, i, j, shift = 5, mask = 0x1F;
	u32 remap_reg;

	/* Read D18F7x200 (DramBaseAddress). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x200 + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.base))
		return -EINVAL;

	/* Read D18F7x204 (DramLimitAddress). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x204 + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.limit))
		return -EINVAL;

	/* Read D18F7x208 (DramAddressCtl). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x208 + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.ctl))
		return -EINVAL;

	/* Read D18F7x20C (DramAddressIntlv). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x20C + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.intlv))
		return -EINVAL;

	/* Check if Remap Enable bit is valid. */
	if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl))
		return 0;

	/* Fill with bogus values, because '0' is a valid value. */
	memset(&ctx->map.remap_array, 0xFF, sizeof(ctx->map.remap_array));

	/* Get Remap registers. */
	remap_sel = FIELD_GET(DF4p5_REMAP_SEL, ctx->map.ctl);

	/* Read D18F7x180 (CsTargetRemap0A). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x180 + (24 * remap_sel),
				      ctx->inst_id, &remap_reg))
		return -EINVAL;

	/* Save first 6 remap entries. */
	for (i = 0, j = 0; i < 6; i++, j++)
		ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;

	/* Read D18F7x184 (CsTargetRemap0B). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x184 + (24 * remap_sel),
				      ctx->inst_id, &remap_reg))
		return -EINVAL;

	/* Save next 6 remap entries. */
	for (i = 6, j = 0; i < 12; i++, j++)
		ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;

	/* Read D18F7x188 (CsTargetRemap0C). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x188 + (24 * remap_sel),
				      ctx->inst_id, &remap_reg))
		return -EINVAL;

	/* Save next 6 remap entries. */
	for (i = 12, j = 0; i < 18; i++, j++)
		ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;

	return 0;
}
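/* Read the DRAM address map registers for the current Data Fabric revision. */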
static int get_dram_addr_map(struct addr_ctx *ctx)
{
	switch (df_cfg.rev) {
	case DF2:	return df2_get_dram_addr_map(ctx);
	case DF3:
	case DF3p5:	return df3_get_dram_addr_map(ctx);
	case DF4:	return df4_get_dram_addr_map(ctx);
	case DF4p5:	return df4p5_get_dram_addr_map(ctx);
	default:
			atl_debug_on_bad_df_rev();
			return -EINVAL;
	}
}
static int get_coh_st_fabric_id(struct addr_ctx *ctx)
{
	u32 reg;

	/*
	 * On MI300 systems, the Coherent Station Fabric ID is derived
	 * later and does not depend on the register value.
	 */
	if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
		return 0;

	/* Read D18F0x50 (FabricBlockInstanceInformation3). */
	if (df_indirect_read_instance(ctx->node_id, 0, 0x50, ctx->inst_id, &reg))
		return -EINVAL;

	if (df_cfg.rev < DF4p5)
		ctx->coh_st_fabric_id = FIELD_GET(DF2_COH_ST_FABRIC_ID, reg);
	else
		ctx->coh_st_fabric_id = FIELD_GET(DF4p5_COH_ST_FABRIC_ID, reg);

	return 0;
}
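/*
 * Walk maps 1..num_coh_st_maps and pick the first enabled DramOffset that is
 * at or below the current calculated address. Enabled offsets must be
 * non-zero and strictly increasing; if no map matches, fall back to map 0
 * with no offset.
 */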
static int find_normalized_offset(struct addr_ctx *ctx, u64 *norm_offset)
{
	u64 last_offset = 0;
	int ret;

	for (ctx->map.num = 1; ctx->map.num < df_cfg.num_coh_st_maps; ctx->map.num++) {
		ret = get_dram_offset(ctx, norm_offset);
		if (ret < 0)
			return ret;

		/* Continue search if this map's offset is not enabled. */
		if (!ret)
			continue;

		/* Enabled offsets should never be 0. */
		if (*norm_offset == 0) {
			atl_debug(ctx, "Enabled map %u offset is 0", ctx->map.num);
			return -EINVAL;
		}

		/* Offsets should always increase from one map to the next. */
		if (*norm_offset <= last_offset) {
			atl_debug(ctx, "Map %u offset (0x%016llx) <= previous (0x%016llx)",
				  ctx->map.num, *norm_offset, last_offset);
			return -EINVAL;
		}

		/* Match if this map's offset is less than the current calculated address. */
		if (ctx->ret_addr >= *norm_offset)
			break;

		last_offset = *norm_offset;
	}

	/*
	 * Finished search without finding a match.
	 * Reset to map 0 and no offset.
	 */
	if (ctx->map.num >= df_cfg.num_coh_st_maps) {
		ctx->map.num = 0;
		*norm_offset = 0;
	}

	return 0;
}
static bool valid_map(struct addr_ctx *ctx)
{
	if (df_cfg.rev >= DF4)
		return FIELD_GET(DF_ADDR_RANGE_VAL, ctx->map.ctl);
	else
		return FIELD_GET(DF_ADDR_RANGE_VAL, ctx->map.base);
}
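/*
 * Gather everything needed to locate the map for this address: the Coherent
 * Station Fabric ID, the normalized DRAM offset, and the map registers.
 * The matching offset is then removed from the address being translated.
 */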
static int get_address_map_common(struct addr_ctx *ctx)
{
	u64 norm_offset = 0;

	if (get_coh_st_fabric_id(ctx))
		return -EINVAL;

	if (find_normalized_offset(ctx, &norm_offset))
		return -EINVAL;

	if (get_dram_addr_map(ctx))
		return -EINVAL;

	if (!valid_map(ctx))
		return -EINVAL;

	ctx->ret_addr -= norm_offset;

	return 0;
}
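/* Map the interleave mode to the number of channels it spreads across. */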
static u8 get_num_intlv_chan(struct addr_ctx *ctx)
{
	switch (ctx->map.intlv_mode) {
	case NONE:
		return 1;
	case NOHASH_2CHAN:
	case DF2_2CHAN_HASH:
	case DF3_COD4_2CHAN_HASH:
	case DF4_NPS4_2CHAN_HASH:
	case DF4p5_NPS4_2CHAN_1K_HASH:
	case DF4p5_NPS4_2CHAN_2K_HASH:
		return 2;
	case DF4_NPS4_3CHAN_HASH:
	case DF4p5_NPS4_3CHAN_1K_HASH:
	case DF4p5_NPS4_3CHAN_2K_HASH:
		return 3;
	case NOHASH_4CHAN:
	case DF3_COD2_4CHAN_HASH:
	case DF4_NPS2_4CHAN_HASH:
	case DF4p5_NPS2_4CHAN_1K_HASH:
	case DF4p5_NPS2_4CHAN_2K_HASH:
		return 4;
	case DF4_NPS2_5CHAN_HASH:
	case DF4p5_NPS2_5CHAN_1K_HASH:
	case DF4p5_NPS2_5CHAN_2K_HASH:
		return 5;
	case DF3_6CHAN:
	case DF4_NPS2_6CHAN_HASH:
	case DF4p5_NPS2_6CHAN_1K_HASH:
	case DF4p5_NPS2_6CHAN_2K_HASH:
		return 6;
	case NOHASH_8CHAN:
	case DF3_COD1_8CHAN_HASH:
	case DF4_NPS1_8CHAN_HASH:
	case MI3_HASH_8CHAN:
	case DF4p5_NPS1_8CHAN_1K_HASH:
	case DF4p5_NPS1_8CHAN_2K_HASH:
		return 8;
	case DF4_NPS1_10CHAN_HASH:
	case DF4p5_NPS1_10CHAN_1K_HASH:
	case DF4p5_NPS1_10CHAN_2K_HASH:
		return 10;
	case DF4_NPS1_12CHAN_HASH:
	case DF4p5_NPS1_12CHAN_1K_HASH:
	case DF4p5_NPS1_12CHAN_2K_HASH:
		return 12;
	case NOHASH_16CHAN:
	case MI3_HASH_16CHAN:
	case DF4p5_NPS1_16CHAN_1K_HASH:
	case DF4p5_NPS1_16CHAN_2K_HASH:
		return 16;
	case DF4p5_NPS0_24CHAN_1K_HASH:
	case DF4p5_NPS0_24CHAN_2K_HASH:
		return 24;
	case NOHASH_32CHAN:
	case MI3_HASH_32CHAN:
		return 32;
	default:
		atl_debug_on_bad_intlv_mode(ctx);
		return 0;
	}
}
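/*
 * Total interleaved channels = channels * dies * sockets. The number of
 * address bits needed to cover them rounds up, e.g. 6 total channels ->
 * order_base_2(6) = 3 interleave bits.
 */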
static void calculate_intlv_bits(struct addr_ctx *ctx)
{
	ctx->map.num_intlv_chan = get_num_intlv_chan(ctx);

	ctx->map.total_intlv_chan = ctx->map.num_intlv_chan;
	ctx->map.total_intlv_chan *= ctx->map.num_intlv_dies;
	ctx->map.total_intlv_chan *= ctx->map.num_intlv_sockets;

	/*
	 * Get the number of bits needed to cover this many channels.
	 * order_base_2() rounds up automatically.
	 */
	ctx->map.total_intlv_bits = order_base_2(ctx->map.total_intlv_chan);
}
static u8 get_intlv_bit_pos(struct addr_ctx *ctx)
{
	u8 addr_sel = 0;

	switch (df_cfg.rev) {
	case DF2:
		addr_sel = FIELD_GET(DF2_INTLV_ADDR_SEL, ctx->map.base);
		break;
	case DF3:
	case DF3p5:
		addr_sel = FIELD_GET(DF3_INTLV_ADDR_SEL, ctx->map.base);
		break;
	case DF4:
	case DF4p5:
		addr_sel = FIELD_GET(DF4_INTLV_ADDR_SEL, ctx->map.intlv);
		break;
	default:
		atl_debug_on_bad_df_rev();
		break;
	}

	/* Add '8' to get the 'interleave bit position'. */
	return addr_sel + 8;
}
static u8 get_num_intlv_dies(struct addr_ctx *ctx)
{
	u8 dies = 0;

	switch (df_cfg.rev) {
	case DF2:
		dies = FIELD_GET(DF2_INTLV_NUM_DIES, ctx->map.limit);
		break;
	case DF3:
		dies = FIELD_GET(DF3_INTLV_NUM_DIES, ctx->map.base);
		break;
	case DF3p5:
		dies = FIELD_GET(DF3p5_INTLV_NUM_DIES, ctx->map.base);
		break;
	case DF4:
	case DF4p5:
		dies = FIELD_GET(DF4_INTLV_NUM_DIES, ctx->map.intlv);
		break;
	default:
		atl_debug_on_bad_df_rev();
		break;
	}

	/* Register value is log2, e.g. 0 -> 1 die, 1 -> 2 dies, etc. */
	return 1 << dies;
}
static u8 get_num_intlv_sockets(struct addr_ctx *ctx)
{
	u8 sockets = 0;

	switch (df_cfg.rev) {
	case DF2:
		sockets = FIELD_GET(DF2_INTLV_NUM_SOCKETS, ctx->map.limit);
		break;
	case DF3:
	case DF3p5:
		sockets = FIELD_GET(DF2_INTLV_NUM_SOCKETS, ctx->map.base);
		break;
	case DF4:
	case DF4p5:
		sockets = FIELD_GET(DF4_INTLV_NUM_SOCKETS, ctx->map.intlv);
		break;
	default:
		atl_debug_on_bad_df_rev();
		break;
	}

	/* Register value is log2, e.g. 0 -> 1 socket, 1 -> 2 sockets, etc. */
	return 1 << sockets;
}
static int get_global_map_data(struct addr_ctx *ctx)
{
	if (get_intlv_mode(ctx))
		return -EINVAL;

	if (ctx->map.intlv_mode == DF3_6CHAN &&
	    df3_6ch_get_dram_addr_map(ctx))
		return -EINVAL;

	ctx->map.intlv_bit_pos = get_intlv_bit_pos(ctx);
	ctx->map.num_intlv_dies = get_num_intlv_dies(ctx);
	ctx->map.num_intlv_sockets = get_num_intlv_sockets(ctx);
	calculate_intlv_bits(ctx);

	return 0;
}
/*
 * Verify the interleave bits are correct in the different interleaving
 * settings.
 *
 * If @num_intlv_dies and/or @num_intlv_sockets are 1, it means the
 * respective interleaving is disabled.
 */
static inline bool map_bits_valid(struct addr_ctx *ctx, u8 bit1, u8 bit2,
				  u8 num_intlv_dies, u8 num_intlv_sockets)
{
	if (!(ctx->map.intlv_bit_pos == bit1 || ctx->map.intlv_bit_pos == bit2)) {
		pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos);
		return false;
	}

	if (ctx->map.num_intlv_dies > num_intlv_dies) {
		pr_debug("Invalid number of interleave dies: %u", ctx->map.num_intlv_dies);
		return false;
	}

	if (ctx->map.num_intlv_sockets > num_intlv_sockets) {
		pr_debug("Invalid number of interleave sockets: %u", ctx->map.num_intlv_sockets);
		return false;
	}

	return true;
}
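/* Sanity-check the decoded map against what each interleave mode allows. */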
static int validate_address_map(struct addr_ctx *ctx)
{
	switch (ctx->map.intlv_mode) {
	case DF2_2CHAN_HASH:
	case DF3_COD4_2CHAN_HASH:
	case DF3_COD2_4CHAN_HASH:
	case DF3_COD1_8CHAN_HASH:
		if (!map_bits_valid(ctx, 8, 9, 1, 1))
			goto err;
		break;

	case DF4_NPS4_2CHAN_HASH:
	case DF4_NPS2_4CHAN_HASH:
	case DF4_NPS1_8CHAN_HASH:
	case DF4p5_NPS4_2CHAN_1K_HASH:
	case DF4p5_NPS4_2CHAN_2K_HASH:
	case DF4p5_NPS2_4CHAN_1K_HASH:
	case DF4p5_NPS2_4CHAN_2K_HASH:
	case DF4p5_NPS1_8CHAN_1K_HASH:
	case DF4p5_NPS1_8CHAN_2K_HASH:
	case DF4p5_NPS1_16CHAN_1K_HASH:
	case DF4p5_NPS1_16CHAN_2K_HASH:
		if (!map_bits_valid(ctx, 8, 8, 1, 2))
			goto err;
		break;

	case DF4p5_NPS4_3CHAN_1K_HASH:
	case DF4p5_NPS4_3CHAN_2K_HASH:
	case DF4p5_NPS2_5CHAN_1K_HASH:
	case DF4p5_NPS2_5CHAN_2K_HASH:
	case DF4p5_NPS2_6CHAN_1K_HASH:
	case DF4p5_NPS2_6CHAN_2K_HASH:
	case DF4p5_NPS1_10CHAN_1K_HASH:
	case DF4p5_NPS1_10CHAN_2K_HASH:
	case DF4p5_NPS1_12CHAN_1K_HASH:
	case DF4p5_NPS1_12CHAN_2K_HASH:
		if (ctx->map.num_intlv_sockets != 1 || !map_bits_valid(ctx, 8, 0, 1, 1))
			goto err;
		break;

	case DF4p5_NPS0_24CHAN_1K_HASH:
	case DF4p5_NPS0_24CHAN_2K_HASH:
		if (ctx->map.num_intlv_sockets < 2 || !map_bits_valid(ctx, 8, 0, 1, 2))
			goto err;
		break;

	case MI3_HASH_8CHAN:
	case MI3_HASH_16CHAN:
	case MI3_HASH_32CHAN:
		if (!map_bits_valid(ctx, 8, 8, 4, 1))
			goto err;
		break;

	/* Nothing to do for modes that don't need special validation checks. */
	default:
		break;
	}

	return 0;

err:
	atl_debug(ctx, "Inconsistent address map");
	return -EINVAL;
}
static void dump_address_map(struct dram_addr_map *map)
{
	u8 i;

	pr_debug("intlv_mode=0x%x", map->intlv_mode);
	pr_debug("num=0x%x", map->num);
	pr_debug("base=0x%x", map->base);
	pr_debug("limit=0x%x", map->limit);
	pr_debug("ctl=0x%x", map->ctl);
	pr_debug("intlv=0x%x", map->intlv);

	for (i = 0; i < MAX_COH_ST_CHANNELS; i++)
		pr_debug("remap_array[%u]=0x%x", i, map->remap_array[i]);

	pr_debug("intlv_bit_pos=%u", map->intlv_bit_pos);
	pr_debug("num_intlv_chan=%u", map->num_intlv_chan);
	pr_debug("num_intlv_dies=%u", map->num_intlv_dies);
	pr_debug("num_intlv_sockets=%u", map->num_intlv_sockets);
	pr_debug("total_intlv_chan=%u", map->total_intlv_chan);
	pr_debug("total_intlv_bits=%u", map->total_intlv_bits);
}
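/*
 * Entry point: read, decode, dump, and validate the DRAM address map for the
 * address being translated in @ctx.
 */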
int get_address_map(struct addr_ctx *ctx)
{
	int ret;

	ret = get_address_map_common(ctx);
	if (ret)
		return ret;

	ret = get_global_map_data(ctx);
	if (ret)
		return ret;

	dump_address_map(&ctx->map);

	ret = validate_address_map(ctx);

	return ret;
}