src/soc/intel/alderlake/meminit.c

/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <console/console.h>
#include <fsp/util.h>
#include <soc/meminit.h>
#include <string.h>

#define LPX_PHYSICAL_CH_WIDTH	16
#define LPX_CHANNELS		CHANNEL_COUNT(LPX_PHYSICAL_CH_WIDTH)

#define DDR4_PHYSICAL_CH_WIDTH	64
#define DDR4_CHANNELS		CHANNEL_COUNT(DDR4_PHYSICAL_CH_WIDTH)

#define DDR5_PHYSICAL_CH_WIDTH	64 /* 32*2 */
#define DDR5_CHANNELS		CHANNEL_COUNT(DDR5_PHYSICAL_CH_WIDTH)

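/* Apply board-specific RComp overrides; entries left at zero keep the existing UPD values. */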
static void set_rcomp_config(FSP_M_CONFIG *mem_cfg, const struct mb_cfg *mb_cfg)
{
	if (mb_cfg->rcomp.resistor != 0)
		mem_cfg->RcompResistor = mb_cfg->rcomp.resistor;

	for (size_t i = 0; i < ARRAY_SIZE(mem_cfg->RcompTarget); i++) {
		if (mb_cfg->rcomp.targets[i] != 0)
			mem_cfg->RcompTarget[i] = mb_cfg->rcomp.targets[i];
	}
}

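/* Memory-technology-specific UPD defaults for LPDDR4x, LPDDR5x and DDR4/DDR5. */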
static void meminit_lp4x(FSP_M_CONFIG *mem_cfg)
{
	mem_cfg->DqPinsInterleaved = 0;
}

static void meminit_lp5x(FSP_M_CONFIG *mem_cfg, const struct mem_lp5x_config *lp5x_config)
{
	mem_cfg->DqPinsInterleaved = 0;
	mem_cfg->Lp5CccConfig = lp5x_config->ccc_config;
}

static void meminit_ddr(FSP_M_CONFIG *mem_cfg, const struct mem_ddr_config *ddr_config)
{
	mem_cfg->DqPinsInterleaved = ddr_config->dq_pins_interleaved;
}

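/*
 * Per memory type: number of physical channels, physical-to-MRC channel mapping and
 * memory-down PHY masks, consumed by mem_populate_channel_data().
 */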
static const struct soc_mem_cfg soc_mem_cfg[] = {
	[MEM_TYPE_DDR4] = {
		.num_phys_channels = DDR4_CHANNELS,
		.phys_to_mrc_map = {
			[0] = 0,
			[1] = 4,
		},
		.md_phy_masks = {
			/*
			 * Only physical channel 0 is populated in case of half-populated
			 * configuration.
			 */
			.half_channel = BIT(0),
			/* In mixed topologies, either channel 0 or 1 can be memory-down. */
			.mixed_topo = BIT(0) | BIT(1),
		},
	},
	[MEM_TYPE_DDR5] = {
		.num_phys_channels = DDR5_CHANNELS,
		.phys_to_mrc_map = {
			[0] = 0,
			[1] = 4,
		},
		.md_phy_masks = {
			/*
			 * Only channel 0 is populated in case of half-populated
			 * configuration.
			 */
			.half_channel = BIT(0),
			/* In mixed topologies, either channel 0 or 1 can be memory-down. */
			.mixed_topo = BIT(0) | BIT(1),
		},
	},
	[MEM_TYPE_LP4X] = {
		.num_phys_channels = LPX_CHANNELS,
		.phys_to_mrc_map = {
			[0] = 0,
			[1] = 1,
			[2] = 2,
			[3] = 3,
			[4] = 4,
			[5] = 5,
			[6] = 6,
			[7] = 7,
		},
		.md_phy_masks = {
			/*
			 * Physical channels 0, 1, 2 and 3 are populated in case of
			 * half-populated configurations.
			 */
			.half_channel = BIT(0) | BIT(1) | BIT(2) | BIT(3),
			/* LP4x does not support mixed topologies. */
		},
	},
	[MEM_TYPE_LP5X] = {
		.num_phys_channels = LPX_CHANNELS,
		.phys_to_mrc_map = {
			[0] = 0,
			[1] = 1,
			[2] = 2,
			[3] = 3,
			[4] = 4,
			[5] = 5,
			[6] = 6,
			[7] = 7,
		},
		.md_phy_masks = {
			/*
			 * Physical channels 0, 1, 2 and 3 are populated in case of
			 * half-populated configurations.
			 */
			.half_channel = BIT(0) | BIT(1) | BIT(2) | BIT(3),
			/* LP5x does not support mixed topologies. */
		},
	},
};

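/*
 * Point each MRC channel/DIMM SPD UPD at the SPD data gathered for it and disable any
 * channel that ends up with no SPD on either DIMM slot.
 */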
static void mem_init_spd_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data)
{
	uint32_t *spd_upds[MRC_CHANNELS][CONFIG_DIMMS_PER_CHANNEL] = {
		[0] = { &mem_cfg->MemorySpdPtr000, &mem_cfg->MemorySpdPtr001, },
		[1] = { &mem_cfg->MemorySpdPtr010, &mem_cfg->MemorySpdPtr011, },
		[2] = { &mem_cfg->MemorySpdPtr020, &mem_cfg->MemorySpdPtr021, },
		[3] = { &mem_cfg->MemorySpdPtr030, &mem_cfg->MemorySpdPtr031, },
		[4] = { &mem_cfg->MemorySpdPtr100, &mem_cfg->MemorySpdPtr101, },
		[5] = { &mem_cfg->MemorySpdPtr110, &mem_cfg->MemorySpdPtr111, },
		[6] = { &mem_cfg->MemorySpdPtr120, &mem_cfg->MemorySpdPtr121, },
		[7] = { &mem_cfg->MemorySpdPtr130, &mem_cfg->MemorySpdPtr131, },
	};
	uint8_t *disable_channel_upds[MRC_CHANNELS] = {
		&mem_cfg->DisableMc0Ch0,
		&mem_cfg->DisableMc0Ch1,
		&mem_cfg->DisableMc0Ch2,
		&mem_cfg->DisableMc0Ch3,
		&mem_cfg->DisableMc1Ch0,
		&mem_cfg->DisableMc1Ch1,
		&mem_cfg->DisableMc1Ch2,
		&mem_cfg->DisableMc1Ch3,
	};
	size_t ch, dimm;

	mem_cfg->MemorySpdDataLen = data->spd_len;

	for (ch = 0; ch < MRC_CHANNELS; ch++) {
		uint8_t *disable_channel_ptr = disable_channel_upds[ch];
		bool enable_channel = false;

		for (dimm = 0; dimm < CONFIG_DIMMS_PER_CHANNEL; dimm++) {
			uint32_t *spd_ptr = spd_upds[ch][dimm];

			*spd_ptr = data->spd[ch][dimm];
			if (*spd_ptr)
				enable_channel = true;
		}
		*disable_channel_ptr = !enable_channel;
	}
}

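/*
 * Copy the board's byte map into each populated channel's UPD; zero the UPD (so the MRC
 * determines the mapping itself) when auto-detection is requested or the channel is
 * unpopulated.
 */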
static void mem_init_dq_dqs_upds(void *upds[MRC_CHANNELS], const void *map, size_t upd_size,
				 const struct mem_channel_data *data, bool auto_detect)
{
	size_t i;

	for (i = 0; i < MRC_CHANNELS; i++, map += upd_size) {
		if (auto_detect ||
		    !channel_is_populated(i, MRC_CHANNELS, data->ch_population_flags))
			memset(upds[i], 0, upd_size);
		else
			memcpy(upds[i], map, upd_size);
	}
}

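/* Route the board's DQ and DQS CPU-to-DRAM maps into the corresponding per-channel UPDs. */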
static void mem_init_dq_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data,
			     const struct mb_cfg *mb_cfg, bool auto_detect)
{
	void *dq_upds[MRC_CHANNELS] = {
		&mem_cfg->DqMapCpu2DramMc0Ch0,
		&mem_cfg->DqMapCpu2DramMc0Ch1,
		&mem_cfg->DqMapCpu2DramMc0Ch2,
		&mem_cfg->DqMapCpu2DramMc0Ch3,
		&mem_cfg->DqMapCpu2DramMc1Ch0,
		&mem_cfg->DqMapCpu2DramMc1Ch1,
		&mem_cfg->DqMapCpu2DramMc1Ch2,
		&mem_cfg->DqMapCpu2DramMc1Ch3,
	};

	const size_t upd_size = sizeof(mem_cfg->DqMapCpu2DramMc0Ch0);

	_Static_assert(sizeof(mem_cfg->DqMapCpu2DramMc0Ch0) == CONFIG_MRC_CHANNEL_WIDTH,
		       "Incorrect DQ UPD size!");

	mem_init_dq_dqs_upds(dq_upds, mb_cfg->dq_map, upd_size, data, auto_detect);
}

static void mem_init_dqs_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data,
			      const struct mb_cfg *mb_cfg, bool auto_detect)
{
	void *dqs_upds[MRC_CHANNELS] = {
		&mem_cfg->DqsMapCpu2DramMc0Ch0,
		&mem_cfg->DqsMapCpu2DramMc0Ch1,
		&mem_cfg->DqsMapCpu2DramMc0Ch2,
		&mem_cfg->DqsMapCpu2DramMc0Ch3,
		&mem_cfg->DqsMapCpu2DramMc1Ch0,
		&mem_cfg->DqsMapCpu2DramMc1Ch1,
		&mem_cfg->DqsMapCpu2DramMc1Ch2,
		&mem_cfg->DqsMapCpu2DramMc1Ch3,
	};

	const size_t upd_size = sizeof(mem_cfg->DqsMapCpu2DramMc0Ch0);

	_Static_assert(sizeof(mem_cfg->DqsMapCpu2DramMc0Ch0) == CONFIG_MRC_CHANNEL_WIDTH / 8,
		       "Incorrect DQS UPD size!");

	mem_init_dq_dqs_upds(dqs_upds, mb_cfg->dqs_map, upd_size, data, auto_detect);
}

#define DDR5_CH_DIMM_OFFSET(ch, dimm) ((ch) * CONFIG_DIMMS_PER_CHANNEL + (dimm))

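/*
 * For socketed DDR5 modules, hand each DIMM's SMBus address to the MRC via
 * SpdAddressTable so it can read the SPD itself, and let it auto-detect the DQ/DQS maps.
 */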
static void ddr5_fill_dimm_module_info(FSP_M_CONFIG *mem_cfg, const struct mb_cfg *mb_cfg,
				       const struct mem_spd *spd_info)
{
	for (size_t ch = 0; ch < soc_mem_cfg[MEM_TYPE_DDR5].num_phys_channels; ch++) {
		for (size_t dimm = 0; dimm < CONFIG_DIMMS_PER_CHANNEL; dimm++) {
			size_t mrc_ch = soc_mem_cfg[MEM_TYPE_DDR5].phys_to_mrc_map[ch];
			mem_cfg->SpdAddressTable[DDR5_CH_DIMM_OFFSET(mrc_ch, dimm)] =
				spd_info->smbus[ch].addr_dimm[dimm] << 1;
		}
	}
	mem_init_dq_upds(mem_cfg, NULL, mb_cfg, true);
	mem_init_dqs_upds(mem_cfg, NULL, mb_cfg, true);
}

/*
 * A memory channel will be disabled if the corresponding bit in ch_disable_mask is set.
 */
static void mem_init_override_channel_mask(FSP_M_CONFIG *mem_cfg)
{
	uint8_t *disable_channel_upds[MRC_CHANNELS] = {
		&mem_cfg->DisableMc0Ch0,
		&mem_cfg->DisableMc0Ch1,
		&mem_cfg->DisableMc0Ch2,
		&mem_cfg->DisableMc0Ch3,
		&mem_cfg->DisableMc1Ch0,
		&mem_cfg->DisableMc1Ch1,
		&mem_cfg->DisableMc1Ch2,
		&mem_cfg->DisableMc1Ch3,
	};

	uint8_t ch_disable_mask = mb_get_channel_disable_mask();
	if (ch_disable_mask == 0)
		return;

	/* Mc0Ch0 cannot be disabled. */
	if (ch_disable_mask & BIT(0)) {
		printk(BIOS_ERR, "Cannot disable the first memory channel (Mc0Ch0).\n");
		return;
	}

	for (size_t ch = 0; ch < MRC_CHANNELS; ch++) {
		if (ch_disable_mask & BIT(ch))
			*disable_channel_upds[ch] = 1;
	}
}

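/*
 * Top-level memory init helper: fill the FSP-M memory UPDs from the board's mb_cfg and
 * SPD information for the configured memory type.
 */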
void memcfg_init(FSPM_UPD *memupd, const struct mb_cfg *mb_cfg,
		 const struct mem_spd *spd_info, bool half_populated)
{
	struct mem_channel_data data;
	bool dq_dqs_auto_detect = false;
	FSP_M_CONFIG *mem_cfg = &memupd->FspmConfig;

#if CONFIG(SOC_INTEL_RAPTORLAKE) && !CONFIG(FSP_USE_REPO)
	mem_cfg->CsPiStartHighinEct = mb_cfg->cs_pi_start_high_in_ect;
#endif
	mem_cfg->ECT = mb_cfg->ect;
	mem_cfg->UserBd = mb_cfg->UserBd;
	set_rcomp_config(mem_cfg, mb_cfg);

	/* Fill command mirror for memory */
	mem_cfg->CmdMirror = mb_cfg->CmdMirror;

	/* Fill LpDdrDqDqs retraining for memory */
	mem_cfg->LpDdrDqDqsReTraining = mb_cfg->LpDdrDqDqsReTraining;

	switch (mb_cfg->type) {
	case MEM_TYPE_DDR4:
		meminit_ddr(mem_cfg, &mb_cfg->ddr_config);
		dq_dqs_auto_detect = true;
		break;
	case MEM_TYPE_DDR5:
		meminit_ddr(mem_cfg, &mb_cfg->ddr_config);
		dq_dqs_auto_detect = true;
		/*
		 * TODO: Drop this workaround once the SMBus driver in coreboot is updated
		 * to support DDR5 EEPROM reading.
		 */
		if (spd_info->topo == MEM_TOPO_DIMM_MODULE) {
			ddr5_fill_dimm_module_info(mem_cfg, mb_cfg, spd_info);
			return;
		}
		break;
	case MEM_TYPE_LP4X:
		meminit_lp4x(mem_cfg);
		break;
	case MEM_TYPE_LP5X:
		meminit_lp5x(mem_cfg, &mb_cfg->lp5x_config);
		break;
	default:
		die("Unsupported memory type (%d)\n", mb_cfg->type);
	}

	mem_populate_channel_data(memupd, &soc_mem_cfg[mb_cfg->type], spd_info, half_populated,
				  &data);
	mem_init_spd_upds(mem_cfg, &data);
	mem_init_override_channel_mask(mem_cfg);
	mem_init_dq_upds(mem_cfg, &data, mb_cfg, dq_dqs_auto_detect);
	mem_init_dqs_upds(mem_cfg, &data, mb_cfg, dq_dqs_auto_detect);
}