1 /* SPDX-License-Identifier: GPL-2.0-only */
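
/*
 * Generation of the ACPI Component Resource Affinity Table (CRAT) for AMD
 * Picasso. The table advertises the CPU as an HSA processing unit together
 * with memory affinity, cache and TLB topology entries derived from CPUID
 * and data-fabric registers.
 */
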
#include <acpi/acpi.h>
#include <acpi/acpi_crat.h>
#include <acpi/acpi_ivrs.h>
#include <amdblocks/acpi.h>
#include <amdblocks/cpu.h>
#include <amdblocks/data_fabric.h>
#include <amdblocks/ioapic.h>
#include <cpu/amd/cpuid.h>
#include <cpu/cpu.h>
#include <device/device.h>
#include <device/mmio.h>
#include <device/pci_def.h>
#include <device/pci_ops.h>
#include <soc/data_fabric.h>
#include <soc/pci_devs.h>
#include <string.h>
#include <types.h>

static unsigned long gen_crat_hsa_entry(struct acpi_crat_header *crat, unsigned long current)
{
	struct crat_hsa_processing_unit *hsa_entry = (struct crat_hsa_processing_unit *)current;
	memset(hsa_entry, 0, sizeof(struct crat_hsa_processing_unit));

	hsa_entry->flags = CRAT_HSA_PR_FLAG_EN | CRAT_HSA_PR_FLAG_CPU_PRES;
	hsa_entry->wave_front_size = 4;
	hsa_entry->num_cpu_cores = get_cpu_count();
	hsa_entry->length = sizeof(struct crat_hsa_processing_unit);
	crat->total_entries++;

	current += hsa_entry->length;
	return current;
}

static unsigned long create_crat_memory_entry(uint32_t domain, uint64_t region_base,
					      uint64_t region_size, unsigned long current)
{
	struct crat_memory *mem_affinity = (struct crat_memory *)current;
	memset(mem_affinity, 0, sizeof(struct crat_memory));

	mem_affinity->type = CRAT_MEMORY_TYPE;
	mem_affinity->length = sizeof(struct crat_memory);
	mem_affinity->proximity_domain = domain;
	mem_affinity->base_address_low = region_base & 0xffffffff;
	mem_affinity->base_address_high = (region_base >> 32) & 0xffffffff;
	mem_affinity->length_low = region_size & 0xffffffff;
	mem_affinity->length_high = (region_size >> 32) & 0xffffffff;
	mem_affinity->flags = CRAT_MEM_FLAG_EN;
	mem_affinity->width = 64;

	current += mem_affinity->length;
	return current;
}
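
/*
 * Walk the data fabric DRAM address map and emit one CRAT memory affinity
 * entry per enabled range, splitting the first range around the legacy VGA
 * area and placing the remainder of a hoisted range above 4 GiB.
 */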
static unsigned long gen_crat_memory_entries(struct acpi_crat_header *crat,
					     unsigned long current)
{
	uint32_t dram_base_reg, dram_limit_reg, dram_hole_ctl;
	uint64_t memory_length, memory_base, hole_base, size_below_hole;
	size_t new_entries = 0;

	for (size_t dram_map_idx = 0; dram_map_idx < PICASSO_NUM_DRAM_REG;
	     dram_map_idx++) {
		dram_base_reg =
			data_fabric_read32(DF_DRAM_BASE(dram_map_idx), IOMS0_FABRIC_ID);

		if (dram_base_reg & DRAM_BASE_REG_VALID) {
			dram_limit_reg = data_fabric_read32(DF_DRAM_LIMIT(dram_map_idx),
							    IOMS0_FABRIC_ID);
			memory_length =
				((dram_limit_reg & DRAM_LIMIT_ADDR) >> DRAM_LIMIT_ADDR_SHFT) + 1
				- ((dram_base_reg & DRAM_BASE_ADDR) >> DRAM_BASE_ADDR_SHFT);
			memory_length = memory_length << 28;
			memory_base = (uint64_t)(dram_base_reg & DRAM_BASE_ADDR)
				      << (28 - DRAM_BASE_ADDR_SHFT);

			if (memory_base == 0) {
				/* Split off conventional memory below the VGA area. */
				current =
					create_crat_memory_entry(0, 0ull, 0xa0000ull, current);
				memory_base = 1 * MiB;
				memory_length = memory_base;
				new_entries++;
			}

			if (dram_base_reg & DRAM_BASE_HOLE_EN) {
				dram_hole_ctl = data_fabric_read32(DF_DRAM_HOLE_CTL,
								   IOMS0_FABRIC_ID);
				hole_base = (dram_hole_ctl & DRAM_HOLE_CTL_BASE);
				size_below_hole = hole_base - memory_base;
				current = create_crat_memory_entry(0, memory_base,
								   size_below_hole, current);
				/* DRAM hoisted above the MMIO hole resumes at 4 GiB. */
				memory_length = (uint64_t)(((dram_limit_reg & DRAM_LIMIT_ADDR)
							    >> DRAM_LIMIT_ADDR_SHFT)
							   + 1 - 0x10)
						<< 28;
				memory_base = 0x100000000;
				new_entries++;
			}

			current = create_crat_memory_entry(0, memory_base, memory_length,
							   current);
			new_entries++;
		}
	}
	crat->total_entries += new_entries;

	return current;
}

static unsigned long add_crat_cache_entry(struct crat_cache **cache_affinity,
					  unsigned long current)
{
	*cache_affinity = (struct crat_cache *)current;
	memset(*cache_affinity, 0, sizeof(struct crat_cache));

	(*cache_affinity)->type = CRAT_CACHE_TYPE;
	(*cache_affinity)->length = sizeof(struct crat_cache);
	(*cache_affinity)->flags = CRAT_CACHE_FLAG_EN | CRAT_CACHE_FLAG_CPU_CACHE;

	current += sizeof(struct crat_cache);
	return current;
}
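
/*
 * Translate the encoded L2/L3 cache and TLB associativity reported by CPUID
 * into a number of ways. Values 0 through 4 already are the way count, 0xF
 * means fully associative (reported as 0xFF) and reserved encodings yield 0.
 */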
static uint8_t get_associativity(uint32_t encoded_associativity)
{
	uint8_t associativity = 0;

	switch (encoded_associativity) {
	case 0:
	case 1:
	case 2:
	case 3:
	case 4:
		return encoded_associativity;
	case 5:
		associativity = 6;
		break;
	case 6:
		associativity = 8;
		break;
	case 8:
		associativity = 16;
		break;
	case 0xA:
		associativity = 32;
		break;
	case 0xB:
		associativity = 48;
		break;
	case 0xC:
		associativity = 64;
		break;
	case 0xD:
		associativity = 96;
		break;
	case 0xE:
		associativity = 128;
		break;
	case 0xF:
		associativity = 0xFF;
		break;
	default:
		return 0;
	}

	return associativity;
}
static unsigned long gen_crat_cache_entry(struct acpi_crat_header *crat, unsigned long current)
{
	size_t total_num_threads, num_threads_sharing0, num_threads_sharing1,
		num_threads_sharing2, num_threads_sharing3, thread, new_entries;
	struct cpuid_result cache_props0, cache_props1, cache_props2, cache_props3;
	uint8_t sibling_mask = 0;
	uint32_t l1_data_cache_ids, l1_inst_cache_ids, l2_cache_ids, l3_cache_ids;
	struct crat_cache *cache_affinity = NULL;

	total_num_threads = get_cpu_count();

	cache_props0 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_0);
	cache_props1 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_1);
	cache_props2 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_2);
	cache_props3 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_3);

	l1_data_cache_ids = cpuid_ecx(CPUID_L1_TLB_CACHE_IDS);
	l1_inst_cache_ids = cpuid_edx(CPUID_L1_TLB_CACHE_IDS);
	l2_cache_ids = cpuid_ecx(CPUID_L2_L3_CACHE_L2_TLB_IDS);
	l3_cache_ids = cpuid_edx(CPUID_L2_L3_CACHE_L2_TLB_IDS);

	num_threads_sharing0 =
		((cache_props0.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;
	num_threads_sharing1 =
		((cache_props1.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;
	num_threads_sharing2 =
		((cache_props2.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;
	num_threads_sharing3 =
		((cache_props3.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;

	new_entries = 0;
	for (thread = 0; thread < total_num_threads; thread++) {
		/* L1 data cache */
		if (thread % num_threads_sharing0 == 0) {
			current = add_crat_cache_entry(&cache_affinity, current);
			new_entries++;

			cache_affinity->flags |= CRAT_CACHE_FLAG_DATA_CACHE;
			cache_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing0; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			cache_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			cache_affinity->cache_properties =
				(cache_props0.edx & CACHE_INCLUSIVE_MASK) ? 2 : 0;
			cache_affinity->cache_size =
				(l1_data_cache_ids & L1_DC_SIZE_MASK) >> L1_DC_SIZE_SHFT;
			cache_affinity->cache_level = CRAT_L1_CACHE;
			cache_affinity->lines_per_tag =
				(l1_data_cache_ids & L1_DC_LINE_TAG_MASK)
				>> L1_DC_LINE_TAG_SHFT;
			cache_affinity->cache_line_size =
				(l1_data_cache_ids & L1_DC_LINE_SIZE_MASK)
				>> L1_DC_LINE_SIZE_SHFT;
			cache_affinity->associativity =
				(l1_data_cache_ids & L1_DC_ASSOC_MASK) >> L1_DC_ASSOC_SHFT;
			cache_affinity->cache_latency = 1;
		}

		/* L1 instruction cache */
		if (thread % num_threads_sharing1 == 0) {
			current = add_crat_cache_entry(&cache_affinity, current);
			new_entries++;

			cache_affinity->flags |= CRAT_CACHE_FLAG_INSTR_CACHE;
			cache_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing1; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			cache_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			cache_affinity->cache_properties =
				(cache_props1.edx & CACHE_INCLUSIVE_MASK) ? 2 : 0;
			cache_affinity->cache_size =
				(l1_inst_cache_ids & L1_IC_SIZE_MASK) >> L1_IC_SIZE_SHFT;
			cache_affinity->cache_level = CRAT_L1_CACHE;
			cache_affinity->lines_per_tag =
				(l1_inst_cache_ids & L1_IC_LINE_TAG_MASK)
				>> L1_IC_LINE_TAG_SHFT;
			cache_affinity->cache_line_size =
				(l1_inst_cache_ids & L1_IC_LINE_SIZE_MASK)
				>> L1_IC_LINE_SIZE_SHFT;
			cache_affinity->associativity =
				(l1_inst_cache_ids & L1_IC_ASSOC_MASK) >> L1_IC_ASSOC_SHFT;
			cache_affinity->cache_latency = 1;
		}

		/* L2 cache */
		if (thread % num_threads_sharing2 == 0) {
			current = add_crat_cache_entry(&cache_affinity, current);
			new_entries++;

			cache_affinity->flags |=
				CRAT_CACHE_FLAG_DATA_CACHE | CRAT_CACHE_FLAG_INSTR_CACHE;
			cache_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing2; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			cache_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			cache_affinity->cache_properties =
				(cache_props2.edx & CACHE_INCLUSIVE_MASK) ? 2 : 0;
			cache_affinity->cache_size =
				(l2_cache_ids & L2_DC_SIZE_MASK) >> L2_DC_SIZE_SHFT;
			cache_affinity->cache_level = CRAT_L2_CACHE;
			cache_affinity->lines_per_tag =
				(l2_cache_ids & L2_DC_LINE_TAG_MASK) >> L2_DC_LINE_TAG_SHFT;
			cache_affinity->cache_line_size =
				(l2_cache_ids & L2_DC_LINE_SIZE_MASK) >> L2_DC_LINE_SIZE_SHFT;
			cache_affinity->associativity = get_associativity(
				(l2_cache_ids & L2_DC_ASSOC_MASK) >> L2_DC_ASSOC_SHFT);
			cache_affinity->cache_latency = 1;
		}

		/* L3 cache */
		if (thread % num_threads_sharing3 == 0) {
			current = add_crat_cache_entry(&cache_affinity, current);
			new_entries++;

			cache_affinity->flags |=
				CRAT_CACHE_FLAG_DATA_CACHE | CRAT_CACHE_FLAG_INSTR_CACHE;
			cache_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing3; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			cache_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			cache_affinity->cache_properties =
				(cache_props3.edx & CACHE_INCLUSIVE_MASK) ? 2 : 0;
			cache_affinity->cache_size =
				((l3_cache_ids & L3_DC_SIZE_MASK) >> L3_DC_SIZE_SHFT) * 512;
			cache_affinity->cache_level = CRAT_L3_CACHE;
			cache_affinity->lines_per_tag =
				(l3_cache_ids & L3_DC_LINE_TAG_MASK) >> L3_DC_LINE_TAG_SHFT;
			cache_affinity->cache_line_size =
				(l3_cache_ids & L3_DC_LINE_SIZE_MASK) >> L3_DC_LINE_SIZE_SHFT;
			cache_affinity->associativity = get_associativity(
				(l3_cache_ids & L3_DC_ASSOC_MASK) >> L3_DC_ASSOC_SHFT);
			cache_affinity->cache_latency = 1;
		}
	}
	crat->total_entries += new_entries;

	return current;
}
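
/*
 * CRAT encodes large TLBs in units of 256 entries: when the raw CPUID entry
 * count is 256 or more, scale it down and set the matching base-256 flag for
 * the given page size in the TLB entry.
 */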
static uint8_t get_tlb_size(enum tlb_type type, struct crat_tlb *crat_tlb_entry,
			    uint16_t raw_assoc_size)
{
	uint8_t tlbsize;

	if (raw_assoc_size >= 256) {
		tlbsize = (uint8_t)(raw_assoc_size / 256);

		if (type == tlb_2m)
			crat_tlb_entry->flags |= CRAT_TLB_FLAG_2MB_BASE_256;
		else if (type == tlb_4k)
			crat_tlb_entry->flags |= CRAT_TLB_FLAG_4K_BASE_256;
		else if (type == tlb_1g)
			crat_tlb_entry->flags |= CRAT_TLB_FLAG_1GB_BASE_256;
	} else {
		tlbsize = (uint8_t)(raw_assoc_size);
	}

	return tlbsize;
}

static unsigned long add_crat_tlb_entry(struct crat_tlb **tlb_affinity, unsigned long current)
{
	*tlb_affinity = (struct crat_tlb *)current;
	memset(*tlb_affinity, 0, sizeof(struct crat_tlb));

	(*tlb_affinity)->type = CRAT_TLB_TYPE;
	(*tlb_affinity)->length = sizeof(struct crat_tlb);
	(*tlb_affinity)->flags = CRAT_TLB_FLAG_EN | CRAT_TLB_FLAG_CPU_TLB;

	current += sizeof(struct crat_tlb);
	return current;
}
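
/*
 * Emit CRAT TLB entries for the L1 data, L1 instruction and L2 TLBs, filling
 * in the per-page-size (4 KiB, 2 MiB, 1 GiB) associativity and size reported
 * by CPUID for each group of threads sharing the structure.
 */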
static unsigned long gen_crat_tlb_entry(struct acpi_crat_header *crat, unsigned long current)
{
	size_t total_num_threads, num_threads_sharing0, num_threads_sharing1,
		num_threads_sharing2, thread, new_entries;
	struct cpuid_result cache_props0, cache_props1, cache_props2;
	uint8_t sibling_mask = 0;
	uint32_t l1_tlb_2M4M_ids, l1_tlb_4K_ids, l2_tlb_2M4M_ids, l2_tlb_4K_ids, l1_tlb_1G_ids,
		l2_tlb_1G_ids;
	struct crat_tlb *tlb_affinity = NULL;

	total_num_threads = get_cpu_count();
	cache_props0 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_0);
	cache_props1 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_1);
	cache_props2 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_2);

	l1_tlb_2M4M_ids = cpuid_eax(CPUID_L1_TLB_CACHE_IDS);
	l2_tlb_2M4M_ids = cpuid_eax(CPUID_L2_L3_CACHE_L2_TLB_IDS);
	l1_tlb_4K_ids = cpuid_ebx(CPUID_L1_TLB_CACHE_IDS);
	l2_tlb_4K_ids = cpuid_ebx(CPUID_L2_L3_CACHE_L2_TLB_IDS);
	l1_tlb_1G_ids = cpuid_eax(CPUID_TLB_L1L2_1G_IDS);
	l2_tlb_1G_ids = cpuid_ebx(CPUID_TLB_L1L2_1G_IDS);

	num_threads_sharing0 =
		((cache_props0.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;
	num_threads_sharing1 =
		((cache_props1.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;
	num_threads_sharing2 =
		((cache_props2.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;

	new_entries = 0;
	for (thread = 0; thread < total_num_threads; thread++) {
		/* L1 data TLB */
		if (thread % num_threads_sharing0 == 0) {
			current = add_crat_tlb_entry(&tlb_affinity, current);
			new_entries++;

			tlb_affinity->flags |= CRAT_TLB_FLAG_DATA_TLB;
			tlb_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing0; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			tlb_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			tlb_affinity->tlb_level = CRAT_L1_CACHE;

			tlb_affinity->data_tlb_2mb_assoc =
				(l1_tlb_2M4M_ids & L1_DAT_TLB_2M4M_ASSOC_MASK)
				>> L1_DAT_TLB_2M4M_ASSOC_SHFT;
			tlb_affinity->data_tlb_2mb_size =
				get_tlb_size(tlb_2m, tlb_affinity,
					     (l1_tlb_2M4M_ids & L1_DAT_TLB_2M4M_SIZE_MASK)
					     >> L1_DAT_TLB_2M4M_SIZE_SHFT);

			tlb_affinity->data_tlb_4k_assoc =
				(l1_tlb_4K_ids & L1_DAT_TLB_4K_ASSOC_MASK)
				>> L1_DAT_TLB_4K_ASSOC_SHFT;
			tlb_affinity->data_tlb_4k_size =
				get_tlb_size(tlb_4k, tlb_affinity,
					     (l1_tlb_4K_ids & L1_DAT_TLB_4K_SIZE_MASK)
					     >> L1_DAT_TLB_4K_SIZE_SHFT);

			tlb_affinity->data_tlb_1g_assoc =
				(l1_tlb_1G_ids & L1_DAT_TLB_1G_ASSOC_MASK)
				>> L1_DAT_TLB_1G_ASSOC_SHFT;
			tlb_affinity->data_tlb_1g_size =
				get_tlb_size(tlb_1g, tlb_affinity,
					     (l1_tlb_1G_ids & L1_DAT_TLB_1G_SIZE_MASK)
					     >> L1_DAT_TLB_1G_SIZE_SHFT);
		}

		/* L1 instruction TLB */
		if (thread % num_threads_sharing1 == 0) {
			current = add_crat_tlb_entry(&tlb_affinity, current);
			new_entries++;

			tlb_affinity->flags |= CRAT_TLB_FLAG_INSTR_TLB;
			tlb_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing1; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			tlb_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			tlb_affinity->tlb_level = CRAT_L1_CACHE;
			tlb_affinity->instr_tlb_2mb_assoc =
				(l1_tlb_2M4M_ids & L1_INST_TLB_2M4M_ASSOC_MASK)
				>> L1_INST_TLB_2M4M_ASSOC_SHFT;
			tlb_affinity->instr_tlb_2mb_size =
				get_tlb_size(tlb_2m, tlb_affinity,
					     (l1_tlb_2M4M_ids & L1_INST_TLB_2M4M_SIZE_MASK)
					     >> L1_INST_TLB_2M4M_SIZE_SHFT);

			tlb_affinity->instr_tlb_4k_assoc =
				(l1_tlb_4K_ids & L1_INST_TLB_4K_ASSOC_MASK)
				>> L1_INST_TLB_4K_ASSOC_SHFT;
			tlb_affinity->instr_tlb_4k_size =
				get_tlb_size(tlb_4k, tlb_affinity,
					     (l1_tlb_4K_ids & L1_INST_TLB_4K_SIZE_MASK)
					     >> L1_INST_TLB_4K_SIZE_SHFT);

			tlb_affinity->instr_tlb_1g_assoc =
				(l1_tlb_1G_ids & L1_INST_TLB_1G_ASSOC_MASK)
				>> L1_INST_TLB_1G_ASSOC_SHFT;
			tlb_affinity->instr_tlb_1g_size =
				get_tlb_size(tlb_1g, tlb_affinity,
					     (l1_tlb_1G_ids & L1_INST_TLB_1G_SIZE_MASK)
					     >> L1_INST_TLB_1G_SIZE_SHFT);
		}

		/* L2 data TLB */
		if (thread % num_threads_sharing2 == 0) {
			current = add_crat_tlb_entry(&tlb_affinity, current);
			new_entries++;

			tlb_affinity->flags |= CRAT_TLB_FLAG_DATA_TLB;
			tlb_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing2; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			tlb_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			tlb_affinity->tlb_level = CRAT_L2_CACHE;
			tlb_affinity->data_tlb_2mb_assoc =
				(l2_tlb_2M4M_ids & L2_DAT_TLB_2M4M_ASSOC_MASK)
				>> L2_DAT_TLB_2M4M_ASSOC_SHFT;
			tlb_affinity->data_tlb_2mb_size =
				get_tlb_size(tlb_2m, tlb_affinity,
					     (l2_tlb_2M4M_ids & L2_DAT_TLB_2M4M_SIZE_MASK)
					     >> L2_DAT_TLB_2M4M_SIZE_SHFT);

			tlb_affinity->data_tlb_4k_assoc =
				get_associativity((l2_tlb_4K_ids & L2_DAT_TLB_2M4M_ASSOC_MASK)
						  >> L2_DAT_TLB_4K_ASSOC_SHFT);
			tlb_affinity->data_tlb_4k_size =
				get_tlb_size(tlb_4k, tlb_affinity,
					     (l2_tlb_4K_ids & L2_DAT_TLB_4K_SIZE_MASK)
					     >> L2_DAT_TLB_4K_SIZE_SHFT);

			tlb_affinity->data_tlb_1g_assoc =
				get_associativity((l2_tlb_1G_ids & L2_DAT_TLB_1G_ASSOC_MASK)
						  >> L2_DAT_TLB_1G_ASSOC_SHFT);
			tlb_affinity->data_tlb_1g_size =
				get_tlb_size(tlb_1g, tlb_affinity,
					     (l2_tlb_1G_ids & L2_DAT_TLB_1G_SIZE_MASK)
					     >> L2_DAT_TLB_1G_SIZE_SHFT);
		}

		/* L2 instruction TLB */
		if (thread % num_threads_sharing2 == 0) {
			current = add_crat_tlb_entry(&tlb_affinity, current);
			new_entries++;

			tlb_affinity->flags |= CRAT_TLB_FLAG_INSTR_TLB;
			tlb_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing2; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			tlb_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			tlb_affinity->tlb_level = CRAT_L2_CACHE;
			tlb_affinity->instr_tlb_2mb_assoc = get_associativity(
				(l2_tlb_2M4M_ids & L2_INST_TLB_2M4M_ASSOC_MASK)
				>> L2_INST_TLB_2M4M_ASSOC_SHFT);
			tlb_affinity->instr_tlb_2mb_size =
				get_tlb_size(tlb_2m, tlb_affinity,
					     (l2_tlb_2M4M_ids & L2_INST_TLB_2M4M_SIZE_MASK)
					     >> L2_INST_TLB_2M4M_SIZE_SHFT);

			tlb_affinity->instr_tlb_4k_assoc =
				get_associativity((l2_tlb_4K_ids & L2_INST_TLB_4K_ASSOC_MASK)
						  >> L2_INST_TLB_4K_ASSOC_SHFT);
			tlb_affinity->instr_tlb_4k_size =
				get_tlb_size(tlb_4k, tlb_affinity,
					     (l2_tlb_4K_ids & L2_INST_TLB_4K_SIZE_MASK)
					     >> L2_INST_TLB_4K_SIZE_SHFT);

			tlb_affinity->instr_tlb_1g_assoc =
				get_associativity((l2_tlb_1G_ids & L2_INST_TLB_1G_ASSOC_MASK)
						  >> L2_INST_TLB_1G_ASSOC_SHFT);
			tlb_affinity->instr_tlb_1g_size =
				get_tlb_size(tlb_1g, tlb_affinity,
					     (l2_tlb_1G_ids & L2_INST_TLB_1G_SIZE_MASK)
					     >> L2_INST_TLB_1G_SIZE_SHFT);
		}
	}
	crat->total_entries += new_entries;

	return current;
}

static unsigned long acpi_fill_crat(struct acpi_crat_header *crat, unsigned long current)
{
	current = gen_crat_hsa_entry(crat, current);
	current = gen_crat_memory_entries(crat, current);
	current = gen_crat_cache_entry(crat, current);
	current = gen_crat_tlb_entry(crat, current);

	return current;
}
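
/* Reserve aligned space for the CRAT, build it and hook it into the RSDP. */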
unsigned long acpi_add_crat_table(unsigned long current, acpi_rsdp_t *rsdp)
{
	struct acpi_crat_header *crat;

	current = acpi_align_current(current);
	crat = (struct acpi_crat_header *)current;
	acpi_create_crat(crat, acpi_fill_crat);
	current += crat->header.length;
	acpi_add_table(rsdp, crat);

	return current;
}