// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Intel Corporation.
 *
 * Heterogeneous Memory Attributes Table (HMAT) representation
 *
 * This program parses and reports the platform's HMAT tables, and registers
 * the applicable attributes with the node's interfaces.
 */

#define pr_fmt(fmt) "acpi/hmat: " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/list_sort.h>
#include <linux/memregion.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>
#include <linux/dax.h>
#include <linux/memory-tiers.h>

static u8 hmat_revision;
static int hmat_disable __initdata;

void __init disable_hmat(void)
{
        hmat_disable = 1;
}

static LIST_HEAD(targets);
static LIST_HEAD(initiators);
static LIST_HEAD(localities);

static DEFINE_MUTEX(target_lock);

/*
 * The defined enum order is used to prioritize attributes to break ties when
 * selecting the best performing node.
 */
enum locality_types {
        WRITE_LATENCY,
        READ_LATENCY,
        WRITE_BANDWIDTH,
        READ_BANDWIDTH,
};

static struct memory_locality *localities_types[4];

struct target_cache {
        struct list_head node;
        struct node_cache_attrs cache_attrs;
};

enum {
        NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL = ACCESS_COORDINATE_MAX,
        NODE_ACCESS_CLASS_GENPORT_SINK_CPU,
        NODE_ACCESS_CLASS_MAX,
};

struct memory_target {
        struct list_head node;
        unsigned int memory_pxm;
        unsigned int processor_pxm;
        struct resource memregions;
        struct access_coordinate coord[NODE_ACCESS_CLASS_MAX];
        struct list_head caches;
        struct node_cache_attrs cache_attrs;
        u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
        bool registered;
        bool ext_updated;       /* externally updated */
};

struct memory_initiator {
        struct list_head node;
        unsigned int processor_pxm;
        bool has_cpu;
};

struct memory_locality {
        struct list_head node;
        struct acpi_hmat_locality *hmat_loc;
};

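/*
 * Summary of how the parsed objects relate (descriptive note): each enabled
 * SRAT memory range becomes a memory_target keyed by its memory PXM, each
 * initiator PXM named by the HMAT becomes a memory_initiator, and every
 * memory-hierarchy locality subtable is retained as a memory_locality so
 * the initiator/target matrices can be re-read when nodes come online.
 */
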
static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
{
        struct memory_initiator *initiator;

        list_for_each_entry(initiator, &initiators, node)
                if (initiator->processor_pxm == cpu_pxm)
                        return initiator;
        return NULL;
}

static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
        struct memory_target *target;

        list_for_each_entry(target, &targets, node)
                if (target->memory_pxm == mem_pxm)
                        return target;
        return NULL;
}

static struct memory_target *acpi_find_genport_target(u32 uid)
{
        struct memory_target *target;
        u32 target_uid;
        u8 *uid_ptr;

        list_for_each_entry(target, &targets, node) {
                uid_ptr = target->gen_port_device_handle + 8;
                target_uid = *(u32 *)uid_ptr;
                if (uid == target_uid)
                        return target;
        }

        return NULL;
}

/**
 * acpi_get_genport_coordinates - Retrieve the access coordinates for a generic port
 * @uid: ACPI unique id
 * @coord: The access coordinates written back out for the generic port.
 *         Expects a two-entry array, one per access coordinate class.
 *
 * Return: 0 on success. Errno on failure.
 *
 * Only supports device handles that are ACPI. Assume ACPI0016 HID for CXL.
 */
int acpi_get_genport_coordinates(u32 uid,
                                 struct access_coordinate *coord)
{
        struct memory_target *target;

        guard(mutex)(&target_lock);
        target = acpi_find_genport_target(uid);
        if (!target)
                return -ENOENT;

        coord[ACCESS_COORDINATE_LOCAL] =
                target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL];
        coord[ACCESS_COORDINATE_CPU] =
                target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_CPU];

        return 0;
}
EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, "CXL");

static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
        struct memory_initiator *initiator;

        if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
                return;

        initiator = find_mem_initiator(cpu_pxm);
        if (initiator)
                return;

        initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
        if (!initiator)
                return;

        initiator->processor_pxm = cpu_pxm;
        initiator->has_cpu = node_state(pxm_to_node(cpu_pxm), N_CPU);
        list_add_tail(&initiator->node, &initiators);
}

static __init struct memory_target *alloc_target(unsigned int mem_pxm)
{
        struct memory_target *target;

        target = find_mem_target(mem_pxm);
        if (!target) {
                target = kzalloc(sizeof(*target), GFP_KERNEL);
                if (!target)
                        return NULL;
                target->memory_pxm = mem_pxm;
                target->processor_pxm = PXM_INVAL;
                target->memregions = (struct resource) {
                        .name   = "ACPI mem",
                        .start  = 0,
                        .end    = -1,
                        .flags  = IORESOURCE_MEM,
                };
                list_add_tail(&target->node, &targets);
                INIT_LIST_HEAD(&target->caches);
        }

        return target;
}

static __init void alloc_memory_target(unsigned int mem_pxm,
                                       resource_size_t start,
                                       resource_size_t len)
{
        struct memory_target *target;

        target = alloc_target(mem_pxm);
        if (!target)
                return;

        /*
         * There are potentially multiple ranges per PXM, so record each
         * in the per-target memregions resource tree.
         */
        if (!__request_region(&target->memregions, start, len, "memory target",
                              IORESOURCE_MEM))
                pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
                        start, start + len, mem_pxm);
}

static __init void alloc_genport_target(unsigned int mem_pxm, u8 *handle)
{
        struct memory_target *target;

        target = alloc_target(mem_pxm);
        if (!target)
                return;

        memcpy(target->gen_port_device_handle, handle,
               ACPI_SRAT_DEVICE_HANDLE_SIZE);
}

static __init const char *hmat_data_type(u8 type)
{
        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
                return "Access Latency";
        case ACPI_HMAT_READ_LATENCY:
                return "Read Latency";
        case ACPI_HMAT_WRITE_LATENCY:
                return "Write Latency";
        case ACPI_HMAT_ACCESS_BANDWIDTH:
                return "Access Bandwidth";
        case ACPI_HMAT_READ_BANDWIDTH:
                return "Read Bandwidth";
        case ACPI_HMAT_WRITE_BANDWIDTH:
                return "Write Bandwidth";
        default:
                return "Reserved";
        }
}

static __init const char *hmat_data_type_suffix(u8 type)
{
        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
        case ACPI_HMAT_READ_LATENCY:
        case ACPI_HMAT_WRITE_LATENCY:
                return " nsec";
        case ACPI_HMAT_ACCESS_BANDWIDTH:
        case ACPI_HMAT_READ_BANDWIDTH:
        case ACPI_HMAT_WRITE_BANDWIDTH:
                return " MB/s";
        default:
                return "";
        }
}

static u32 hmat_normalize(u16 entry, u64 base, u8 type)
{
        u32 value;

        /*
         * Check for invalid and overflow values
         */
        if (entry == 0xffff || !entry)
                return 0;
        else if (base > (UINT_MAX / (entry)))
                return 0;

        /*
         * Divide by the base unit for version 1, convert latency from
         * picoseconds to nanoseconds if revision 2.
         */
        value = entry * base;
        if (hmat_revision == 1) {
                if (value < 10)
                        return 0;
                value = DIV_ROUND_UP(value, 10);
        } else if (hmat_revision == 2) {
                switch (type) {
                case ACPI_HMAT_ACCESS_LATENCY:
                case ACPI_HMAT_READ_LATENCY:
                case ACPI_HMAT_WRITE_LATENCY:
                        value = DIV_ROUND_UP(value, 1000);
                        break;
                default:
                        break;
                }
        }

        return value;
}

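/*
 * Worked example (illustrative): with an HMAT revision 2 table, a read
 * latency entry of 100 and an entry base unit of 1000 picoseconds gives
 * value = 100 * 1000 = 100000, which the latency cases above round up to
 * 100 nsec. An entry of 0xffff (or 0) means "not reported" and normalizes
 * to 0.
 */
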
static void hmat_update_target_access(struct memory_target *target,
                                      u8 type, u32 value, int access)
{
        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
                target->coord[access].read_latency = value;
                target->coord[access].write_latency = value;
                break;
        case ACPI_HMAT_READ_LATENCY:
                target->coord[access].read_latency = value;
                break;
        case ACPI_HMAT_WRITE_LATENCY:
                target->coord[access].write_latency = value;
                break;
        case ACPI_HMAT_ACCESS_BANDWIDTH:
                target->coord[access].read_bandwidth = value;
                target->coord[access].write_bandwidth = value;
                break;
        case ACPI_HMAT_READ_BANDWIDTH:
                target->coord[access].read_bandwidth = value;
                break;
        case ACPI_HMAT_WRITE_BANDWIDTH:
                target->coord[access].write_bandwidth = value;
                break;
        default:
                break;
        }
}

int hmat_update_target_coordinates(int nid, struct access_coordinate *coord,
                                   enum access_coordinate_class access)
{
        struct memory_target *target;
        int pxm;

        if (nid == NUMA_NO_NODE)
                return -EINVAL;

        pxm = node_to_pxm(nid);
        guard(mutex)(&target_lock);
        target = find_mem_target(pxm);
        if (!target)
                return -ENODEV;

        hmat_update_target_access(target, ACPI_HMAT_READ_LATENCY,
                                  coord->read_latency, access);
        hmat_update_target_access(target, ACPI_HMAT_WRITE_LATENCY,
                                  coord->write_latency, access);
        hmat_update_target_access(target, ACPI_HMAT_READ_BANDWIDTH,
                                  coord->read_bandwidth, access);
        hmat_update_target_access(target, ACPI_HMAT_WRITE_BANDWIDTH,
                                  coord->write_bandwidth, access);
        target->ext_updated = true;

        return 0;
}
EXPORT_SYMBOL_GPL(hmat_update_target_coordinates);

static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
        struct memory_locality *loc;

        loc = kzalloc(sizeof(*loc), GFP_KERNEL);
        if (!loc) {
                pr_notice_once("Failed to allocate HMAT locality\n");
                return;
        }

        loc->hmat_loc = hmat_loc;
        list_add_tail(&loc->node, &localities);

        switch (hmat_loc->data_type) {
        case ACPI_HMAT_ACCESS_LATENCY:
                localities_types[READ_LATENCY] = loc;
                localities_types[WRITE_LATENCY] = loc;
                break;
        case ACPI_HMAT_READ_LATENCY:
                localities_types[READ_LATENCY] = loc;
                break;
        case ACPI_HMAT_WRITE_LATENCY:
                localities_types[WRITE_LATENCY] = loc;
                break;
        case ACPI_HMAT_ACCESS_BANDWIDTH:
                localities_types[READ_BANDWIDTH] = loc;
                localities_types[WRITE_BANDWIDTH] = loc;
                break;
        case ACPI_HMAT_READ_BANDWIDTH:
                localities_types[READ_BANDWIDTH] = loc;
                break;
        case ACPI_HMAT_WRITE_BANDWIDTH:
                localities_types[WRITE_BANDWIDTH] = loc;
                break;
        default:
                break;
        }
}

static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_pxm,
                                      u8 mem_hier, u8 type, u32 value)
{
        struct memory_target *target = find_mem_target(tgt_pxm);

        if (mem_hier != ACPI_HMAT_MEMORY)
                return;

        if (target && target->processor_pxm == init_pxm) {
                hmat_update_target_access(target, type, value,
                                          ACCESS_COORDINATE_LOCAL);
                /* If the node has a CPU, update access ACCESS_COORDINATE_CPU */
                if (node_state(pxm_to_node(init_pxm), N_CPU))
                        hmat_update_target_access(target, type, value,
                                                  ACCESS_COORDINATE_CPU);
        }
}

static __init int hmat_parse_locality(union acpi_subtable_headers *header,
                                      const unsigned long end)
{
        struct acpi_hmat_locality *hmat_loc = (void *)header;
        unsigned int init, targ, total_size, ipds, tpds;
        u32 *inits, *targs, value;
        u16 *entries;
        u8 type, mem_hier;

        if (hmat_loc->header.length < sizeof(*hmat_loc)) {
                pr_notice("Unexpected locality header length: %u\n",
                          hmat_loc->header.length);
                return -EINVAL;
        }

        type = hmat_loc->data_type;
        mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
        ipds = hmat_loc->number_of_initiator_Pds;
        tpds = hmat_loc->number_of_target_Pds;
        total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
                     sizeof(*inits) * ipds + sizeof(*targs) * tpds;
        if (hmat_loc->header.length < total_size) {
                pr_notice("Unexpected locality header length:%u, minimum required:%u\n",
                          hmat_loc->header.length, total_size);
                return -EINVAL;
        }

        pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
                hmat_loc->flags, hmat_data_type(type), ipds, tpds,
                hmat_loc->entry_base_unit);

        inits = (u32 *)(hmat_loc + 1);
        targs = inits + ipds;
        entries = (u16 *)(targs + tpds);
        for (init = 0; init < ipds; init++) {
                alloc_memory_initiator(inits[init]);
                for (targ = 0; targ < tpds; targ++) {
                        value = hmat_normalize(entries[init * tpds + targ],
                                               hmat_loc->entry_base_unit,
                                               type);
                        pr_info("  Initiator-Target[%u-%u]:%u%s\n",
                                inits[init], targs[targ], value,
                                hmat_data_type_suffix(type));

                        hmat_update_target(targs[targ], inits[init],
                                           mem_hier, type, value);
                }
        }

        if (mem_hier == ACPI_HMAT_MEMORY)
                hmat_add_locality(hmat_loc);

        return 0;
}

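/*
 * Layout of the locality subtable walked above: the fixed header
 * (struct acpi_hmat_locality) is followed by ipds u32 initiator PXMs,
 * then tpds u32 target PXMs, then an ipds x tpds matrix of u16 entries
 * indexed as entries[init * tpds + targ].
 */
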
static __init int hmat_parse_cache(union acpi_subtable_headers *header,
                                   const unsigned long end)
{
        struct acpi_hmat_cache *cache = (void *)header;
        struct memory_target *target;
        struct target_cache *tcache;
        u32 attrs;

        if (cache->header.length < sizeof(*cache)) {
                pr_notice("Unexpected cache header length: %u\n",
                          cache->header.length);
                return -EINVAL;
        }

        attrs = cache->cache_attributes;
        pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
                cache->memory_PD, cache->cache_size, attrs,
                cache->number_of_SMBIOShandles);

        target = find_mem_target(cache->memory_PD);
        if (!target)
                return 0;

        tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
        if (!tcache) {
                pr_notice_once("Failed to allocate HMAT cache info\n");
                return 0;
        }

        tcache->cache_attrs.size = cache->cache_size;
        tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
        tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;

        switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
        case ACPI_HMAT_CA_DIRECT_MAPPED:
                tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
                break;
        case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
                tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
                break;
        case ACPI_HMAT_CA_NONE:
        default:
                tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
                break;
        }

        switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
        case ACPI_HMAT_CP_WB:
                tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
                break;
        case ACPI_HMAT_CP_WT:
                tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
                break;
        case ACPI_HMAT_CP_NONE:
        default:
                tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
                break;
        }
        list_add_tail(&tcache->node, &target->caches);

        return 0;
}

static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
                                              const unsigned long end)
{
        struct acpi_hmat_proximity_domain *p = (void *)header;
        struct memory_target *target = NULL;

        if (p->header.length != sizeof(*p)) {
                pr_notice("Unexpected address range header length: %u\n",
                          p->header.length);
                return -EINVAL;
        }

        if (hmat_revision == 1)
                pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
                        p->reserved3, p->reserved4, p->flags, p->processor_PD,
                        p->memory_PD);
        else
                pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
                        p->flags, p->processor_PD, p->memory_PD);

        if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
            hmat_revision > 1) {
                target = find_mem_target(p->memory_PD);
                if (!target) {
                        pr_debug("Memory Domain missing from SRAT\n");
                        return -EINVAL;
                }
        }
        if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
                int p_node = pxm_to_node(p->processor_PD);

                if (p_node == NUMA_NO_NODE) {
                        pr_debug("Invalid Processor Domain\n");
                        return -EINVAL;
                }
                target->processor_pxm = p->processor_PD;
        }

        return 0;
}

static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
                                      const unsigned long end)
{
        struct acpi_hmat_structure *hdr = (void *)header;

        if (!hdr)
                return -EINVAL;

        switch (hdr->type) {
        case ACPI_HMAT_TYPE_PROXIMITY:
                return hmat_parse_proximity_domain(header, end);
        case ACPI_HMAT_TYPE_LOCALITY:
                return hmat_parse_locality(header, end);
        case ACPI_HMAT_TYPE_CACHE:
                return hmat_parse_cache(header, end);
        default:
                return -EINVAL;
        }
}

static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
                                          const unsigned long end)
{
        struct acpi_srat_mem_affinity *ma = (void *)header;

        if (!ma)
                return -EINVAL;
        if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
                return 0;
        alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length);
        return 0;
}

static __init int srat_parse_genport_affinity(union acpi_subtable_headers *header,
                                              const unsigned long end)
{
        struct acpi_srat_generic_affinity *ga = (void *)header;

        if (!ga)
                return -EINVAL;

        if (!(ga->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED))
                return 0;

        /* Skip PCI device_handle for now */
        if (ga->device_handle_type != 0)
                return 0;

        alloc_genport_target(ga->proximity_domain,
                             (u8 *)ga->device_handle);

        return 0;
}

static u32 hmat_initiator_perf(struct memory_target *target,
                               struct memory_initiator *initiator,
                               struct acpi_hmat_locality *hmat_loc)
{
        unsigned int ipds, tpds, i, idx = 0, tdx = 0;
        u32 *inits, *targs;
        u16 *entries;

        ipds = hmat_loc->number_of_initiator_Pds;
        tpds = hmat_loc->number_of_target_Pds;
        inits = (u32 *)(hmat_loc + 1);
        targs = inits + ipds;
        entries = (u16 *)(targs + tpds);

        for (i = 0; i < ipds; i++) {
                if (inits[i] == initiator->processor_pxm) {
                        idx = i;
                        break;
                }
        }

        if (i == ipds)
                return 0;

        for (i = 0; i < tpds; i++) {
                if (targs[i] == target->memory_pxm) {
                        tdx = i;
                        break;
                }
        }
        if (i == tpds)
                return 0;

        return hmat_normalize(entries[idx * tpds + tdx],
                              hmat_loc->entry_base_unit,
                              hmat_loc->data_type);
}

static bool hmat_update_best(u8 type, u32 value, u32 *best)
{
        bool updated = false;

        if (!value)
                return false;

        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
        case ACPI_HMAT_READ_LATENCY:
        case ACPI_HMAT_WRITE_LATENCY:
                if (!*best || *best > value) {
                        *best = value;
                        updated = true;
                }
                break;
        case ACPI_HMAT_ACCESS_BANDWIDTH:
        case ACPI_HMAT_READ_BANDWIDTH:
        case ACPI_HMAT_WRITE_BANDWIDTH:
                if (!*best || *best < value) {
                        *best = value;
                        updated = true;
                }
                break;
        }

        return updated;
}

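/*
 * Note: a value of 0 means "no data" and never wins. Latencies improve
 * downward and bandwidths improve upward, e.g. a read latency of 80 nsec
 * replaces a previous best of 120 nsec, while 150 nsec would not.
 */
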
static int initiator_cmp(void *priv, const struct list_head *a,
                         const struct list_head *b)
{
        struct memory_initiator *ia;
        struct memory_initiator *ib;

        ia = list_entry(a, struct memory_initiator, node);
        ib = list_entry(b, struct memory_initiator, node);

        return ia->processor_pxm - ib->processor_pxm;
}

static int initiators_to_nodemask(unsigned long *p_nodes)
{
        struct memory_initiator *initiator;

        if (list_empty(&initiators))
                return -ENXIO;

        list_for_each_entry(initiator, &initiators, node)
                set_bit(initiator->processor_pxm, p_nodes);

        return 0;
}

static void hmat_update_target_attrs(struct memory_target *target,
                                     unsigned long *p_nodes, int access)
{
        struct memory_initiator *initiator;
        unsigned int cpu_nid;
        struct memory_locality *loc = NULL;
        u32 best = 0;
        int i;

        /* Don't update if an external agent has changed the data. */
        if (target->ext_updated)
                return;

        /* Don't update for generic port if there's no device handle */
        if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
             access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
            !(*(u16 *)target->gen_port_device_handle))
                return;

        bitmap_zero(p_nodes, MAX_NUMNODES);
        /*
         * If the Address Range Structure provides a local processor pxm, set
         * only that one. Otherwise, find the best performance attributes and
         * collect all initiators that match.
         */
        if (target->processor_pxm != PXM_INVAL) {
                cpu_nid = pxm_to_node(target->processor_pxm);
                if (access == ACCESS_COORDINATE_LOCAL ||
                    node_state(cpu_nid, N_CPU)) {
                        set_bit(target->processor_pxm, p_nodes);
                        return;
                }
        }

        if (list_empty(&localities))
                return;

        /*
         * We need the initiator list sorted so we can use bitmap_clear for
         * previously set initiators when we find a better memory accessor.
         * We'll also use the sorting to prime the candidate nodes with known
         * initiators.
         */
        list_sort(NULL, &initiators, initiator_cmp);
        if (initiators_to_nodemask(p_nodes) < 0)
                return;

        for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
                loc = localities_types[i];
                if (!loc)
                        continue;

                best = 0;
                list_for_each_entry(initiator, &initiators, node) {
                        u32 value;

                        if ((access == ACCESS_COORDINATE_CPU ||
                             access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
                            !initiator->has_cpu) {
                                clear_bit(initiator->processor_pxm, p_nodes);
                                continue;
                        }
                        if (!test_bit(initiator->processor_pxm, p_nodes))
                                continue;

                        value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
                        if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
                                bitmap_clear(p_nodes, 0, initiator->processor_pxm);
                        if (value != best)
                                clear_bit(initiator->processor_pxm, p_nodes);
                }
                if (best)
                        hmat_update_target_access(target, loc->hmat_loc->data_type, best, access);
        }
}

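/*
 * Example of the pruning above (illustrative, read latency only): with
 * initiators sorted by PXM {0, 1, 2} reporting latencies {120, 80, 80},
 * PXM 1 beats PXM 0 so all lower bits are cleared, and PXM 2 ties the best
 * and keeps its bit, leaving p_nodes = {1, 2} to be registered for this
 * access class.
 */
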
static void __hmat_register_target_initiators(struct memory_target *target,
                                              unsigned long *p_nodes,
                                              int access)
{
        unsigned int mem_nid, cpu_nid;
        int i;

        mem_nid = pxm_to_node(target->memory_pxm);
        hmat_update_target_attrs(target, p_nodes, access);
        for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
                cpu_nid = pxm_to_node(i);
                register_memory_node_under_compute_node(mem_nid, cpu_nid, access);
        }
}

static void hmat_update_generic_target(struct memory_target *target)
{
        static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);

        hmat_update_target_attrs(target, p_nodes,
                                 NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL);
        hmat_update_target_attrs(target, p_nodes,
                                 NODE_ACCESS_CLASS_GENPORT_SINK_CPU);
}

static void hmat_register_target_initiators(struct memory_target *target)
{
        static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);

        __hmat_register_target_initiators(target, p_nodes,
                                          ACCESS_COORDINATE_LOCAL);
        __hmat_register_target_initiators(target, p_nodes,
                                          ACCESS_COORDINATE_CPU);
}

static void hmat_register_target_cache(struct memory_target *target)
{
        unsigned mem_nid = pxm_to_node(target->memory_pxm);
        struct target_cache *tcache;

        list_for_each_entry(tcache, &target->caches, node)
                node_add_cache(mem_nid, &tcache->cache_attrs);
}

static void hmat_register_target_perf(struct memory_target *target, int access)
{
        unsigned mem_nid = pxm_to_node(target->memory_pxm);

        node_set_perf_attrs(mem_nid, &target->coord[access], access);
}

static void hmat_register_target_devices(struct memory_target *target)
{
        struct resource *res;

        /*
         * Do not bother creating devices if no driver is available to
         * consume them.
         */
        if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
                return;

        for (res = target->memregions.child; res; res = res->sibling) {
                int target_nid = pxm_to_node(target->memory_pxm);

                hmem_register_resource(target_nid, res);
        }
}

static void hmat_register_target(struct memory_target *target)
{
        int nid = pxm_to_node(target->memory_pxm);

        /*
         * Devices may belong to either an offline or online
         * node, so unconditionally add them.
         */
        hmat_register_target_devices(target);

        /*
         * Register generic port perf numbers. The nid may not be
         * initialized and is still NUMA_NO_NODE.
         */
        mutex_lock(&target_lock);
        if (*(u16 *)target->gen_port_device_handle) {
                hmat_update_generic_target(target);
                target->registered = true;
        }
        mutex_unlock(&target_lock);

        /*
         * Skip offline nodes. This can happen when memory
         * marked EFI_MEMORY_SP, "specific purpose", is applied
         * to all the memory in a proximity domain leading to
         * the node being marked offline / unplugged, or if
         * memory-only "hotplug" node is offline.
         */
        if (nid == NUMA_NO_NODE || !node_online(nid))
                return;

        mutex_lock(&target_lock);
        if (!target->registered) {
                hmat_register_target_initiators(target);
                hmat_register_target_cache(target);
                hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
                hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
                target->registered = true;
        }
        mutex_unlock(&target_lock);
}

static void hmat_register_targets(void)
{
        struct memory_target *target;

        list_for_each_entry(target, &targets, node)
                hmat_register_target(target);
}

static int hmat_callback(struct notifier_block *self,
                         unsigned long action, void *arg)
{
        struct memory_target *target;
        struct memory_notify *mnb = arg;
        int pxm, nid = mnb->status_change_nid;

        if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
                return NOTIFY_OK;

        pxm = node_to_pxm(nid);
        target = find_mem_target(pxm);
        if (!target)
                return NOTIFY_OK;

        hmat_register_target(target);
        return NOTIFY_OK;
}

static int __init hmat_set_default_dram_perf(void)
{
        int rc;
        int nid, pxm;
        struct memory_target *target;
        struct access_coordinate *attrs;

        for_each_node_mask(nid, default_dram_nodes) {
                pxm = node_to_pxm(nid);
                target = find_mem_target(pxm);
                if (!target)
                        continue;
                attrs = &target->coord[ACCESS_COORDINATE_CPU];
                rc = mt_set_default_dram_perf(nid, attrs, "ACPI HMAT");
                if (rc)
                        return rc;
        }

        return 0;
}

static int hmat_calculate_adistance(struct notifier_block *self,
                                    unsigned long nid, void *data)
{
        static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
        struct memory_target *target;
        struct access_coordinate *perf;
        int *adist = data;
        int pxm;

        pxm = node_to_pxm(nid);
        target = find_mem_target(pxm);
        if (!target)
                return NOTIFY_OK;

        mutex_lock(&target_lock);
        hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU);
        mutex_unlock(&target_lock);

        perf = &target->coord[ACCESS_COORDINATE_CPU];

        if (mt_perf_to_adistance(perf, adist))
                return NOTIFY_OK;

        return NOTIFY_STOP;
}

static struct notifier_block hmat_adist_nb __meminitdata = {
        .notifier_call = hmat_calculate_adistance,
};

static __init void hmat_free_structures(void)
{
        struct memory_target *target, *tnext;
        struct memory_locality *loc, *lnext;
        struct memory_initiator *initiator, *inext;
        struct target_cache *tcache, *cnext;

        list_for_each_entry_safe(target, tnext, &targets, node) {
                struct resource *res, *res_next;

                list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
                        list_del(&tcache->node);
                        kfree(tcache);
                }

                list_del(&target->node);
                res = target->memregions.child;
                while (res) {
                        res_next = res->sibling;
                        __release_region(&target->memregions, res->start,
                                         resource_size(res));
                        res = res_next;
                }
                kfree(target);
        }

        list_for_each_entry_safe(initiator, inext, &initiators, node) {
                list_del(&initiator->node);
                kfree(initiator);
        }

        list_for_each_entry_safe(loc, lnext, &localities, node) {
                list_del(&loc->node);
                kfree(loc);
        }
}

static __init int hmat_init(void)
{
        struct acpi_table_header *tbl;
        enum acpi_hmat_type i;
        acpi_status status;

        if (srat_disabled() || hmat_disable)
                return 0;

        status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
        if (ACPI_FAILURE(status))
                return 0;

        if (acpi_table_parse_entries(ACPI_SIG_SRAT,
                                     sizeof(struct acpi_table_srat),
                                     ACPI_SRAT_TYPE_MEMORY_AFFINITY,
                                     srat_parse_mem_affinity, 0) < 0)
                goto out_put;

        if (acpi_table_parse_entries(ACPI_SIG_SRAT,
                                     sizeof(struct acpi_table_srat),
                                     ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY,
                                     srat_parse_genport_affinity, 0) < 0)
                goto out_put;

        acpi_put_table(tbl);

        status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
        if (ACPI_FAILURE(status))
                goto out_put;

        hmat_revision = tbl->revision;
        switch (hmat_revision) {
        case 1:
        case 2:
                break;
        default:
                pr_notice("Ignoring: Unknown revision:%d\n", hmat_revision);
                goto out_put;
        }

        for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
                if (acpi_table_parse_entries(ACPI_SIG_HMAT,
                                             sizeof(struct acpi_table_hmat), i,
                                             hmat_parse_subtable, 0) < 0) {
                        pr_notice("Ignoring: Invalid table");
                        goto out_put;
                }
        }
        hmat_register_targets();

        /* Keep the table and structures if the notifier may use them */
        if (hotplug_memory_notifier(hmat_callback, HMAT_CALLBACK_PRI))
                goto out_put;

        if (!hmat_set_default_dram_perf())
                register_mt_adistance_algorithm(&hmat_adist_nb);

        return 0;
out_put:
        hmat_free_structures();
        acpi_put_table(tbl);
        return 0;
}
subsys_initcall(hmat_init);