drivers/acpi/numa/hmat.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Intel Corporation.
 *
 * Heterogeneous Memory Attributes Table (HMAT) representation
 *
 * This program parses and reports the platform's HMAT tables, and registers
 * the applicable attributes with the node's interfaces.
 */

#define pr_fmt(fmt) "acpi/hmat: " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/list_sort.h>
#include <linux/memregion.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>
#include <linux/dax.h>
#include <linux/memory-tiers.h>
static u8 hmat_revision;
static int hmat_disable __initdata;

void __init disable_hmat(void)
{
	hmat_disable = 1;
}

static LIST_HEAD(targets);
static LIST_HEAD(initiators);
static LIST_HEAD(localities);

static DEFINE_MUTEX(target_lock);
/*
 * The defined enum order is used to prioritize attributes to break ties when
 * selecting the best performing node.
 */
enum locality_types {
	WRITE_LATENCY,
	READ_LATENCY,
	WRITE_BANDWIDTH,
	READ_BANDWIDTH,
};

static struct memory_locality *localities_types[4];
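/*
 * localities_types[] is indexed by enum locality_types. hmat_add_locality()
 * records the most recently parsed locality structure for each attribute, so
 * the best-initiator scan in hmat_update_target_attrs() only has to consult
 * one locality per attribute.
 */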
struct target_cache {
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};

enum {
	NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL = ACCESS_COORDINATE_MAX,
	NODE_ACCESS_CLASS_GENPORT_SINK_CPU,
	NODE_ACCESS_CLASS_MAX,
};
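/*
 * The generic port sink classes extend the standard access coordinate
 * classes (ACCESS_COORDINATE_LOCAL/CPU). They are only used internally to
 * size and index the per-target coord[] array, and their values are
 * retrieved through acpi_get_genport_coordinates() rather than being
 * registered with the node interfaces.
 */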
struct memory_target {
	struct list_head node;
	unsigned int memory_pxm;
	unsigned int processor_pxm;
	struct resource memregions;
	struct access_coordinate coord[NODE_ACCESS_CLASS_MAX];
	struct list_head caches;
	struct node_cache_attrs cache_attrs;
	u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
	bool registered;
	bool ext_updated;	/* externally updated */
};

struct memory_initiator {
	struct list_head node;
	unsigned int processor_pxm;
	bool has_cpu;
};

struct memory_locality {
	struct list_head node;
	struct acpi_hmat_locality *hmat_loc;
};
static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	list_for_each_entry(initiator, &initiators, node)
		if (initiator->processor_pxm == cpu_pxm)
			return initiator;
	return NULL;
}

static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		if (target->memory_pxm == mem_pxm)
			return target;
	return NULL;
}
static struct memory_target *acpi_find_genport_target(u32 uid)
{
	struct memory_target *target;
	u32 target_uid;
	u8 *uid_ptr;

	list_for_each_entry(target, &targets, node) {
		uid_ptr = target->gen_port_device_handle + 8;
		target_uid = *(u32 *)uid_ptr;
		if (uid == target_uid)
			return target;
	}

	return NULL;
}
/**
 * acpi_get_genport_coordinates - Retrieve the access coordinates for a generic port
 * @uid: ACPI unique id
 * @coord: The access coordinates written back out for the generic port.
 *	   Expect 2 levels array.
 *
 * Return: 0 on success. Errno on failure.
 *
 * Only supports device handles that are ACPI. Assume ACPI0016 HID for CXL.
 */
int acpi_get_genport_coordinates(u32 uid,
				 struct access_coordinate *coord)
{
	struct memory_target *target;

	guard(mutex)(&target_lock);
	target = acpi_find_genport_target(uid);
	if (!target)
		return -ENOENT;

	coord[ACCESS_COORDINATE_LOCAL] =
		target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL];
	coord[ACCESS_COORDINATE_CPU] =
		target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_CPU];

	return 0;
}
EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, "CXL");
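/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 *
 *	struct access_coordinate c[ACCESS_COORDINATE_MAX];
 *
 *	if (!acpi_get_genport_coordinates(uid, c)) {
 *		// c[ACCESS_COORDINATE_LOCAL]: coordinates local to the port
 *		// c[ACCESS_COORDINATE_CPU]: coordinates relative to CPU nodes
 *	}
 *
 * The array must have room for two entries, matching the two generic port
 * sink classes copied out above.
 */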
static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
		return;

	initiator = find_mem_initiator(cpu_pxm);
	if (initiator)
		return;

	initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
	if (!initiator)
		return;

	initiator->processor_pxm = cpu_pxm;
	initiator->has_cpu = node_state(pxm_to_node(cpu_pxm), N_CPU);
	list_add_tail(&initiator->node, &initiators);
}
static __init struct memory_target *alloc_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	target = find_mem_target(mem_pxm);
	if (!target) {
		target = kzalloc(sizeof(*target), GFP_KERNEL);
		if (!target)
			return NULL;
		target->memory_pxm = mem_pxm;
		target->processor_pxm = PXM_INVAL;
		target->memregions = (struct resource) {
			.name	= "ACPI mem",
			.start	= 0,
			.end	= -1,
			.flags	= IORESOURCE_MEM,
		};
		list_add_tail(&target->node, &targets);
		INIT_LIST_HEAD(&target->caches);
	}

	return target;
}
static __init void alloc_memory_target(unsigned int mem_pxm,
				       resource_size_t start,
				       resource_size_t len)
{
	struct memory_target *target;

	target = alloc_target(mem_pxm);
	if (!target)
		return;

	/*
	 * There are potentially multiple ranges per PXM, so record each
	 * in the per-target memregions resource tree.
	 */
	if (!__request_region(&target->memregions, start, len, "memory target",
			      IORESOURCE_MEM))
		pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
			start, start + len, mem_pxm);
}

static __init void alloc_genport_target(unsigned int mem_pxm, u8 *handle)
{
	struct memory_target *target;

	target = alloc_target(mem_pxm);
	if (!target)
		return;

	memcpy(target->gen_port_device_handle, handle,
	       ACPI_SRAT_DEVICE_HANDLE_SIZE);
}
static __init const char *hmat_data_type(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		return "Access Latency";
	case ACPI_HMAT_READ_LATENCY:
		return "Read Latency";
	case ACPI_HMAT_WRITE_LATENCY:
		return "Write Latency";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		return "Access Bandwidth";
	case ACPI_HMAT_READ_BANDWIDTH:
		return "Read Bandwidth";
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return "Write Bandwidth";
	default:
		return "Reserved";
	}
}

static __init const char *hmat_data_type_suffix(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		return " nsec";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return " MB/s";
	default:
		return "";
	}
}
static u32 hmat_normalize(u16 entry, u64 base, u8 type)
{
	u32 value;

	/*
	 * Check for invalid and overflow values
	 */
	if (entry == 0xffff || !entry)
		return 0;
	else if (base > (UINT_MAX / (entry)))
		return 0;

	/*
	 * Divide by the base unit for version 1, convert latency from
	 * picoseconds to nanoseconds if revision 2.
	 */
	value = entry * base;
	if (hmat_revision == 1) {
		if (value < 10)
			return 0;
		value = DIV_ROUND_UP(value, 10);
	} else if (hmat_revision == 2) {
		switch (type) {
		case ACPI_HMAT_ACCESS_LATENCY:
		case ACPI_HMAT_READ_LATENCY:
		case ACPI_HMAT_WRITE_LATENCY:
			value = DIV_ROUND_UP(value, 1000);
			break;
		default:
			break;
		}
	}
	return value;
}
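/*
 * Worked example of the normalization above (the numbers are made up): with
 * a revision 2 table, entry_base_unit = 100 and a read latency entry of 250
 * give value = 250 * 100 = 25000 picoseconds, reported as
 * DIV_ROUND_UP(25000, 1000) = 25 nsec. Bandwidth entries in revision 2 are
 * left as entry * base, i.e. already in MB/s. A revision 1 table instead
 * divides every product by 10 and returns 0 for products below 10.
 */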
static void hmat_update_target_access(struct memory_target *target,
				      u8 type, u32 value, int access)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		target->coord[access].read_latency = value;
		target->coord[access].write_latency = value;
		break;
	case ACPI_HMAT_READ_LATENCY:
		target->coord[access].read_latency = value;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		target->coord[access].write_latency = value;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		target->coord[access].read_bandwidth = value;
		target->coord[access].write_bandwidth = value;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		target->coord[access].read_bandwidth = value;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		target->coord[access].write_bandwidth = value;
		break;
	default:
		break;
	}
}
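/*
 * hmat_update_target_coordinates() lets a subsystem outside this parser
 * (the symbol is exported below) overwrite the latency and bandwidth of the
 * target backing @nid for the given @access class. Setting ext_updated makes
 * hmat_update_target_attrs() leave the externally provided numbers alone.
 */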
int hmat_update_target_coordinates(int nid, struct access_coordinate *coord,
				   enum access_coordinate_class access)
{
	struct memory_target *target;
	int pxm;

	if (nid == NUMA_NO_NODE)
		return -EINVAL;

	pxm = node_to_pxm(nid);
	guard(mutex)(&target_lock);
	target = find_mem_target(pxm);
	if (!target)
		return -ENODEV;

	hmat_update_target_access(target, ACPI_HMAT_READ_LATENCY,
				  coord->read_latency, access);
	hmat_update_target_access(target, ACPI_HMAT_WRITE_LATENCY,
				  coord->write_latency, access);
	hmat_update_target_access(target, ACPI_HMAT_READ_BANDWIDTH,
				  coord->read_bandwidth, access);
	hmat_update_target_access(target, ACPI_HMAT_WRITE_BANDWIDTH,
				  coord->write_bandwidth, access);
	target->ext_updated = true;

	return 0;
}
EXPORT_SYMBOL_GPL(hmat_update_target_coordinates);
static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
	struct memory_locality *loc;

	loc = kzalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		pr_notice_once("Failed to allocate HMAT locality\n");
		return;
	}

	loc->hmat_loc = hmat_loc;
	list_add_tail(&loc->node, &localities);

	switch (hmat_loc->data_type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		localities_types[READ_LATENCY] = loc;
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_READ_LATENCY:
		localities_types[READ_LATENCY] = loc;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	default:
		break;
	}
}
static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_pxm,
				      u8 mem_hier, u8 type, u32 value)
{
	struct memory_target *target = find_mem_target(tgt_pxm);

	if (mem_hier != ACPI_HMAT_MEMORY)
		return;

	if (target && target->processor_pxm == init_pxm) {
		hmat_update_target_access(target, type, value,
					  ACCESS_COORDINATE_LOCAL);
		/* If the node has a CPU, update access ACCESS_COORDINATE_CPU */
		if (node_state(pxm_to_node(init_pxm), N_CPU))
			hmat_update_target_access(target, type, value,
						  ACCESS_COORDINATE_CPU);
	}
}
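/*
 * An HMAT locality structure is laid out as the fixed acpi_hmat_locality
 * header immediately followed by an array of ipds initiator proximity
 * domains (u32), an array of tpds target proximity domains (u32), and an
 * ipds x tpds matrix of u16 entries stored row-major by initiator, so entry
 * (i, t) lives at entries[i * tpds + t]. Both hmat_parse_locality() and
 * hmat_initiator_perf() rely on this layout.
 */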
static __init int hmat_parse_locality(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_locality *hmat_loc = (void *)header;
	unsigned int init, targ, total_size, ipds, tpds;
	u32 *inits, *targs, value;
	u16 *entries;
	u8 type, mem_hier;

	if (hmat_loc->header.length < sizeof(*hmat_loc)) {
		pr_notice("Unexpected locality header length: %u\n",
			  hmat_loc->header.length);
		return -EINVAL;
	}

	type = hmat_loc->data_type;
	mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
		     sizeof(*inits) * ipds + sizeof(*targs) * tpds;
	if (hmat_loc->header.length < total_size) {
		pr_notice("Unexpected locality header length:%u, minimum required:%u\n",
			  hmat_loc->header.length, total_size);
		return -EINVAL;
	}

	pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
		hmat_loc->flags, hmat_data_type(type), ipds, tpds,
		hmat_loc->entry_base_unit);

	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);
	for (init = 0; init < ipds; init++) {
		alloc_memory_initiator(inits[init]);
		for (targ = 0; targ < tpds; targ++) {
			value = hmat_normalize(entries[init * tpds + targ],
					       hmat_loc->entry_base_unit,
					       type);
			pr_info("  Initiator-Target[%u-%u]:%u%s\n",
				inits[init], targs[targ], value,
				hmat_data_type_suffix(type));

			hmat_update_target(targs[targ], inits[init],
					   mem_hier, type, value);
		}
	}

	if (mem_hier == ACPI_HMAT_MEMORY)
		hmat_add_locality(hmat_loc);

	return 0;
}
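/*
 * hmat_parse_cache() below unpacks the packed cache_attributes dword with
 * the ACPI_HMAT_* masks: the cache level field is shifted down by 4, the
 * associativity by 8, the write policy by 12 and the cache line size by 16.
 * Each value is translated into the generic node_cache_attrs vocabulary
 * before being queued on the target's cache list.
 */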
static __init int hmat_parse_cache(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_hmat_cache *cache = (void *)header;
	struct memory_target *target;
	struct target_cache *tcache;
	u32 attrs;

	if (cache->header.length < sizeof(*cache)) {
		pr_notice("Unexpected cache header length: %u\n",
			  cache->header.length);
		return -EINVAL;
	}

	attrs = cache->cache_attributes;
	pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
		cache->memory_PD, cache->cache_size, attrs,
		cache->number_of_SMBIOShandles);

	target = find_mem_target(cache->memory_PD);
	if (!target)
		return 0;

	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
	if (!tcache) {
		pr_notice_once("Failed to allocate HMAT cache info\n");
		return 0;
	}

	tcache->cache_attrs.size = cache->cache_size;
	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;

	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
	case ACPI_HMAT_CA_DIRECT_MAPPED:
		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
		break;
	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
		break;
	case ACPI_HMAT_CA_NONE:
	default:
		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
		break;
	}

	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
	case ACPI_HMAT_CP_WB:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
		break;
	case ACPI_HMAT_CP_WT:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
		break;
	case ACPI_HMAT_CP_NONE:
	default:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
		break;
	}
	list_add_tail(&tcache->node, &target->caches);

	return 0;
}
static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
					      const unsigned long end)
{
	struct acpi_hmat_proximity_domain *p = (void *)header;
	struct memory_target *target = NULL;

	if (p->header.length != sizeof(*p)) {
		pr_notice("Unexpected address range header length: %u\n",
			  p->header.length);
		return -EINVAL;
	}

	if (hmat_revision == 1)
		pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
			p->reserved3, p->reserved4, p->flags, p->processor_PD,
			p->memory_PD);
	else
		pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
			p->flags, p->processor_PD, p->memory_PD);

	if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
	    hmat_revision > 1) {
		target = find_mem_target(p->memory_PD);
		if (!target) {
			pr_debug("Memory Domain missing from SRAT\n");
			return -EINVAL;
		}
	}
	if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
		int p_node = pxm_to_node(p->processor_PD);

		if (p_node == NUMA_NO_NODE) {
			pr_debug("Invalid Processor Domain\n");
			return -EINVAL;
		}
		target->processor_pxm = p->processor_PD;
	}

	return 0;
}
static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_structure *hdr = (void *)header;

	if (!hdr)
		return -EINVAL;

	switch (hdr->type) {
	case ACPI_HMAT_TYPE_PROXIMITY:
		return hmat_parse_proximity_domain(header, end);
	case ACPI_HMAT_TYPE_LOCALITY:
		return hmat_parse_locality(header, end);
	case ACPI_HMAT_TYPE_CACHE:
		return hmat_parse_cache(header, end);
	default:
		return -EINVAL;
	}
}
static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_srat_mem_affinity *ma = (void *)header;

	if (!ma)
		return -EINVAL;
	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
		return 0;
	alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length);
	return 0;
}

static __init int srat_parse_genport_affinity(union acpi_subtable_headers *header,
					      const unsigned long end)
{
	struct acpi_srat_generic_affinity *ga = (void *)header;

	if (!ga)
		return -EINVAL;

	if (!(ga->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED))
		return 0;

	/* Skip PCI device_handle for now */
	if (ga->device_handle_type != 0)
		return 0;

	alloc_genport_target(ga->proximity_domain,
			     (u8 *)ga->device_handle);

	return 0;
}
static u32 hmat_initiator_perf(struct memory_target *target,
			       struct memory_initiator *initiator,
			       struct acpi_hmat_locality *hmat_loc)
{
	unsigned int ipds, tpds, i, idx = 0, tdx = 0;
	u32 *inits, *targs;
	u16 *entries;

	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);

	for (i = 0; i < ipds; i++) {
		if (inits[i] == initiator->processor_pxm) {
			idx = i;
			break;
		}
	}

	if (i == ipds)
		return 0;

	for (i = 0; i < tpds; i++) {
		if (targs[i] == target->memory_pxm) {
			tdx = i;
			break;
		}
	}
	if (i == tpds)
		return 0;

	return hmat_normalize(entries[idx * tpds + tdx],
			      hmat_loc->entry_base_unit,
			      hmat_loc->data_type);
}
static bool hmat_update_best(u8 type, u32 value, u32 *best)
{
	bool updated = false;

	if (!value)
		return false;

	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		if (!*best || *best > value) {
			*best = value;
			updated = true;
		}
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		if (!*best || *best < value) {
			*best = value;
			updated = true;
		}
		break;
	}

	return updated;
}
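/*
 * initiator_cmp() orders initiators by ascending proximity domain.
 * hmat_update_target_attrs() depends on that order: once a strictly better
 * initiator is found, every lower-numbered PXM can be dropped with a single
 * bitmap_clear() over the prefix of the candidate mask.
 */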
static int initiator_cmp(void *priv, const struct list_head *a,
			 const struct list_head *b)
{
	struct memory_initiator *ia;
	struct memory_initiator *ib;

	ia = list_entry(a, struct memory_initiator, node);
	ib = list_entry(b, struct memory_initiator, node);

	return ia->processor_pxm - ib->processor_pxm;
}

static int initiators_to_nodemask(unsigned long *p_nodes)
{
	struct memory_initiator *initiator;

	if (list_empty(&initiators))
		return -ENXIO;

	list_for_each_entry(initiator, &initiators, node)
		set_bit(initiator->processor_pxm, p_nodes);

	return 0;
}
static void hmat_update_target_attrs(struct memory_target *target,
				     unsigned long *p_nodes, int access)
{
	struct memory_initiator *initiator;
	unsigned int cpu_nid;
	struct memory_locality *loc = NULL;
	u32 best = 0;
	int i;

	/* Don't update if an external agent has changed the data. */
	if (target->ext_updated)
		return;

	/* Don't update for generic port if there's no device handle */
	if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
	     access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
	    !(*(u16 *)target->gen_port_device_handle))
		return;

	bitmap_zero(p_nodes, MAX_NUMNODES);
	/*
	 * If the Address Range Structure provides a local processor pxm, set
	 * only that one. Otherwise, find the best performance attributes and
	 * collect all initiators that match.
	 */
	if (target->processor_pxm != PXM_INVAL) {
		cpu_nid = pxm_to_node(target->processor_pxm);
		if (access == ACCESS_COORDINATE_LOCAL ||
		    node_state(cpu_nid, N_CPU)) {
			set_bit(target->processor_pxm, p_nodes);
			return;
		}
	}

	if (list_empty(&localities))
		return;

	/*
	 * We need the initiator list sorted so we can use bitmap_clear for
	 * previously set initiators when we find a better memory accessor.
	 * We'll also use the sorting to prime the candidate nodes with known
	 * initiators.
	 */
	list_sort(NULL, &initiators, initiator_cmp);
	if (initiators_to_nodemask(p_nodes) < 0)
		return;

	for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
		loc = localities_types[i];
		if (!loc)
			continue;

		best = 0;
		list_for_each_entry(initiator, &initiators, node) {
			u32 value;

			if ((access == ACCESS_COORDINATE_CPU ||
			     access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
			    !initiator->has_cpu) {
				clear_bit(initiator->processor_pxm, p_nodes);
				continue;
			}
			if (!test_bit(initiator->processor_pxm, p_nodes))
				continue;

			value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
			if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
				bitmap_clear(p_nodes, 0, initiator->processor_pxm);
			if (value != best)
				clear_bit(initiator->processor_pxm, p_nodes);
		}
		if (best)
			hmat_update_target_access(target, loc->hmat_loc->data_type, best, access);
	}
}
static void __hmat_register_target_initiators(struct memory_target *target,
					      unsigned long *p_nodes,
					      int access)
{
	unsigned int mem_nid, cpu_nid;
	int i;

	mem_nid = pxm_to_node(target->memory_pxm);
	hmat_update_target_attrs(target, p_nodes, access);
	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
		cpu_nid = pxm_to_node(i);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, access);
	}
}

static void hmat_update_generic_target(struct memory_target *target)
{
	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);

	hmat_update_target_attrs(target, p_nodes,
				 NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL);
	hmat_update_target_attrs(target, p_nodes,
				 NODE_ACCESS_CLASS_GENPORT_SINK_CPU);
}

static void hmat_register_target_initiators(struct memory_target *target)
{
	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);

	__hmat_register_target_initiators(target, p_nodes,
					  ACCESS_COORDINATE_LOCAL);
	__hmat_register_target_initiators(target, p_nodes,
					  ACCESS_COORDINATE_CPU);
}
static void hmat_register_target_cache(struct memory_target *target)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);
	struct target_cache *tcache;

	list_for_each_entry(tcache, &target->caches, node)
		node_add_cache(mem_nid, &tcache->cache_attrs);
}

static void hmat_register_target_perf(struct memory_target *target, int access)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);
	node_set_perf_attrs(mem_nid, &target->coord[access], access);
}

static void hmat_register_target_devices(struct memory_target *target)
{
	struct resource *res;

	/*
	 * Do not bother creating devices if no driver is available to
	 * consume them.
	 */
	if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
		return;

	for (res = target->memregions.child; res; res = res->sibling) {
		int target_nid = pxm_to_node(target->memory_pxm);

		hmem_register_resource(target_nid, res);
	}
}
static void hmat_register_target(struct memory_target *target)
{
	int nid = pxm_to_node(target->memory_pxm);

	/*
	 * Devices may belong to either an offline or online
	 * node, so unconditionally add them.
	 */
	hmat_register_target_devices(target);

	/*
	 * Register generic port perf numbers. The nid may not be
	 * initialized and is still NUMA_NO_NODE.
	 */
	mutex_lock(&target_lock);
	if (*(u16 *)target->gen_port_device_handle) {
		hmat_update_generic_target(target);
		target->registered = true;
	}
	mutex_unlock(&target_lock);

	/*
	 * Skip offline nodes. This can happen when memory
	 * marked EFI_MEMORY_SP, "specific purpose", is applied
	 * to all the memory in a proximity domain leading to
	 * the node being marked offline / unplugged, or if
	 * memory-only "hotplug" node is offline.
	 */
	if (nid == NUMA_NO_NODE || !node_online(nid))
		return;

	mutex_lock(&target_lock);
	if (!target->registered) {
		hmat_register_target_initiators(target);
		hmat_register_target_cache(target);
		hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
		hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
		target->registered = true;
	}
	mutex_unlock(&target_lock);
}

static void hmat_register_targets(void)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		hmat_register_target(target);
}
static int hmat_callback(struct notifier_block *self,
			 unsigned long action, void *arg)
{
	struct memory_target *target;
	struct memory_notify *mnb = arg;
	int pxm, nid = mnb->status_change_nid;

	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
		return NOTIFY_OK;

	pxm = node_to_pxm(nid);
	target = find_mem_target(pxm);
	if (!target)
		return NOTIFY_OK;

	hmat_register_target(target);
	return NOTIFY_OK;
}
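/*
 * Feed the CPU-relative coordinates of the default DRAM nodes to the
 * memory-tiers core as the baseline performance, giving
 * mt_perf_to_adistance() a reference point when converting other nodes'
 * coordinates into an abstract distance.
 */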
static int __init hmat_set_default_dram_perf(void)
{
	int rc;
	int nid, pxm;
	struct memory_target *target;
	struct access_coordinate *attrs;

	for_each_node_mask(nid, default_dram_nodes) {
		pxm = node_to_pxm(nid);
		target = find_mem_target(pxm);
		if (!target)
			continue;
		attrs = &target->coord[ACCESS_COORDINATE_CPU];
		rc = mt_set_default_dram_perf(nid, attrs, "ACPI HMAT");
		if (rc)
			return rc;
	}

	return 0;
}
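/*
 * Notifier callback for the memory-tiers abstract distance algorithm:
 * refresh the target's CPU-relative coordinates and translate them into an
 * abstract distance. Returning NOTIFY_STOP signals that a value was
 * produced; NOTIFY_OK lets other providers handle the node.
 */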
static int hmat_calculate_adistance(struct notifier_block *self,
				    unsigned long nid, void *data)
{
	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
	struct memory_target *target;
	struct access_coordinate *perf;
	int *adist = data;
	int pxm;

	pxm = node_to_pxm(nid);
	target = find_mem_target(pxm);
	if (!target)
		return NOTIFY_OK;

	mutex_lock(&target_lock);
	hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU);
	mutex_unlock(&target_lock);

	perf = &target->coord[ACCESS_COORDINATE_CPU];

	if (mt_perf_to_adistance(perf, adist))
		return NOTIFY_OK;

	return NOTIFY_STOP;
}

static struct notifier_block hmat_adist_nb __meminitdata = {
	.notifier_call = hmat_calculate_adistance,
	.priority = 100,
};
static __init void hmat_free_structures(void)
{
	struct memory_target *target, *tnext;
	struct memory_locality *loc, *lnext;
	struct memory_initiator *initiator, *inext;
	struct target_cache *tcache, *cnext;

	list_for_each_entry_safe(target, tnext, &targets, node) {
		struct resource *res, *res_next;

		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
			list_del(&tcache->node);
			kfree(tcache);
		}

		list_del(&target->node);
		res = target->memregions.child;
		while (res) {
			res_next = res->sibling;
			__release_region(&target->memregions, res->start,
					 resource_size(res));
			res = res_next;
		}
		kfree(target);
	}

	list_for_each_entry_safe(initiator, inext, &initiators, node) {
		list_del(&initiator->node);
		kfree(initiator);
	}

	list_for_each_entry_safe(loc, lnext, &localities, node) {
		list_del(&loc->node);
		kfree(loc);
	}
}
static __init int hmat_init(void)
{
	struct acpi_table_header *tbl;
	enum acpi_hmat_type i;
	acpi_status status;

	if (srat_disabled() || hmat_disable)
		return 0;

	status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		return 0;

	if (acpi_table_parse_entries(ACPI_SIG_SRAT,
				     sizeof(struct acpi_table_srat),
				     ACPI_SRAT_TYPE_MEMORY_AFFINITY,
				     srat_parse_mem_affinity, 0) < 0)
		goto out_put;

	if (acpi_table_parse_entries(ACPI_SIG_SRAT,
				     sizeof(struct acpi_table_srat),
				     ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY,
				     srat_parse_genport_affinity, 0) < 0)
		goto out_put;

	acpi_put_table(tbl);

	status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		goto out_put;

	hmat_revision = tbl->revision;
	switch (hmat_revision) {
	case 1:
	case 2:
		break;
	default:
		pr_notice("Ignoring: Unknown revision:%d\n", hmat_revision);
		goto out_put;
	}

	for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
		if (acpi_table_parse_entries(ACPI_SIG_HMAT,
					     sizeof(struct acpi_table_hmat), i,
					     hmat_parse_subtable, 0) < 0) {
			pr_notice("Ignoring: Invalid table");
			goto out_put;
		}
	}
	hmat_register_targets();

	/* Keep the table and structures if the notifier may use them */
	if (hotplug_memory_notifier(hmat_callback, HMAT_CALLBACK_PRI))
		goto out_put;

	if (!hmat_set_default_dram_perf())
		register_mt_adistance_algorithm(&hmat_adist_nb);

	return 0;
out_put:
	hmat_free_structures();
	acpi_put_table(tbl);
	return 0;
}
subsys_initcall(hmat_init);