// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Intel Corporation.
 *
 * Heterogeneous Memory Attributes Table (HMAT) representation
 *
 * This program parses and reports the platform's HMAT tables, and registers
 * the applicable attributes with the node's interfaces.
 */

#define pr_fmt(fmt) "acpi/hmat: " fmt
#define dev_fmt(fmt) "acpi/hmat: " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/list_sort.h>
#include <linux/memregion.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>
#include <linux/dax.h>

static u8 hmat_revision;
static int hmat_disable __initdata;

void __init disable_hmat(void)
{
        hmat_disable = 1;
}

static LIST_HEAD(targets);
static LIST_HEAD(initiators);
static LIST_HEAD(localities);

static DEFINE_MUTEX(target_lock);

/*
 * The defined enum order is used to prioritize attributes to break ties when
 * selecting the best performing node.
 */
enum locality_types {
        WRITE_LATENCY,
        READ_LATENCY,
        WRITE_BANDWIDTH,
        READ_BANDWIDTH,
};

static struct memory_locality *localities_types[4];

struct target_cache {
        struct list_head node;
        struct node_cache_attrs cache_attrs;
};

struct memory_target {
        struct list_head node;
        unsigned int memory_pxm;
        unsigned int processor_pxm;
        struct resource memregions;
        struct node_hmem_attrs hmem_attrs[2];
        struct list_head caches;
        struct node_cache_attrs cache_attrs;
        bool registered;
};

struct memory_initiator {
        struct list_head node;
        unsigned int processor_pxm;
        bool has_cpu;
};

struct memory_locality {
        struct list_head node;
        struct acpi_hmat_locality *hmat_loc;
};
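
/*
 * Parsed SRAT/HMAT state lives in the lists above, keyed by ACPI proximity
 * domain (PXM). The lookup helpers below use a simple linear search; the
 * lists are expected to stay short (one entry per proximity domain or per
 * HMAT locality structure).
 */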
static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
{
        struct memory_initiator *initiator;

        list_for_each_entry(initiator, &initiators, node)
                if (initiator->processor_pxm == cpu_pxm)
                        return initiator;
        return NULL;
}

static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
        struct memory_target *target;

        list_for_each_entry(target, &targets, node)
                if (target->memory_pxm == mem_pxm)
                        return target;
        return NULL;
}

static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
        struct memory_initiator *initiator;

        if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
                return;

        initiator = find_mem_initiator(cpu_pxm);
        if (initiator)
                return;

        initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
        if (!initiator)
                return;

        initiator->processor_pxm = cpu_pxm;
        initiator->has_cpu = node_state(pxm_to_node(cpu_pxm), N_CPU);
        list_add_tail(&initiator->node, &initiators);
}

static __init void alloc_memory_target(unsigned int mem_pxm,
                resource_size_t start, resource_size_t len)
{
        struct memory_target *target;

        target = find_mem_target(mem_pxm);
        if (!target) {
                target = kzalloc(sizeof(*target), GFP_KERNEL);
                if (!target)
                        return;
                target->memory_pxm = mem_pxm;
                target->processor_pxm = PXM_INVAL;
                target->memregions = (struct resource) {
                        .name   = "ACPI mem",
                        .start  = 0,
                        .end    = -1,
                        .flags  = IORESOURCE_MEM,
                };
                list_add_tail(&target->node, &targets);
                INIT_LIST_HEAD(&target->caches);
        }

        /*
         * There are potentially multiple ranges per PXM, so record each
         * in the per-target memregions resource tree.
         */
        if (!__request_region(&target->memregions, start, len, "memory target",
                                IORESOURCE_MEM))
                pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
                                start, start + len, mem_pxm);
}

static __init const char *hmat_data_type(u8 type)
{
        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
                return "Access Latency";
        case ACPI_HMAT_READ_LATENCY:
                return "Read Latency";
        case ACPI_HMAT_WRITE_LATENCY:
                return "Write Latency";
        case ACPI_HMAT_ACCESS_BANDWIDTH:
                return "Access Bandwidth";
        case ACPI_HMAT_READ_BANDWIDTH:
                return "Read Bandwidth";
        case ACPI_HMAT_WRITE_BANDWIDTH:
                return "Write Bandwidth";
        default:
                return "Reserved";
        }
}

static __init const char *hmat_data_type_suffix(u8 type)
{
        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
        case ACPI_HMAT_READ_LATENCY:
        case ACPI_HMAT_WRITE_LATENCY:
                return " nsec";
        case ACPI_HMAT_ACCESS_BANDWIDTH:
        case ACPI_HMAT_READ_BANDWIDTH:
        case ACPI_HMAT_WRITE_BANDWIDTH:
                return " MB/s";
        default:
                return "";
        }
}
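
/*
 * Worked example for an HMAT revision 2 table: with an entry base unit of
 * 100 and a raw matrix entry of 150, a latency entry represents
 * 150 * 100 = 15000 picoseconds, which hmat_normalize() reports as
 * DIV_ROUND_UP(15000, 1000) = 15 nanoseconds. Revision 2 bandwidth entries
 * are only multiplied by the base unit.
 */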
static u32 hmat_normalize(u16 entry, u64 base, u8 type)
{
        u32 value;

        /*
         * Check for invalid and overflow values
         */
        if (entry == 0xffff || !entry)
                return 0;
        else if (base > (UINT_MAX / (entry)))
                return 0;

        /*
         * Divide by the base unit for version 1, convert latency from
         * picoseconds to nanoseconds if revision 2.
         */
        value = entry * base;
        if (hmat_revision == 1) {
                if (value < 10)
                        return 0;
                value = DIV_ROUND_UP(value, 10);
        } else if (hmat_revision == 2) {
                switch (type) {
                case ACPI_HMAT_ACCESS_LATENCY:
                case ACPI_HMAT_READ_LATENCY:
                case ACPI_HMAT_WRITE_LATENCY:
                        value = DIV_ROUND_UP(value, 1000);
                        break;
                default:
                        break;
                }
        }

        return value;
}
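
/*
 * Each target carries two sets of performance attributes: hmem_attrs[0]
 * describes access from the best-performing initiator of any kind, while
 * hmem_attrs[1] describes access from the best-performing initiator that
 * is also a CPU node. These back the sysfs "access0" and "access1"
 * classes respectively.
 */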
static void hmat_update_target_access(struct memory_target *target,
                                      u8 type, u32 value, int access)
{
        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
                target->hmem_attrs[access].read_latency = value;
                target->hmem_attrs[access].write_latency = value;
                break;
        case ACPI_HMAT_READ_LATENCY:
                target->hmem_attrs[access].read_latency = value;
                break;
        case ACPI_HMAT_WRITE_LATENCY:
                target->hmem_attrs[access].write_latency = value;
                break;
        case ACPI_HMAT_ACCESS_BANDWIDTH:
                target->hmem_attrs[access].read_bandwidth = value;
                target->hmem_attrs[access].write_bandwidth = value;
                break;
        case ACPI_HMAT_READ_BANDWIDTH:
                target->hmem_attrs[access].read_bandwidth = value;
                break;
        case ACPI_HMAT_WRITE_BANDWIDTH:
                target->hmem_attrs[access].write_bandwidth = value;
                break;
        default:
                break;
        }
}
static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
        struct memory_locality *loc;

        loc = kzalloc(sizeof(*loc), GFP_KERNEL);
        if (!loc) {
                pr_notice_once("Failed to allocate HMAT locality\n");
                return;
        }

        loc->hmat_loc = hmat_loc;
        list_add_tail(&loc->node, &localities);

        switch (hmat_loc->data_type) {
        case ACPI_HMAT_ACCESS_LATENCY:
                localities_types[READ_LATENCY] = loc;
                localities_types[WRITE_LATENCY] = loc;
                break;
        case ACPI_HMAT_READ_LATENCY:
                localities_types[READ_LATENCY] = loc;
                break;
        case ACPI_HMAT_WRITE_LATENCY:
                localities_types[WRITE_LATENCY] = loc;
                break;
        case ACPI_HMAT_ACCESS_BANDWIDTH:
                localities_types[READ_BANDWIDTH] = loc;
                localities_types[WRITE_BANDWIDTH] = loc;
                break;
        case ACPI_HMAT_READ_BANDWIDTH:
                localities_types[READ_BANDWIDTH] = loc;
                break;
        case ACPI_HMAT_WRITE_BANDWIDTH:
                localities_types[WRITE_BANDWIDTH] = loc;
                break;
        default:
                break;
        }
}
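
/*
 * A System Locality Latency and Bandwidth Information structure is the
 * acpi_hmat_locality header followed by ipds u32 initiator PXMs, tpds u32
 * target PXMs, and an ipds * tpds matrix of u16 entries, one row per
 * initiator, indexed as entries[init * tpds + targ].
 */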
static __init int hmat_parse_locality(union acpi_subtable_headers *header,
                                      const unsigned long end)
{
        struct acpi_hmat_locality *hmat_loc = (void *)header;
        struct memory_target *target;
        unsigned int init, targ, total_size, ipds, tpds;
        u32 *inits, *targs, value;
        u16 *entries;
        u8 type, mem_hier;

        if (hmat_loc->header.length < sizeof(*hmat_loc)) {
                pr_notice("HMAT: Unexpected locality header length: %u\n",
                          hmat_loc->header.length);
                return -EINVAL;
        }

        type = hmat_loc->data_type;
        mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
        ipds = hmat_loc->number_of_initiator_Pds;
        tpds = hmat_loc->number_of_target_Pds;
        total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
                     sizeof(*inits) * ipds + sizeof(*targs) * tpds;
        if (hmat_loc->header.length < total_size) {
                pr_notice("HMAT: Unexpected locality header length:%u, minimum required:%u\n",
                          hmat_loc->header.length, total_size);
                return -EINVAL;
        }

        pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
                hmat_loc->flags, hmat_data_type(type), ipds, tpds,
                hmat_loc->entry_base_unit);

        inits = (u32 *)(hmat_loc + 1);
        targs = inits + ipds;
        entries = (u16 *)(targs + tpds);
        for (init = 0; init < ipds; init++) {
                alloc_memory_initiator(inits[init]);
                for (targ = 0; targ < tpds; targ++) {
                        value = hmat_normalize(entries[init * tpds + targ],
                                               hmat_loc->entry_base_unit,
                                               type);
                        pr_info(" Initiator-Target[%u-%u]:%u%s\n",
                                inits[init], targs[targ], value,
                                hmat_data_type_suffix(type));

                        if (mem_hier == ACPI_HMAT_MEMORY) {
                                target = find_mem_target(targs[targ]);
                                if (target && target->processor_pxm == inits[init]) {
                                        hmat_update_target_access(target, type, value, 0);
                                        /* If the node has a CPU, update access 1 */
                                        if (node_state(pxm_to_node(inits[init]), N_CPU))
                                                hmat_update_target_access(target, type, value, 1);
                                }
                        }
                }
        }

        if (mem_hier == ACPI_HMAT_MEMORY)
                hmat_add_locality(hmat_loc);

        return 0;
}
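
/*
 * Memory Side Cache Information packs its attributes into a single u32:
 * the cache level starts at bit 4, the associativity at bit 8, the write
 * policy at bit 12 and the cache line size at bit 16, which is where the
 * shifts below come from.
 */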
static __init int hmat_parse_cache(union acpi_subtable_headers *header,
                                   const unsigned long end)
{
        struct acpi_hmat_cache *cache = (void *)header;
        struct memory_target *target;
        struct target_cache *tcache;
        u32 attrs;

        if (cache->header.length < sizeof(*cache)) {
                pr_notice("HMAT: Unexpected cache header length: %u\n",
                          cache->header.length);
                return -EINVAL;
        }

        attrs = cache->cache_attributes;
        pr_info("HMAT: Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
                cache->memory_PD, cache->cache_size, attrs,
                cache->number_of_SMBIOShandles);

        target = find_mem_target(cache->memory_PD);
        if (!target)
                return 0;

        tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
        if (!tcache) {
                pr_notice_once("Failed to allocate HMAT cache info\n");
                return 0;
        }

        tcache->cache_attrs.size = cache->cache_size;
        tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
        tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;

        switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
        case ACPI_HMAT_CA_DIRECT_MAPPED:
                tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
                break;
        case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
                tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
                break;
        case ACPI_HMAT_CA_NONE:
        default:
                tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
                break;
        }

        switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
        case ACPI_HMAT_CP_WB:
                tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
                break;
        case ACPI_HMAT_CP_WT:
                tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
                break;
        case ACPI_HMAT_CP_NONE:
        default:
                tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
                break;
        }
        list_add_tail(&tcache->node, &target->caches);

        return 0;
}
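
/*
 * The Memory Proximity Domain Attributes structure differs between HMAT
 * revisions: revision 1 carried a physical address range (printed below
 * from the now-reserved fields) and required the "memory PD valid" flag,
 * while revision 2 and later always provide a valid memory proximity
 * domain.
 */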
static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
                                              const unsigned long end)
{
        struct acpi_hmat_proximity_domain *p = (void *)header;
        struct memory_target *target = NULL;

        if (p->header.length != sizeof(*p)) {
                pr_notice("HMAT: Unexpected address range header length: %u\n",
                          p->header.length);
                return -EINVAL;
        }

        if (hmat_revision == 1)
                pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
                        p->reserved3, p->reserved4, p->flags, p->processor_PD,
                        p->memory_PD);
        else
                pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
                        p->flags, p->processor_PD, p->memory_PD);

        if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
            hmat_revision > 1) {
                target = find_mem_target(p->memory_PD);
                if (!target) {
                        pr_debug("HMAT: Memory Domain missing from SRAT\n");
                        return -EINVAL;
                }
        }
        if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
                int p_node = pxm_to_node(p->processor_PD);

                if (p_node == NUMA_NO_NODE) {
                        pr_debug("HMAT: Invalid Processor Domain\n");
                        return -EINVAL;
                }
                target->processor_pxm = p->processor_PD;
        }

        return 0;
}

static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
                                      const unsigned long end)
{
        struct acpi_hmat_structure *hdr = (void *)header;

        if (!hdr)
                return -EINVAL;

        switch (hdr->type) {
        case ACPI_HMAT_TYPE_PROXIMITY:
                return hmat_parse_proximity_domain(header, end);
        case ACPI_HMAT_TYPE_LOCALITY:
                return hmat_parse_locality(header, end);
        case ACPI_HMAT_TYPE_CACHE:
                return hmat_parse_cache(header, end);
        default:
                return -EINVAL;
        }
}

static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
                                          const unsigned long end)
{
        struct acpi_srat_mem_affinity *ma = (void *)header;

        if (!ma)
                return -EINVAL;
        if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
                return 0;
        alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length);
        return 0;
}
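
/*
 * Look up the raw matrix entry for one initiator/target pair by locating
 * the initiator's row and the target's column in the locality structure,
 * then normalize it the same way the parse path does.
 */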
static u32 hmat_initiator_perf(struct memory_target *target,
                               struct memory_initiator *initiator,
                               struct acpi_hmat_locality *hmat_loc)
{
        unsigned int ipds, tpds, i, idx = 0, tdx = 0;
        u32 *inits, *targs;
        u16 *entries;

        ipds = hmat_loc->number_of_initiator_Pds;
        tpds = hmat_loc->number_of_target_Pds;
        inits = (u32 *)(hmat_loc + 1);
        targs = inits + ipds;
        entries = (u16 *)(targs + tpds);

        for (i = 0; i < ipds; i++) {
                if (inits[i] == initiator->processor_pxm) {
                        idx = i;
                        break;
                }
        }

        if (i == ipds)
                return 0;

        for (i = 0; i < tpds; i++) {
                if (targs[i] == target->memory_pxm) {
                        tdx = i;
                        break;
                }
        }
        if (i == tpds)
                return 0;

        return hmat_normalize(entries[idx * tpds + tdx],
                              hmat_loc->entry_base_unit,
                              hmat_loc->data_type);
}
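
/*
 * Latency attributes are better when smaller, bandwidth attributes when
 * larger; a value of zero means "unknown" and never wins.
 */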
static bool hmat_update_best(u8 type, u32 value, u32 *best)
{
        bool updated = false;

        if (!value)
                return false;

        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
        case ACPI_HMAT_READ_LATENCY:
        case ACPI_HMAT_WRITE_LATENCY:
                if (!*best || *best > value) {
                        *best = value;
                        updated = true;
                }
                break;
        case ACPI_HMAT_ACCESS_BANDWIDTH:
        case ACPI_HMAT_READ_BANDWIDTH:
        case ACPI_HMAT_WRITE_BANDWIDTH:
                if (!*best || *best < value) {
                        *best = value;
                        updated = true;
                }
                break;
        }

        return updated;
}
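
/*
 * list_sort() comparison helper. As a side effect it sets each visited
 * initiator's PXM in the p_nodes bitmap passed through @priv, priming the
 * candidate mask used by hmat_register_target_initiators().
 */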
static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct memory_initiator *ia;
        struct memory_initiator *ib;
        unsigned long *p_nodes = priv;

        ia = list_entry(a, struct memory_initiator, node);
        ib = list_entry(b, struct memory_initiator, node);

        set_bit(ia->processor_pxm, p_nodes);
        set_bit(ib->processor_pxm, p_nodes);

        return ia->processor_pxm - ib->processor_pxm;
}
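
/*
 * Example of the selection below: if initiators in PXM 0 and PXM 1 both
 * report attributes for a target and PXM 0 has the lower write latency,
 * the first pass clears PXM 1 from p_nodes, so the remaining passes and
 * the final for_each_set_bit() link only PXM 0 to the target.
 */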
static void hmat_register_target_initiators(struct memory_target *target)
{
        static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
        struct memory_initiator *initiator;
        unsigned int mem_nid, cpu_nid;
        struct memory_locality *loc = NULL;
        u32 best = 0;
        bool access0done = false;
        int i;

        mem_nid = pxm_to_node(target->memory_pxm);
        /*
         * If the Address Range Structure provides a local processor pxm, link
         * only that one. Otherwise, find the best performance attributes and
         * register all initiators that match.
         */
        if (target->processor_pxm != PXM_INVAL) {
                cpu_nid = pxm_to_node(target->processor_pxm);
                register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
                access0done = true;
                if (node_state(cpu_nid, N_CPU)) {
                        register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
                        return;
                }
        }

        if (list_empty(&localities))
                return;

        /*
         * We need the initiator list sorted so we can use bitmap_clear for
         * previously set initiators when we find a better memory accessor.
         * We'll also use the sorting to prime the candidate nodes with known
         * initiators.
         */
        bitmap_zero(p_nodes, MAX_NUMNODES);
        list_sort(p_nodes, &initiators, initiator_cmp);
        if (!access0done) {
                for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
                        loc = localities_types[i];
                        if (!loc)
                                continue;

                        best = 0;
                        list_for_each_entry(initiator, &initiators, node) {
                                u32 value;

                                if (!test_bit(initiator->processor_pxm, p_nodes))
                                        continue;

                                value = hmat_initiator_perf(target, initiator,
                                                            loc->hmat_loc);
                                if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
                                        bitmap_clear(p_nodes, 0, initiator->processor_pxm);
                                if (value != best)
                                        clear_bit(initiator->processor_pxm, p_nodes);
                        }
                        if (best)
                                hmat_update_target_access(target, loc->hmat_loc->data_type,
                                                          best, 0);
                }

                for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
                        cpu_nid = pxm_to_node(i);
                        register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
                }
        }

        /* Access 1 ignores Generic Initiators */
        bitmap_zero(p_nodes, MAX_NUMNODES);
        list_sort(p_nodes, &initiators, initiator_cmp);

        for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
                loc = localities_types[i];
                if (!loc)
                        continue;

                best = 0;
                list_for_each_entry(initiator, &initiators, node) {
                        u32 value;

                        if (!initiator->has_cpu) {
                                clear_bit(initiator->processor_pxm, p_nodes);
                                continue;
                        }
                        if (!test_bit(initiator->processor_pxm, p_nodes))
                                continue;

                        value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
                        if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
                                bitmap_clear(p_nodes, 0, initiator->processor_pxm);
                        if (value != best)
                                clear_bit(initiator->processor_pxm, p_nodes);
                }
                if (best)
                        hmat_update_target_access(target, loc->hmat_loc->data_type, best, 1);
        }
        for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
                cpu_nid = pxm_to_node(i);
                register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
        }
}

static void hmat_register_target_cache(struct memory_target *target)
{
        unsigned mem_nid = pxm_to_node(target->memory_pxm);
        struct target_cache *tcache;

        list_for_each_entry(tcache, &target->caches, node)
                node_add_cache(mem_nid, &tcache->cache_attrs);
}

static void hmat_register_target_perf(struct memory_target *target, int access)
{
        unsigned mem_nid = pxm_to_node(target->memory_pxm);
        node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access);
}
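
/*
 * Each reserved range under target->memregions is handed to the hmem
 * device code; with CONFIG_DEV_DAX_HMEM enabled, device-dax can then
 * expose the range to userspace (and it may later be onlined as system
 * RAM by the dax kmem driver).
 */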
static void hmat_register_target_devices(struct memory_target *target)
{
        struct resource *res;

        /*
         * Do not bother creating devices if no driver is available to
         * consume them.
         */
        if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
                return;

        for (res = target->memregions.child; res; res = res->sibling) {
                int target_nid = pxm_to_node(target->memory_pxm);

                hmem_register_device(target_nid, res);
        }
}

static void hmat_register_target(struct memory_target *target)
{
        int nid = pxm_to_node(target->memory_pxm);

        /*
         * Devices may belong to either an offline or online
         * node, so unconditionally add them.
         */
        hmat_register_target_devices(target);

        /*
         * Skip offline nodes. This can happen when memory
         * marked EFI_MEMORY_SP, "specific purpose", is applied
         * to all the memory in a proximity domain leading to
         * the node being marked offline / unplugged, or if a
         * memory-only "hotplug" node is offline.
         */
        if (nid == NUMA_NO_NODE || !node_online(nid))
                return;

        mutex_lock(&target_lock);
        if (!target->registered) {
                hmat_register_target_initiators(target);
                hmat_register_target_cache(target);
                hmat_register_target_perf(target, 0);
                hmat_register_target_perf(target, 1);
                target->registered = true;
        }
        mutex_unlock(&target_lock);
}

static void hmat_register_targets(void)
{
        struct memory_target *target;

        list_for_each_entry(target, &targets, node)
                hmat_register_target(target);
}
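
/*
 * Memory hotplug notifier: a target whose node was offline at boot (for
 * example "specific purpose" or hotplugged memory) gets its HMAT
 * properties registered when the node first comes online.
 */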
static int hmat_callback(struct notifier_block *self,
                         unsigned long action, void *arg)
{
        struct memory_target *target;
        struct memory_notify *mnb = arg;
        int pxm, nid = mnb->status_change_nid;

        if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
                return NOTIFY_OK;

        pxm = node_to_pxm(nid);
        target = find_mem_target(pxm);
        if (!target)
                return NOTIFY_OK;

        hmat_register_target(target);
        return NOTIFY_OK;
}

static struct notifier_block hmat_callback_nb = {
        .notifier_call = hmat_callback,
        .priority = 2,
};

static __init void hmat_free_structures(void)
{
        struct memory_target *target, *tnext;
        struct memory_locality *loc, *lnext;
        struct memory_initiator *initiator, *inext;
        struct target_cache *tcache, *cnext;

        list_for_each_entry_safe(target, tnext, &targets, node) {
                struct resource *res, *res_next;

                list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
                        list_del(&tcache->node);
                        kfree(tcache);
                }

                list_del(&target->node);
                res = target->memregions.child;
                while (res) {
                        res_next = res->sibling;
                        __release_region(&target->memregions, res->start,
                                         resource_size(res));
                        res = res_next;
                }
                kfree(target);
        }

        list_for_each_entry_safe(initiator, inext, &initiators, node) {
                list_del(&initiator->node);
                kfree(initiator);
        }

        list_for_each_entry_safe(loc, lnext, &localities, node) {
                list_del(&loc->node);
                kfree(loc);
        }
}
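
/*
 * Initialization order: enumerate memory targets from SRAT, parse the
 * HMAT subtables to fill in performance and cache data, register
 * everything with the node interfaces, then either keep the parsed
 * structures for the hotplug notifier or free them on failure.
 */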
static __init int hmat_init(void)
{
        struct acpi_table_header *tbl;
        enum acpi_hmat_type i;
        acpi_status status;

        if (srat_disabled() || hmat_disable)
                return 0;

        status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
        if (ACPI_FAILURE(status))
                return 0;

        if (acpi_table_parse_entries(ACPI_SIG_SRAT,
                                sizeof(struct acpi_table_srat),
                                ACPI_SRAT_TYPE_MEMORY_AFFINITY,
                                srat_parse_mem_affinity, 0) < 0)
                goto out_put;
        acpi_put_table(tbl);

        status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
        if (ACPI_FAILURE(status))
                goto out_put;

        hmat_revision = tbl->revision;
        switch (hmat_revision) {
        case 1:
        case 2:
                break;
        default:
                pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision);
                goto out_put;
        }

        for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
                if (acpi_table_parse_entries(ACPI_SIG_HMAT,
                                             sizeof(struct acpi_table_hmat), i,
                                             hmat_parse_subtable, 0) < 0) {
                        pr_notice("Ignoring HMAT: Invalid table");
                        goto out_put;
                }
        }
        hmat_register_targets();

        /* Keep the table and structures if the notifier may use them */
        if (!register_hotmemory_notifier(&hmat_callback_nb))
                return 0;
out_put:
        hmat_free_structures();
        acpi_put_table(tbl);
        return 0;
}
device_initcall(hmat_init);