// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Intel Corporation.
 *
 * Heterogeneous Memory Attributes Table (HMAT) representation
 *
 * This program parses and reports the platform's HMAT tables, and registers
 * the applicable attributes with the node's interfaces.
 */
#define pr_fmt(fmt) "acpi/hmat: " fmt
#define dev_fmt(fmt) "acpi/hmat: " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/list_sort.h>
#include <linux/memregion.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>
static u8 hmat_revision;

static LIST_HEAD(targets);
static LIST_HEAD(initiators);
static LIST_HEAD(localities);

static DEFINE_MUTEX(target_lock);
/*
 * The defined enum order is used to prioritize attributes to break ties when
 * selecting the best performing node.
 */
enum locality_types {
	WRITE_LATENCY,
	READ_LATENCY,
	WRITE_BANDWIDTH,
	READ_BANDWIDTH,
};

static struct memory_locality *localities_types[4];
struct target_cache {
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};
struct memory_target {
	struct list_head node;
	unsigned int memory_pxm;
	unsigned int processor_pxm;
	struct resource memregions;
	struct node_hmem_attrs hmem_attrs;
	struct list_head caches;
	struct node_cache_attrs cache_attrs;
	bool registered;
};
struct memory_initiator {
	struct list_head node;
	unsigned int processor_pxm;
};
struct memory_locality {
	struct list_head node;
	struct acpi_hmat_locality *hmat_loc;
};
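
/*
 * Lookup helpers: scan the global initiator and target lists for an entry
 * matching the given proximity domain, or return NULL if none is found.
 */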
static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	list_for_each_entry(initiator, &initiators, node)
		if (initiator->processor_pxm == cpu_pxm)
			return initiator;
	return NULL;
}
static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		if (target->memory_pxm == mem_pxm)
			return target;
	return NULL;
}
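
/*
 * Record an initiator proximity domain, skipping domains that do not map to
 * a NUMA node and domains that have already been recorded.
 */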
static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
		return;

	initiator = find_mem_initiator(cpu_pxm);
	if (initiator)
		return;

	initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
	if (!initiator)
		return;

	initiator->processor_pxm = cpu_pxm;
	list_add_tail(&initiator->node, &initiators);
}
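
/*
 * Record a memory target for a proximity domain, allocating it on first use
 * and tracking each of its SRAT address ranges in the per-target memregions
 * resource tree.
 */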
static __init void alloc_memory_target(unsigned int mem_pxm,
		resource_size_t start, resource_size_t len)
{
	struct memory_target *target;

	target = find_mem_target(mem_pxm);
	if (!target) {
		target = kzalloc(sizeof(*target), GFP_KERNEL);
		if (!target)
			return;
		target->memory_pxm = mem_pxm;
		target->processor_pxm = PXM_INVAL;
		target->memregions = (struct resource) {
			.name	= "ACPI mem",
			.start	= 0,
			.end	= -1,
			.flags	= IORESOURCE_MEM,
		};
		list_add_tail(&target->node, &targets);
		INIT_LIST_HEAD(&target->caches);
	}

	/*
	 * There are potentially multiple ranges per PXM, so record each
	 * in the per-target memregions resource tree.
	 */
	if (!__request_region(&target->memregions, start, len, "memory target",
				IORESOURCE_MEM))
		pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
				start, start + len, mem_pxm);
}
static __init const char *hmat_data_type(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		return "Access Latency";
	case ACPI_HMAT_READ_LATENCY:
		return "Read Latency";
	case ACPI_HMAT_WRITE_LATENCY:
		return "Write Latency";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		return "Access Bandwidth";
	case ACPI_HMAT_READ_BANDWIDTH:
		return "Read Bandwidth";
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return "Write Bandwidth";
	default:
		return "Reserved";
	}
}
static __init const char *hmat_data_type_suffix(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		return " nsec";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return " MB/s";
	default:
		return "";
	}
}
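
/*
 * Scale a raw HMAT entry by the table's entry base unit. With revision 1 the
 * product is further divided by 10 (e.g. entry 100, base 10 yields
 * DIV_ROUND_UP(100 * 10, 10) = 100); with revision 2 the latency types are
 * converted from picoseconds to nanoseconds. Returns 0 for the reserved
 * 0xffff entry, a zero entry, or a product that would overflow 32 bits.
 */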
static u32 hmat_normalize(u16 entry, u64 base, u8 type)
{
	u32 value;

	/*
	 * Check for invalid and overflow values
	 */
	if (entry == 0xffff || !entry)
		return 0;
	else if (base > (UINT_MAX / (entry)))
		return 0;

	/*
	 * Divide by the base unit for version 1, convert latency from
	 * picoseconds to nanoseconds if revision 2.
	 */
	value = entry * base;
	if (hmat_revision == 1) {
		if (value < 10)
			return 0;
		value = DIV_ROUND_UP(value, 10);
	} else if (hmat_revision == 2) {
		switch (type) {
		case ACPI_HMAT_ACCESS_LATENCY:
		case ACPI_HMAT_READ_LATENCY:
		case ACPI_HMAT_WRITE_LATENCY:
			value = DIV_ROUND_UP(value, 1000);
			break;
		default:
			break;
		}
	}
	return value;
}
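
/*
 * Fold one normalized value into the target's node_hmem_attrs; the "access"
 * data types update both the read and write attributes.
 */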
static void hmat_update_target_access(struct memory_target *target,
				      u8 type, u32 value)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		target->hmem_attrs.read_latency = value;
		target->hmem_attrs.write_latency = value;
		break;
	case ACPI_HMAT_READ_LATENCY:
		target->hmem_attrs.read_latency = value;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		target->hmem_attrs.write_latency = value;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		target->hmem_attrs.read_bandwidth = value;
		target->hmem_attrs.write_bandwidth = value;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		target->hmem_attrs.read_bandwidth = value;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		target->hmem_attrs.write_bandwidth = value;
		break;
	default:
		break;
	}
}
static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
	struct memory_locality *loc;

	loc = kzalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		pr_notice_once("Failed to allocate HMAT locality\n");
		return;
	}

	loc->hmat_loc = hmat_loc;
	list_add_tail(&loc->node, &localities);

	switch (hmat_loc->data_type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		localities_types[READ_LATENCY] = loc;
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_READ_LATENCY:
		localities_types[READ_LATENCY] = loc;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	default:
		break;
	}
}
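
/*
 * The System Locality Latency and Bandwidth Information Structure is laid
 * out as consumed below: the fixed header is followed by 'ipds' u32
 * initiator proximity domains, then 'tpds' u32 target proximity domains,
 * then an ipds x tpds matrix of u16 entries addressed as
 * entries[init * tpds + targ].
 */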
static __init int hmat_parse_locality(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_locality *hmat_loc = (void *)header;
	struct memory_target *target;
	unsigned int init, targ, total_size, ipds, tpds;
	u32 *inits, *targs, value;
	u16 *entries;
	u8 type, mem_hier;

	if (hmat_loc->header.length < sizeof(*hmat_loc)) {
		pr_notice("HMAT: Unexpected locality header length: %u\n",
			 hmat_loc->header.length);
		return -EINVAL;
	}

	type = hmat_loc->data_type;
	mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
		     sizeof(*inits) * ipds + sizeof(*targs) * tpds;
	if (hmat_loc->header.length < total_size) {
		pr_notice("HMAT: Unexpected locality header length:%u, minimum required:%u\n",
			 hmat_loc->header.length, total_size);
		return -EINVAL;
	}

	pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
		hmat_loc->flags, hmat_data_type(type), ipds, tpds,
		hmat_loc->entry_base_unit);

	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);
	for (init = 0; init < ipds; init++) {
		alloc_memory_initiator(inits[init]);
		for (targ = 0; targ < tpds; targ++) {
			value = hmat_normalize(entries[init * tpds + targ],
					       hmat_loc->entry_base_unit,
					       type);
			pr_info(" Initiator-Target[%u-%u]:%u%s\n",
				inits[init], targs[targ], value,
				hmat_data_type_suffix(type));

			if (mem_hier == ACPI_HMAT_MEMORY) {
				target = find_mem_target(targs[targ]);
				if (target && target->processor_pxm == inits[init])
					hmat_update_target_access(target, type, value);
			}
		}
	}

	if (mem_hier == ACPI_HMAT_MEMORY)
		hmat_add_locality(hmat_loc);

	return 0;
}
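
/*
 * Parse a Memory Side Cache Information Structure and attach the decoded
 * size, level, line size, indexing mode and write policy to the target's
 * list of caches for later registration with the node.
 */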
static __init int hmat_parse_cache(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_hmat_cache *cache = (void *)header;
	struct memory_target *target;
	struct target_cache *tcache;
	u32 attrs;

	if (cache->header.length < sizeof(*cache)) {
		pr_notice("HMAT: Unexpected cache header length: %u\n",
			 cache->header.length);
		return -EINVAL;
	}

	attrs = cache->cache_attributes;
	pr_info("HMAT: Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
		cache->memory_PD, cache->cache_size, attrs,
		cache->number_of_SMBIOShandles);

	target = find_mem_target(cache->memory_PD);
	if (!target)
		return 0;

	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
	if (!tcache) {
		pr_notice_once("Failed to allocate HMAT cache info\n");
		return 0;
	}

	tcache->cache_attrs.size = cache->cache_size;
	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;

	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
	case ACPI_HMAT_CA_DIRECT_MAPPED:
		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
		break;
	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
		break;
	case ACPI_HMAT_CA_NONE:
	default:
		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
		break;
	}

	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
	case ACPI_HMAT_CP_WB:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
		break;
	case ACPI_HMAT_CP_WT:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
		break;
	case ACPI_HMAT_CP_NONE:
	default:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
		break;
	}
	list_add_tail(&tcache->node, &target->caches);

	return 0;
}
static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
					      const unsigned long end)
{
	struct acpi_hmat_proximity_domain *p = (void *)header;
	struct memory_target *target = NULL;

	if (p->header.length != sizeof(*p)) {
		pr_notice("HMAT: Unexpected address range header length: %u\n",
			 p->header.length);
		return -EINVAL;
	}

	if (hmat_revision == 1)
		pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
			p->reserved3, p->reserved4, p->flags, p->processor_PD,
			p->memory_PD);
	else
		pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
			p->flags, p->processor_PD, p->memory_PD);

	if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
		target = find_mem_target(p->memory_PD);
		if (!target) {
			pr_debug("HMAT: Memory Domain missing from SRAT\n");
			return -EINVAL;
		}
	}
	if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
		int p_node = pxm_to_node(p->processor_PD);

		if (p_node == NUMA_NO_NODE) {
			pr_debug("HMAT: Invalid Processor Domain\n");
			return -EINVAL;
		}
		target->processor_pxm = p->processor_PD;
	}

	return 0;
}
static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_structure *hdr = (void *)header;

	if (!hdr)
		return -EINVAL;

	switch (hdr->type) {
	case ACPI_HMAT_TYPE_PROXIMITY:
		return hmat_parse_proximity_domain(header, end);
	case ACPI_HMAT_TYPE_LOCALITY:
		return hmat_parse_locality(header, end);
	case ACPI_HMAT_TYPE_CACHE:
		return hmat_parse_cache(header, end);
	default:
		return -EINVAL;
	}
}
static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_srat_mem_affinity *ma = (void *)header;

	if (!ma)
		return -EINVAL;
	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
		return 0;
	alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length);
	return 0;
}
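
/*
 * Look up the entry for an initiator/target pair in one locality table and
 * return its normalized value, or 0 if either proximity domain is not
 * listed in that table.
 */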
static u32 hmat_initiator_perf(struct memory_target *target,
			       struct memory_initiator *initiator,
			       struct acpi_hmat_locality *hmat_loc)
{
	unsigned int ipds, tpds, i, idx = 0, tdx = 0;
	u32 *inits, *targs;
	u16 *entries;

	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);

	for (i = 0; i < ipds; i++) {
		if (inits[i] == initiator->processor_pxm) {
			idx = i;
			break;
		}
	}

	if (i == ipds)
		return 0;

	for (i = 0; i < tpds; i++) {
		if (targs[i] == target->memory_pxm) {
			tdx = i;
			break;
		}
	}
	if (i == tpds)
		return 0;

	return hmat_normalize(entries[idx * tpds + tdx],
			      hmat_loc->entry_base_unit,
			      hmat_loc->data_type);
}
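
/*
 * Track the best value seen so far: lower wins for the latency types,
 * higher wins for the bandwidth types. Returns true when *best changed.
 */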
static bool hmat_update_best(u8 type, u32 value, u32 *best)
{
	bool updated = false;

	if (!value)
		return false;

	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		if (!*best || *best > value) {
			*best = value;
			updated = true;
		}
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		if (!*best || *best < value) {
			*best = value;
			updated = true;
		}
		break;
	}

	return updated;
}
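
/*
 * list_sort() comparator for initiators: order by proximity domain and, as
 * a side effect, mark each domain in the caller's candidate bitmap.
 */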
static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct memory_initiator *ia;
	struct memory_initiator *ib;
	unsigned long *p_nodes = priv;

	ia = list_entry(a, struct memory_initiator, node);
	ib = list_entry(b, struct memory_initiator, node);

	set_bit(ia->processor_pxm, p_nodes);
	set_bit(ib->processor_pxm, p_nodes);

	return ia->processor_pxm - ib->processor_pxm;
}
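
/*
 * Link a memory target to its compute node(s): either the local processor
 * domain supplied by the Address Range Structure, or the set of initiators
 * that share the best access attributes to this target.
 */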
static void hmat_register_target_initiators(struct memory_target *target)
{
	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
	struct memory_initiator *initiator;
	unsigned int mem_nid, cpu_nid;
	struct memory_locality *loc = NULL;
	u32 best = 0;
	int i;

	mem_nid = pxm_to_node(target->memory_pxm);
	/*
	 * If the Address Range Structure provides a local processor pxm, link
	 * only that one. Otherwise, find the best performance attributes and
	 * register all initiators that match.
	 */
	if (target->processor_pxm != PXM_INVAL) {
		cpu_nid = pxm_to_node(target->processor_pxm);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
		return;
	}

	if (list_empty(&localities))
		return;

	/*
	 * We need the initiator list sorted so we can use bitmap_clear for
	 * previously set initiators when we find a better memory accessor.
	 * We'll also use the sorting to prime the candidate nodes with known
	 * initiators.
	 */
	bitmap_zero(p_nodes, MAX_NUMNODES);
	list_sort(p_nodes, &initiators, initiator_cmp);
	for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
		loc = localities_types[i];
		if (!loc)
			continue;

		best = 0;
		list_for_each_entry(initiator, &initiators, node) {
			u32 value;

			if (!test_bit(initiator->processor_pxm, p_nodes))
				continue;

			value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
			if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
				bitmap_clear(p_nodes, 0, initiator->processor_pxm);
			if (value != best)
				clear_bit(initiator->processor_pxm, p_nodes);
		}
		if (best)
			hmat_update_target_access(target, loc->hmat_loc->data_type, best);
	}

	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
		cpu_nid = pxm_to_node(i);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
	}
}
static void hmat_register_target_cache(struct memory_target *target)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);
	struct target_cache *tcache;

	list_for_each_entry(tcache, &target->caches, node)
		node_add_cache(mem_nid, &tcache->cache_attrs);
}
static void hmat_register_target_perf(struct memory_target *target)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);
	node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
}
static void hmat_register_target_device(struct memory_target *target,
		struct resource *r)
{
	/* define a clean / non-busy resource for the platform device */
	struct resource res = {
		.start = r->start,
		.end = r->end,
		.flags = IORESOURCE_MEM,
	};
	struct platform_device *pdev;
	struct memregion_info info;
	int rc, id;

	rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
			IORES_DESC_SOFT_RESERVED);
	if (rc != REGION_INTERSECTS)
		return;

	id = memregion_alloc(GFP_KERNEL);
	if (id < 0) {
		pr_err("memregion allocation failure for %pr\n", &res);
		return;
	}

	pdev = platform_device_alloc("hmem", id);
	if (!pdev) {
		pr_err("hmem device allocation failure for %pr\n", &res);
		goto out_pdev;
	}

	pdev->dev.numa_node = acpi_map_pxm_to_online_node(target->memory_pxm);
	info = (struct memregion_info) {
		.target_node = acpi_map_pxm_to_node(target->memory_pxm),
	};
	rc = platform_device_add_data(pdev, &info, sizeof(info));
	if (rc < 0) {
		pr_err("hmem memregion_info allocation failure for %pr\n", &res);
		goto out_pdev;
	}

	rc = platform_device_add_resources(pdev, &res, 1);
	if (rc < 0) {
		pr_err("hmem resource allocation failure for %pr\n", &res);
		goto out_resource;
	}

	rc = platform_device_add(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "device add failed for %pr\n", &res);
		goto out_resource;
	}

	return;

out_resource:
	put_device(&pdev->dev);
out_pdev:
	memregion_free(id);
}
static void hmat_register_target_devices(struct memory_target *target)
{
	struct resource *res;

	/*
	 * Do not bother creating devices if no driver is available to
	 * consume them.
	 */
	if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
		return;

	for (res = target->memregions.child; res; res = res->sibling)
		hmat_register_target_device(target, res);
}
static void hmat_register_target(struct memory_target *target)
{
	int nid = pxm_to_node(target->memory_pxm);

	/*
	 * Devices may belong to either an offline or online
	 * node, so unconditionally add them.
	 */
	hmat_register_target_devices(target);

	/*
	 * Skip offline nodes. This can happen when memory
	 * marked EFI_MEMORY_SP, "specific purpose", is applied
	 * to all the memory in a proximity domain leading to
	 * the node being marked offline / unplugged, or if a
	 * memory-only "hotplug" node is offline.
	 */
	if (nid == NUMA_NO_NODE || !node_online(nid))
		return;

	mutex_lock(&target_lock);
	if (!target->registered) {
		hmat_register_target_initiators(target);
		hmat_register_target_cache(target);
		hmat_register_target_perf(target);
		target->registered = true;
	}
	mutex_unlock(&target_lock);
}
static void hmat_register_targets(void)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		hmat_register_target(target);
}
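
/*
 * Memory hotplug notifier: when a node comes online, register the HMAT
 * target that describes it, if one was parsed.
 */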
static int hmat_callback(struct notifier_block *self,
			 unsigned long action, void *arg)
{
	struct memory_target *target;
	struct memory_notify *mnb = arg;
	int pxm, nid = mnb->status_change_nid;

	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
		return NOTIFY_OK;

	pxm = node_to_pxm(nid);
	target = find_mem_target(pxm);
	if (!target)
		return NOTIFY_OK;

	hmat_register_target(target);
	return NOTIFY_OK;
}
= {
770 .notifier_call
= hmat_callback
,
static __init void hmat_free_structures(void)
{
	struct memory_target *target, *tnext;
	struct memory_locality *loc, *lnext;
	struct memory_initiator *initiator, *inext;
	struct target_cache *tcache, *cnext;

	list_for_each_entry_safe(target, tnext, &targets, node) {
		struct resource *res, *res_next;

		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
			list_del(&tcache->node);
			kfree(tcache);
		}

		list_del(&target->node);
		res = target->memregions.child;
		while (res) {
			res_next = res->sibling;
			__release_region(&target->memregions, res->start,
					resource_size(res));
			res = res_next;
		}
		kfree(target);
	}

	list_for_each_entry_safe(initiator, inext, &initiators, node) {
		list_del(&initiator->node);
		kfree(initiator);
	}

	list_for_each_entry_safe(loc, lnext, &localities, node) {
		list_del(&loc->node);
		kfree(loc);
	}
}
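
/*
 * Walk SRAT to discover memory targets, then parse all HMAT subtables and
 * register the resulting attributes with the NUMA node interfaces.
 */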
static __init int hmat_init(void)
{
	struct acpi_table_header *tbl;
	enum acpi_hmat_type i;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		return 0;

	if (acpi_table_parse_entries(ACPI_SIG_SRAT,
				sizeof(struct acpi_table_srat),
				ACPI_SRAT_TYPE_MEMORY_AFFINITY,
				srat_parse_mem_affinity, 0) < 0)
		goto out_put;
	acpi_put_table(tbl);

	status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		goto out_put;

	hmat_revision = tbl->revision;
	switch (hmat_revision) {
	case 1:
	case 2:
		break;
	default:
		pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision);
		goto out_put;
	}

	for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
		if (acpi_table_parse_entries(ACPI_SIG_HMAT,
					     sizeof(struct acpi_table_hmat), i,
					     hmat_parse_subtable, 0) < 0) {
			pr_notice("Ignoring HMAT: Invalid table\n");
			goto out_put;
		}
	}
	hmat_register_targets();

	/* Keep the table and structures if the notifier may use them */
	if (!register_hotmemory_notifier(&hmat_callback_nb))
		return 0;
out_put:
	hmat_free_structures();
	acpi_put_table(tbl);
	return 0;
}
device_initcall(hmat_init);