// SPDX-License-Identifier: GPL-2.0-only

#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>
#include <linux/nmi.h>
#include <asm/unaccepted_memory.h>

/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);
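
/*
 * A range (in bitmap-bit, i.e. unit_size, granularity) that is currently
 * being accepted. Entries stay on accepting_list for the duration of the
 * acceptance so that concurrent requests for the same units can wait.
 */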
struct accept_range {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};

static LIST_HEAD(accepting_list);

/*
 * accept_memory() -- Consult bitmap and accept the memory if needed.
 *
 * Only memory that is explicitly marked as unaccepted in the bitmap requires
 * an action. All the remaining memory is implicitly accepted and doesn't need
 * acceptance.
 *
 * No need to accept:
 *  - anything if the system has no unaccepted table;
 *  - memory that is below phys_base;
 *  - memory that is above the memory that is addressable by the bitmap;
 */
void accept_memory(phys_addr_t start, unsigned long size)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long range_start, range_end;
	struct accept_range range, *entry;
	phys_addr_t end = start + size;
	unsigned long flags;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * load_unaligned_zeropad() can lead to unwanted loads across page
	 * boundaries. The unwanted loads are typically harmless. But, they
	 * might be made to totally unrelated or even unmapped memory.
	 * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
	 * #VE) to recover from these unwanted loads.
	 *
	 * But, this approach does not work for unaccepted memory. For TDX, a
	 * load from unaccepted memory will not lead to a recoverable exception
	 * within the guest. The guest will exit to the VMM where the only
	 * recourse is to terminate the guest.
	 *
	 * There are two parts to fix this issue and comprehensively avoid
	 * access to unaccepted memory. Together these ensure that an extra
	 * "guard" page is accepted in addition to the memory that needs to be
	 * used.
	 *
	 * 1. Implicitly extend the range_contains_unaccepted_memory(start, size)
	 *    checks up to the next unit_size if 'start+size' is aligned on a
	 *    unit_size boundary.
	 *
	 * 2. Implicitly extend accept_memory(start, size) to the next unit_size
	 *    if 'start+size' is aligned on a unit_size boundary. (immediately
	 *    following this comment)
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;
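
	/* Convert the request to bitmap bit (unit_size) indices. */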
	range.start = start / unit_size;
	range.end = DIV_ROUND_UP(end, unit_size);
retry:
	spin_lock_irqsave(&unaccepted_memory_lock, flags);

	/*
	 * Check if anybody else is already accepting the same range of memory.
	 *
	 * The check is done with unit_size granularity. It is crucial to catch
	 * all accept requests to the same unit_size block, even if they don't
	 * overlap on the physical address level.
	 */
	list_for_each_entry(entry, &accepting_list, list) {
		if (entry->end <= range.start)
			continue;
		if (entry->start >= range.end)
			continue;

		/*
		 * Somebody else is accepting the range, or at least part of it.
		 *
		 * Drop the lock and retry until it is complete.
		 */
		spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
		goto retry;
	}

	/*
	 * Register that the range is about to be accepted.
	 * Make sure nobody else will accept it.
	 */
	list_add(&range.list, &accepting_list);
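
	/*
	 * Accept each contiguous run of still-unaccepted units within the
	 * requested range.
	 */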
	range_start = range.start;
	for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
				   range.end) {
		unsigned long phys_start, phys_end;
		unsigned long len = range_end - range_start;

		phys_start = range_start * unit_size + unaccepted->phys_base;
		phys_end = range_end * unit_size + unaccepted->phys_base;

		/*
		 * Keep interrupts disabled until the accept operation is
		 * complete in order to prevent deadlocks.
		 *
		 * Enabling interrupts before calling arch_accept_memory()
		 * creates an opportunity for an interrupt handler to request
		 * acceptance for the same memory. The handler will continuously
		 * spin with interrupts disabled, preventing other tasks from
		 * making progress with the acceptance process.
		 */
		spin_unlock(&unaccepted_memory_lock);

		arch_accept_memory(phys_start, phys_end);
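
		/* Re-take the lock before updating the bitmap. */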
		spin_lock(&unaccepted_memory_lock);
		bitmap_clear(unaccepted->bitmap, range_start, len);
	}
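
	/* The whole range is now accepted; let waiting requests proceed. */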
	list_del(&range.list);
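
	/*
	 * Accepting a large range can take long enough to trigger the soft
	 * lockup detector; reset it.
	 */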
	touch_softlockup_watchdog();

	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
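
/*
 * Check whether any memory in the range [start, start + size) is still
 * marked as unaccepted in the bitmap.
 */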
bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size)
{
	struct efi_unaccepted_memory *unaccepted;
	phys_addr_t end = start + size;
	unsigned long flags;
	bool ret = false;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return false;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return false;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * Also consider the unaccepted state of the *next* page. See fix #1 in
	 * the comment on load_unaligned_zeropad() in accept_memory().
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

	spin_lock_irqsave(&unaccepted_memory_lock, flags);
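	/* Walk the range one unit at a time and test the corresponding bit. */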
	while (start < end) {
		if (test_bit(start / unit_size, unaccepted->bitmap)) {
			ret = true;
			break;
		}

		start += unit_size;
	}
	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);

	return ret;
}
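
/*
 * For kdump: report unaccepted pages as not RAM so that /proc/vmcore does
 * not try to read them. Touching unaccepted memory from the crash kernel
 * could be fatal (e.g. an unrecoverable exit to the VMM under TDX).
 */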
#ifdef CONFIG_PROC_VMCORE
static bool unaccepted_memory_vmcore_pfn_is_ram(struct vmcore_cb *cb,
						unsigned long pfn)
{
	return !pfn_is_unaccepted_memory(pfn);
}

static struct vmcore_cb vmcore_cb = {
	.pfn_is_ram = unaccepted_memory_vmcore_pfn_is_ram,
};

static int __init unaccepted_memory_init_kdump(void)
{
	register_vmcore_cb(&vmcore_cb);
	return 0;
}
core_initcall(unaccepted_memory_init_kdump);
#endif /* CONFIG_PROC_VMCORE */