// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>

#include "mm_internal.h"

/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);

bool sev_enabled __section(.data);

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
                                       unsigned long size, bool enc)
{
        void *src, *dst;
        size_t len;

        if (!sme_me_mask)
                return;

        wbinvd();

        /*
         * There are a limited number of early mapping slots, so map (at
         * most) one page at a time.
         */
        while (size) {
                len = min_t(size_t, sizeof(sme_early_buffer), size);

                /*
                 * Create mappings for the current and desired format of
                 * the memory. Use a write-protected mapping for the source.
                 */
                src = enc ? early_memremap_decrypted_wp(paddr, len) :
                            early_memremap_encrypted_wp(paddr, len);

                dst = enc ? early_memremap_encrypted(paddr, len) :
                            early_memremap_decrypted(paddr, len);

                /*
                 * If a mapping can't be obtained to perform the operation,
                 * then eventual access of that area in the desired mode
                 * will cause a crash.
                 */
                BUG_ON(!src || !dst);

                /*
                 * Use a temporary buffer, of cache-line multiple size, to
                 * avoid data corruption as documented in the APM.
                 */
                memcpy(sme_early_buffer, src, len);
                memcpy(dst, sme_early_buffer, len);

                early_memunmap(dst, len);
                early_memunmap(src, len);

                paddr += len;
                size -= len;
        }
}

void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
        __sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
        __sme_early_enc_dec(paddr, size, false);
}
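
/*
 * These wrappers are the in-place conversion entry points: e.g.,
 * __set_clr_pte_enc() below uses them to convert page contents before
 * the C-bit of the mapping is flipped. A minimal (hypothetical) call
 * for a single page would be:
 *
 *        sme_early_encrypt(page_to_phys(page), PAGE_SIZE);
 */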

static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
                                             bool map)
{
        unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
        pmdval_t pmd_flags, pmd;

        /* Use early_pmd_flags but remove the encryption mask */
        pmd_flags = __sme_clr(early_pmd_flags);

        do {
                pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
                __early_make_pgtable((unsigned long)vaddr, pmd);

                vaddr += PMD_SIZE;
                paddr += PMD_SIZE;
                size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
        } while (size);

        __native_flush_tlb();
}
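
/*
 * The boot_params structure and the kernel command line are placed in
 * memory by the boot loader, which runs with the encryption bit clear,
 * so under SME they must be accessed through decrypted mappings. The
 * two helpers below install (or remove) PMD-sized early page table
 * entries with the encryption mask cleared for exactly those ranges.
 */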

void __init sme_unmap_bootdata(char *real_mode_data)
{
        struct boot_params *boot_data;
        unsigned long cmdline_paddr;

        if (!sme_active())
                return;

        /* Get the command line address before unmapping the real_mode_data */
        boot_data = (struct boot_params *)real_mode_data;
        cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

        __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

        if (!cmdline_paddr)
                return;

        __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

void __init sme_map_bootdata(char *real_mode_data)
{
        struct boot_params *boot_data;
        unsigned long cmdline_paddr;

        if (!sme_active())
                return;

        __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

        /* Get the command line address after mapping the real_mode_data */
        boot_data = (struct boot_params *)real_mode_data;
        cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

        if (!cmdline_paddr)
                return;

        __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

void __init sme_early_init(void)
{
        unsigned int i;

        if (!sme_me_mask)
                return;

        early_pmd_flags = __sme_set(early_pmd_flags);

        __supported_pte_mask = __sme_set(__supported_pte_mask);

        /* Update the protection map with memory encryption mask */
        for (i = 0; i < ARRAY_SIZE(protection_map); i++)
                protection_map[i] = pgprot_encrypted(protection_map[i]);

        if (sev_active())
                swiotlb_force = SWIOTLB_FORCE;
}
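
/*
 * For reference (the definitions live in linux/mem_encrypt.h):
 * __sme_set(x) is (x | sme_me_mask) and __sme_clr(x) is
 * (x & ~sme_me_mask). After sme_early_init() runs, every entry in
 * protection_map therefore carries the C-bit and new mappings default
 * to encrypted memory.
 */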

static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
        pgprot_t old_prot, new_prot;
        unsigned long pfn, pa, size;
        pte_t new_pte;

        switch (level) {
        case PG_LEVEL_4K:
                pfn = pte_pfn(*kpte);
                old_prot = pte_pgprot(*kpte);
                break;
        case PG_LEVEL_2M:
                pfn = pmd_pfn(*(pmd_t *)kpte);
                old_prot = pmd_pgprot(*(pmd_t *)kpte);
                break;
        case PG_LEVEL_1G:
                pfn = pud_pfn(*(pud_t *)kpte);
                old_prot = pud_pgprot(*(pud_t *)kpte);
                break;
        default:
                return;
        }

        new_prot = old_prot;
        if (enc)
                pgprot_val(new_prot) |= _PAGE_ENC;
        else
                pgprot_val(new_prot) &= ~_PAGE_ENC;

        /* If the protection hasn't changed, there is nothing to do. */
        if (pgprot_val(old_prot) == pgprot_val(new_prot))
                return;

        pa = pfn << page_level_shift(level);
        size = page_level_size(level);

        /*
         * We are going to perform in-place en-/decryption and change the
         * physical page attribute from C=1 to C=0 or vice versa. Flush the
         * caches to ensure that data gets accessed with the correct C-bit.
         */
        clflush_cache_range(__va(pa), size);

        /* Encrypt/decrypt the contents in-place */
        if (enc)
                sme_early_encrypt(pa, size);
        else
                sme_early_decrypt(pa, size);

        /* Change the page encryption mask. */
        new_pte = pfn_pte(pfn, new_prot);
        set_pte_atomic(kpte, new_pte);
}

static int __init early_set_memory_enc_dec(unsigned long vaddr,
                                           unsigned long size, bool enc)
{
        unsigned long vaddr_end, vaddr_next;
        unsigned long psize, pmask;
        int split_page_size_mask;
        int level, ret;
        pte_t *kpte;

        vaddr_next = vaddr;
        vaddr_end = vaddr + size;

        for (; vaddr < vaddr_end; vaddr = vaddr_next) {
                kpte = lookup_address(vaddr, &level);
                if (!kpte || pte_none(*kpte)) {
                        ret = 1;
                        goto out;
                }

                if (level == PG_LEVEL_4K) {
                        __set_clr_pte_enc(kpte, level, enc);
                        vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
                        continue;
                }

                psize = page_level_size(level);
                pmask = page_level_mask(level);

                /*
                 * Check whether we can change the large page in one go.
                 * We request a split when the address is not aligned or
                 * the number of pages to set/clear the encryption bit on
                 * is smaller than the number of pages in the large page.
                 */
                if (vaddr == (vaddr & pmask) &&
                    ((vaddr_end - vaddr) >= psize)) {
                        __set_clr_pte_enc(kpte, level, enc);
                        vaddr_next = (vaddr & pmask) + psize;
                        continue;
                }

                /*
                 * The virtual address is part of a larger page, create the next
                 * level page table mapping (4K or 2M). If it is part of a 2M
                 * page then we request a split of the large page into 4K
                 * chunks; a 1GB large page is split into 2M pages.
                 */
                if (level == PG_LEVEL_2M)
                        split_page_size_mask = 0;
                else
                        split_page_size_mask = 1 << PG_LEVEL_2M;

                /*
                 * kernel_physical_mapping_change() does not flush the TLBs, so
                 * a TLB flush is required after we exit from the for loop.
                 */
                kernel_physical_mapping_change(__pa(vaddr & pmask),
                                               __pa((vaddr_end & pmask) + psize),
                                               split_page_size_mask);
        }

        ret = 0;

out:
        __flush_tlb_all();
        return ret;
}

int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
        return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
        return early_set_memory_enc_dec(vaddr, size, true);
}
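
/*
 * Minimal usage sketch (hypothetical caller, in the style of the KVM
 * guest code that shares per-CPU data with the hypervisor): clear the
 * C-bit on a buffer before first use, treating a nonzero return as
 * "the mapping was left unchanged":
 *
 *        if (early_set_memory_decrypted((unsigned long)buf, buf_size))
 *                panic("buffer must be mapped decrypted");
 */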

/*
 * SME and SEV are very similar but they are not the same, so there are
 * times that the kernel will need to distinguish between SME and SEV. The
 * sme_active() and sev_active() functions are used for this. When a
 * distinction isn't needed, the mem_encrypt_active() function can be used.
 *
 * The trampoline code is a good example of this requirement. Before
 * paging is activated, SME will access all memory as decrypted, but SEV
 * will access all memory as encrypted. So, when APs are being brought
 * up under SME the trampoline area cannot be encrypted, whereas under SEV
 * the trampoline area must be encrypted.
 */
bool sme_active(void)
{
        return sme_me_mask && !sev_enabled;
}

bool sev_active(void)
{
        return sme_me_mask && sev_enabled;
}
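
/*
 * Truth table of the two predicates, derived directly from the code
 * above:
 *
 *        sme_me_mask   sev_enabled   sme_active()   sev_active()
 *        0             -             false          false
 *        nonzero       false         true           false
 *        nonzero       true          false          true
 */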

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
        /*
         * For SEV, all DMA must be to unencrypted addresses.
         */
        if (sev_active())
                return true;

        /*
         * For SME, all DMA must be to unencrypted addresses if the
         * device does not support DMA to addresses that include the
         * encryption mask.
         */
        if (sme_active()) {
                u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
                u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
                                                dev->bus_dma_limit);

                if (dma_dev_mask <= dma_enc_mask)
                        return true;
        }

        return false;
}
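
/*
 * Worked example with hypothetical numbers: if the C-bit is bit 47,
 * then sme_me_mask == 1ULL << 47, __ffs64(sme_me_mask) == 47 and
 * dma_enc_mask == DMA_BIT_MASK(47) == (1ULL << 47) - 1. A device with
 * a 32-bit coherent_dma_mask (DMA_BIT_MASK(32)) then satisfies
 * dma_dev_mask <= dma_enc_mask, so its DMA must use unencrypted
 * memory (typically bounced through SWIOTLB).
 */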

/* Architecture __weak replacement functions */
void __init mem_encrypt_free_decrypted_mem(void)
{
        unsigned long vaddr, vaddr_end, npages;
        int r;

        vaddr = (unsigned long)__start_bss_decrypted_unused;
        vaddr_end = (unsigned long)__end_bss_decrypted;
        npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

        /*
         * The unused memory range was mapped decrypted, change the encryption
         * attribute from decrypted to encrypted before freeing it.
         */
        if (mem_encrypt_active()) {
                r = set_memory_encrypted(vaddr, npages);
                if (r) {
                        pr_warn("failed to free unused decrypted pages\n");
                        return;
                }
        }

        free_init_pages("unused decrypted", vaddr, vaddr_end);
}

void __init mem_encrypt_init(void)
{
        if (!sme_me_mask)
                return;

        /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
        swiotlb_update_mem_attributes();

        /*
         * With SEV, we need to unroll the rep string I/O instructions.
         */
        if (sev_active())
                static_branch_enable(&sev_enable_key);

        pr_info("AMD %s active\n",
                sev_active() ? "Secure Encrypted Virtualization (SEV)"
                             : "Secure Memory Encryption (SME)");
}