// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory Encryption Support Common Code
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/cc_platform.h>
#include <linux/mem_encrypt.h>
#include <linux/virtio_anchor.h>
19 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
20 bool force_dma_unencrypted(struct device
*dev
)
23 * For SEV, all DMA must be to unencrypted addresses.
25 if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT
))
29 * For SME, all DMA must be to unencrypted addresses if the
30 * device does not support DMA to addresses that include the
33 if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT
)) {
34 u64 dma_enc_mask
= DMA_BIT_MASK(__ffs64(sme_me_mask
));
35 u64 dma_dev_mask
= min_not_zero(dev
->coherent_dma_mask
,
38 if (dma_dev_mask
<= dma_enc_mask
)
45 static void print_mem_encrypt_feature_info(void)
47 pr_info("Memory Encryption Features active: ");
51 pr_cont("Intel TDX\n");
56 /* Secure Memory Encryption */
57 if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT
)) {
59 * SME is mutually exclusive with any of the SEV
66 /* Secure Encrypted Virtualization */
67 if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT
))
70 /* Encrypted Register State */
71 if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT
))
74 /* Secure Nested Paging */
75 if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP
))
88 /* Architecture __weak replacement functions */
89 void __init
mem_encrypt_init(void)
91 if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT
))
94 /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
95 swiotlb_update_mem_attributes();
97 print_mem_encrypt_feature_info();
100 void __init
mem_encrypt_setup_arch(void)
102 phys_addr_t total_mem
= memblock_phys_mem_size();
106 * Do RMP table fixups after the e820 tables have been setup by
107 * e820__memory_setup().
109 if (cc_platform_has(CC_ATTR_HOST_SEV_SNP
))
110 snp_fixup_e820_tables();
112 if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT
))
116 * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
117 * Kernel uses SWIOTLB to make this happen without changing device
118 * drivers. However, depending on the workload being run, the
119 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
120 * run out of buffers for DMA, resulting in I/O errors and/or
121 * performance degradation especially with high I/O workloads.
123 * Adjust the default size of SWIOTLB using a percentage of guest
124 * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
125 * memory is allocated from low memory, ensure that the adjusted size
126 * is within the limits of low available memory.
128 * The percentage of guest memory used here for SWIOTLB buffers
129 * is more of an approximation of the static adjustment which
130 * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6%
132 size
= total_mem
* 6 / 100;
133 size
= clamp_val(size
, IO_TLB_DEFAULT_SIZE
, SZ_1G
);
134 swiotlb_adjust_size(size
);
136 /* Set restricted memory access for virtio. */
137 virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc
);