// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <linux/acpi.h>
#include <linux/miscdevice.h>
#include <linux/mman.h>
#include <linux/security.h>
#include <linux/suspend.h>
#include "driver.h"
#include "encl.h"
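
/*
 * Reserved-bit masks computed from CPUID in sgx_drv_init() below. Code
 * elsewhere in the driver (not shown in this file) presumably uses them to
 * reject enclave attribute, XFRM and MISCSELECT values that set bits the
 * hardware does not support.
 */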
u64 sgx_attributes_reserved_mask;
u64 sgx_xfrm_reserved_mask = ~0x3;
u32 sgx_misc_reserved_mask;
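
/*
 * open(): allocate and initialize a fresh enclave context and attach it to
 * the file. struct sgx_encl and its fields are assumed to be declared in the
 * local driver headers (not shown here).
 */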
static int sgx_open(struct inode *inode, struct file *file)
{
	struct sgx_encl *encl;
	int ret;

	encl = kzalloc(sizeof(*encl), GFP_KERNEL);
	if (!encl)
		return -ENOMEM;

	kref_init(&encl->refcount);
	xa_init(&encl->page_array);
	mutex_init(&encl->lock);
	INIT_LIST_HEAD(&encl->va_pages);
	INIT_LIST_HEAD(&encl->mm_list);
	spin_lock_init(&encl->mm_lock);

	ret = init_srcu_struct(&encl->srcu);
	if (ret) {
		kfree(encl);
		return ret;
	}

	file->private_data = encl;

	return 0;
}
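
/*
 * release(): called when the last file reference is dropped. The final
 * kref_put() below drops the file's own reference; sgx_encl_release()
 * (defined elsewhere, not shown) is assumed to free the enclave once all
 * outstanding references are gone.
 */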
static int sgx_release(struct inode *inode, struct file *file)
{
	struct sgx_encl *encl = file->private_data;
	struct sgx_encl_mm *encl_mm;

	/*
	 * Drain the remaining mm_list entries. At this point the list contains
	 * entries for processes that have closed the enclave file but have not
	 * exited yet. Processes that have already exited were removed from the
	 * list by sgx_mmu_notifier_release().
	 */
	for ( ; ; ) {
		spin_lock(&encl->mm_lock);

		if (list_empty(&encl->mm_list)) {
			encl_mm = NULL;
		} else {
			encl_mm = list_first_entry(&encl->mm_list,
						   struct sgx_encl_mm, list);
			list_del_rcu(&encl_mm->list);
		}

		spin_unlock(&encl->mm_lock);

		/* The enclave is no longer mapped by any mm. */
		if (!encl_mm)
			break;

		synchronize_srcu(&encl->srcu);
		mmu_notifier_unregister(&encl_mm->mmu_notifier, encl_mm->mm);
		kfree(encl_mm);

		/* 'encl_mm' is gone, put encl_mm->encl reference: */
		kref_put(&encl->refcount, sgx_encl_release);
	}

	kref_put(&encl->refcount, sgx_encl_release);

	return 0;
}
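
/*
 * mmap(): sgx_encl_may_map() (defined elsewhere) is assumed to validate the
 * requested range and protection bits against the enclave's pages before the
 * mapping is allowed. VM_PFNMAP and VM_IO mark the VMA as a raw PFN/IO
 * mapping, while VM_DONTEXPAND | VM_DONTDUMP keep it from being grown or
 * included in core dumps.
 */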
static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct sgx_encl *encl = file->private_data;
	int ret;

	ret = sgx_encl_may_map(encl, vma->vm_start, vma->vm_end, vma->vm_flags);
	if (ret)
		return ret;

	ret = sgx_encl_mm_add(encl, vma->vm_mm);
	if (ret)
		return ret;

	vma->vm_ops = &sgx_vm_ops;
	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
	vma->vm_private_data = encl;

	return 0;
}
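
/*
 * MAP_PRIVATE mappings are rejected below, presumably because enclave pages
 * cannot usefully be mapped copy-on-write. With MAP_FIXED the caller's
 * address is returned as-is; otherwise address selection is delegated to the
 * generic helper.
 */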
static unsigned long sgx_get_unmapped_area(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if ((flags & MAP_TYPE) == MAP_PRIVATE)
		return -EINVAL;

	if (flags & MAP_FIXED)
		return addr;

	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
}
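
/*
 * The compat path simply forwards to the native ioctl handler; the SGX ioctl
 * argument structures are presumably layout-compatible between 32-bit and
 * 64-bit userspace, so no translation is needed.
 */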
static long sgx_compat_ioctl(struct file *filep, unsigned int cmd,
			     unsigned long arg)
{
	return sgx_ioctl(filep, cmd, arg);
}
static const struct file_operations sgx_encl_fops = {
	.owner			= THIS_MODULE,
	.open			= sgx_open,
	.release		= sgx_release,
	.unlocked_ioctl		= sgx_ioctl,
	.compat_ioctl		= sgx_compat_ioctl,
	.mmap			= sgx_mmap,
	.get_unmapped_area	= sgx_get_unmapped_area,
};
static struct miscdevice sgx_dev_enclave = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgx_enclave",
	.nodename = "sgx_enclave",
	.fops = &sgx_encl_fops,
};
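
/*
 * Driver initialization: require SGX Launch Control, read the SGX CPUID
 * leaf (sub-leaves 0 and 1) to compute the reserved-bit masks above, and
 * register the /dev/sgx_enclave misc device.
 */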
int __init sgx_drv_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	u64 attr_mask;
	u64 xfrm_mask;
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_SGX_LC))
		return -ENODEV;

	cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);

	if (!(eax & 1)) {
		pr_err("SGX disabled: SGX1 instruction support not available.\n");
		return -ENODEV;
	}

	sgx_misc_reserved_mask = ~ebx | SGX_MISC_RESERVED_MASK;

	cpuid_count(SGX_CPUID, 1, &eax, &ebx, &ecx, &edx);

	attr_mask = (((u64)ebx) << 32) + (u64)eax;
	sgx_attributes_reserved_mask = ~attr_mask | SGX_ATTR_RESERVED_MASK;

	if (cpu_feature_enabled(X86_FEATURE_OSXSAVE)) {
		xfrm_mask = (((u64)edx) << 32) + (u64)ecx;
		sgx_xfrm_reserved_mask = ~xfrm_mask;
	}

	ret = misc_register(&sgx_dev_enclave);
	if (ret)
		return ret;

	return 0;
}
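
/*
 * Usage sketch (assumption, not part of this file): sgx_drv_init() is
 * expected to be called from the core SGX setup code, and userspace then
 * obtains an enclave fd and maps it with MAP_SHARED, e.g.:
 *
 *	int fd = open("/dev/sgx_enclave", O_RDWR);
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * A MAP_PRIVATE mapping would be rejected by sgx_get_unmapped_area() above.
 */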