1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2016-20 Intel Corporation. */
#include <assert.h>
#include <elf.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
22 void encl_delete(struct encl
*encl
)
24 struct encl_segment
*heap_seg
;
27 munmap((void *)encl
->encl_base
, encl
->encl_size
);
30 munmap(encl
->bin
, encl
->bin_size
);
35 if (encl
->segment_tbl
) {
36 heap_seg
= &encl
->segment_tbl
[encl
->nr_segments
- 1];
37 munmap(heap_seg
->src
, heap_seg
->size
);
38 free(encl
->segment_tbl
);
41 memset(encl
, 0, sizeof(*encl
));
44 static bool encl_map_bin(const char *path
, struct encl
*encl
)
51 fd
= open(path
, O_RDONLY
);
53 perror("enclave executable open()");
57 ret
= stat(path
, &sb
);
59 perror("enclave executable stat()");
63 bin
= mmap(NULL
, sb
.st_size
, PROT_READ
, MAP_PRIVATE
, fd
, 0);
64 if (bin
== MAP_FAILED
) {
65 perror("enclave executable mmap()");
70 encl
->bin_size
= sb
.st_size
;
80 static bool encl_ioc_create(struct encl
*encl
)
82 struct sgx_secs
*secs
= &encl
->secs
;
83 struct sgx_enclave_create ioc
;
86 assert(encl
->encl_base
!= 0);
88 memset(secs
, 0, sizeof(*secs
));
89 secs
->ssa_frame_size
= 1;
90 secs
->attributes
= SGX_ATTR_MODE64BIT
;
92 secs
->base
= encl
->encl_base
;
93 secs
->size
= encl
->encl_size
;
95 ioc
.src
= (unsigned long)secs
;
96 rc
= ioctl(encl
->fd
, SGX_IOC_ENCLAVE_CREATE
, &ioc
);
98 perror("SGX_IOC_ENCLAVE_CREATE failed");
99 munmap((void *)secs
->base
, encl
->encl_size
);
106 static bool encl_ioc_add_pages(struct encl
*encl
, struct encl_segment
*seg
)
108 struct sgx_enclave_add_pages ioc
;
109 struct sgx_secinfo secinfo
;
112 memset(&secinfo
, 0, sizeof(secinfo
));
113 secinfo
.flags
= seg
->flags
;
115 ioc
.src
= (uint64_t)seg
->src
;
116 ioc
.offset
= seg
->offset
;
117 ioc
.length
= seg
->size
;
118 ioc
.secinfo
= (unsigned long)&secinfo
;
120 ioc
.flags
= SGX_PAGE_MEASURE
;
124 rc
= ioctl(encl
->fd
, SGX_IOC_ENCLAVE_ADD_PAGES
, &ioc
);
126 perror("SGX_IOC_ENCLAVE_ADD_PAGES failed");
134 * Parse the enclave code's symbol table to locate and return address of
135 * the provided symbol
137 uint64_t encl_get_entry(struct encl
*encl
, const char *symbol
)
139 Elf64_Sym
*symtab
= NULL
;
140 char *sym_names
= NULL
;
141 Elf64_Shdr
*sections
;
147 sections
= encl
->bin
+ ehdr
->e_shoff
;
149 for (i
= 0; i
< ehdr
->e_shnum
; i
++) {
150 if (sections
[i
].sh_type
== SHT_SYMTAB
) {
151 symtab
= (Elf64_Sym
*)((char *)encl
->bin
+ sections
[i
].sh_offset
);
152 num_sym
= sections
[i
].sh_size
/ sections
[i
].sh_entsize
;
157 for (i
= 0; i
< ehdr
->e_shnum
; i
++) {
158 if (sections
[i
].sh_type
== SHT_STRTAB
) {
159 sym_names
= (char *)encl
->bin
+ sections
[i
].sh_offset
;
164 if (!symtab
|| !sym_names
)
167 for (i
= 0; i
< num_sym
; i
++) {
168 Elf64_Sym
*sym
= &symtab
[i
];
170 if (!strcmp(symbol
, sym_names
+ sym
->st_name
))
171 return (uint64_t)sym
->st_value
;
/*
 * encl_load() - Open the SGX device, map the enclave binary, and build the
 * enclave's segment table: one entry per PT_LOAD segment plus a trailing
 * unmeasured heap segment of heap_size bytes.
 *
 * NOTE(review): this chunk is a lossy extraction of the original file --
 * braces, several local declarations (fd, ret, ptr, ehdr, src_offset, i, j
 * are used below but their declarations are not visible), the goto/return
 * error branches, and the function tail are missing here.  Comments below
 * annotate only the statements that survived.
 */
177 bool encl_load(const char *path
, struct encl
*encl
, unsigned long heap_size
)
179 const char device_path
[] = "/dev/sgx_enclave";
180 struct encl_segment
*seg
;
181 Elf64_Phdr
*phdr_tbl
;
/* Start from a clean descriptor so no stale state is carried over. */
190 memset(encl
, 0, sizeof(*encl
));
/* Open the SGX driver device node. */
192 fd
= open(device_path
, O_RDWR
);
194 perror("Unable to open /dev/sgx_enclave");
198 ret
= stat(device_path
, &sb
);
200 perror("device file stat()");
/* Sanity probe: the device node must be mappable PROT_READ. */
204 ptr
= mmap(NULL
, PAGE_SIZE
, PROT_READ
, MAP_SHARED
, fd
, 0);
205 if (ptr
== (void *)-1) {
206 perror("mmap for read");
209 munmap(ptr
, PAGE_SIZE
);
/* Continuation lines of an ERR_MSG macro (its #define line is not
 * visible in this extraction); the message is printed below when the
 * PROT_EXEC probe fails. */
212 "mmap() succeeded for PROT_READ, but failed for PROT_EXEC.\n" \
213 " Check that /dev does not have noexec set:\n" \
214 " \tmount | grep \"/dev .*noexec\"\n" \
215 " If so, remount it executable: mount -o remount,exec /dev\n\n"
/* Second probe: PROT_EXEC mapping fails when /dev is mounted noexec. */
217 ptr
= mmap(NULL
, PAGE_SIZE
, PROT_EXEC
, MAP_SHARED
, fd
, 0);
218 if (ptr
== (void *)-1) {
219 fprintf(stderr
, ERR_MSG
);
222 munmap(ptr
, PAGE_SIZE
);
/* Map the enclave binary read-only (sets encl->bin / encl->bin_size). */
226 if (!encl_map_bin(path
, encl
))
/* Locate the ELF program-header table inside the mapped binary. */
230 phdr_tbl
= encl
->bin
+ ehdr
->e_phoff
;
232 encl
->nr_segments
= 1; /* one for the heap */
/* First pass: count the PT_LOAD segments. */
234 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
235 Elf64_Phdr
*phdr
= &phdr_tbl
[i
];
237 if (phdr
->p_type
== PT_LOAD
)
/* Allocate the zero-initialized segment table (loadable + heap). */
241 encl
->segment_tbl
= calloc(encl
->nr_segments
,
242 sizeof(struct encl_segment
));
243 if (!encl
->segment_tbl
)
/* Second pass: populate one segment entry per PT_LOAD header. */
246 for (i
= 0, j
= 0; i
< ehdr
->e_phnum
; i
++) {
247 Elf64_Phdr
*phdr
= &phdr_tbl
[i
];
248 unsigned int flags
= phdr
->p_flags
;
250 if (phdr
->p_type
!= PT_LOAD
)
253 seg
= &encl
->segment_tbl
[j
];
/* Reject segments carrying anything beyond R/W/X permission bits. */
255 if (!!(flags
& ~(PF_R
| PF_W
| PF_X
))) {
257 "%d has invalid segment flags 0x%02x.\n", i
,
/* The first loadable segment holds the TCS and must be exactly RW. */
262 if (j
== 0 && flags
!= (PF_R
| PF_W
)) {
264 "TCS has invalid segment flags 0x%02x.\n",
/* TCS segment (j == 0): record the page-aligned source offset and
 * mark the pages as TCS type. */
270 src_offset
= phdr
->p_offset
& PAGE_MASK
;
271 encl
->src
= encl
->bin
+ src_offset
;
273 seg
->prot
= PROT_READ
| PROT_WRITE
;
274 seg
->flags
= SGX_PAGE_TYPE_TCS
<< 8;
/* Regular segments: translate ELF p_flags into mmap protections and
 * encode them, with the REG page type, into the SECINFO flags. */
276 seg
->prot
= (phdr
->p_flags
& PF_R
) ? PROT_READ
: 0;
277 seg
->prot
|= (phdr
->p_flags
& PF_W
) ? PROT_WRITE
: 0;
278 seg
->prot
|= (phdr
->p_flags
& PF_X
) ? PROT_EXEC
: 0;
279 seg
->flags
= (SGX_PAGE_TYPE_REG
<< 8) | seg
->prot
;
/* Page-aligned placement and size of this segment within the image. */
282 seg
->offset
= (phdr
->p_offset
& PAGE_MASK
) - src_offset
;
283 seg
->size
= (phdr
->p_filesz
+ PAGE_SIZE
- 1) & PAGE_MASK
;
284 seg
->src
= encl
->src
+ seg
->offset
;
/* The heap slot must be the one remaining entry in the table. */
290 assert(j
== encl
->nr_segments
- 1);
/* Final entry: an anonymous RW heap of heap_size bytes placed directly
 * after the last loadable segment; excluded from measurement. */
292 seg
= &encl
->segment_tbl
[j
];
293 seg
->offset
= encl
->segment_tbl
[j
- 1].offset
+ encl
->segment_tbl
[j
- 1].size
;
294 seg
->size
= heap_size
;
295 seg
->src
= mmap(NULL
, heap_size
, PROT_READ
| PROT_WRITE
,
296 MAP_ANONYMOUS
| MAP_PRIVATE
, -1, 0);
297 seg
->prot
= PROT_READ
| PROT_WRITE
;
298 seg
->flags
= (SGX_PAGE_TYPE_REG
<< 8) | seg
->prot
;
299 seg
->measure
= false;
301 if (seg
->src
== MAP_FAILED
)
/* Total source size = end of the heap segment. */
304 encl
->src_size
= encl
->segment_tbl
[j
].offset
+ encl
->segment_tbl
[j
].size
;
/* Round the enclave size up to the next power of two >= src_size,
 * starting at one page (4096). */
306 for (encl
->encl_size
= 4096; encl
->encl_size
< encl
->src_size
; )
307 encl
->encl_size
<<= 1;
318 static bool encl_map_area(struct encl
*encl
)
320 size_t encl_size
= encl
->encl_size
;
323 area
= mmap(NULL
, encl_size
* 2, PROT_NONE
,
324 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
325 if (area
== MAP_FAILED
) {
326 perror("reservation mmap()");
330 encl
->encl_base
= ((uint64_t)area
+ encl_size
- 1) & ~(encl_size
- 1);
332 munmap(area
, encl
->encl_base
- (uint64_t)area
);
333 munmap((void *)(encl
->encl_base
+ encl_size
),
334 (uint64_t)area
+ encl_size
- encl
->encl_base
);
/*
 * encl_build() - Construct the enclave end-to-end: reserve the address
 * range, create the enclave, add all segment pages, then initialize it
 * with the SIGSTRUCT.
 *
 * NOTE(review): this chunk is a lossy extraction -- braces, the loop index
 * and ret declarations, and the return paths (the function continues past
 * the end of this chunk) are not visible here.
 */
339 bool encl_build(struct encl
*encl
)
341 struct sgx_enclave_init ioc
;
/* Reserve a naturally-aligned range for the enclave. */
345 if (!encl_map_area(encl
))
/* SGX_IOC_ENCLAVE_CREATE with a SECS built from encl_base/encl_size. */
348 if (!encl_ioc_create(encl
))
/*
352 * Pages must be added before mapping VMAs because their permissions
353 * cap the VMA permissions.
*/
/* Add every segment's pages via SGX_IOC_ENCLAVE_ADD_PAGES. */
355 for (i
= 0; i
< encl
->nr_segments
; i
++) {
356 struct encl_segment
*seg
= &encl
->segment_tbl
[i
];
358 if (!encl_ioc_add_pages(encl
, seg
))
/* Initialize the enclave with the signed SIGSTRUCT. */
362 ioc
.sigstruct
= (uint64_t)&encl
->sigstruct
;
363 ret
= ioctl(encl
->fd
, SGX_IOC_ENCLAVE_INIT
, &ioc
);
365 perror("SGX_IOC_ENCLAVE_INIT failed");