// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */
#include <cpuid.h>
#include <elf.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/auxv.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include "defines.h"
#include "../kselftest_harness.h"
#include "main.h"
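/* Test patterns written into enclave memory and read back by the tests. */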
static const uint64_t MAGIC = 0x1122334455667788ULL;
static const uint64_t MAGIC2 = 0x8877665544332211ULL;

vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;
/*
 * Security Information (SECINFO) data structure needed by a few SGX
 * instructions (e.g. ENCLU[EACCEPT] and ENCLU[EMODPE]) holds meta-data
 * about an enclave page. &enum sgx_secinfo_page_state specifies the
 * secinfo flags used for page state.
 */
enum sgx_secinfo_page_state {
	SGX_SECINFO_PENDING = (1 << 3),
	SGX_SECINFO_MODIFIED = (1 << 4),
	SGX_SECINFO_PR = (1 << 5),
};
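/*
 * Pointers into the vDSO's dynamic symbol table, string table and hash
 * table, used to resolve __vdso_sgx_enter_enclave() at runtime.
 */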
struct vdso_symtab {
	Elf64_Sym *elf_symtab;
	const char *elf_symstrtab;
	Elf64_Word *elf_hashtab;
};
static Elf64_Dyn *vdso_get_dyntab(void *addr)
{
	Elf64_Ehdr *ehdr = addr;
	Elf64_Phdr *phdrtab = addr + ehdr->e_phoff;
	int i;

	for (i = 0; i < ehdr->e_phnum; i++)
		if (phdrtab[i].p_type == PT_DYNAMIC)
			return addr + phdrtab[i].p_offset;

	return NULL;
}
static void *vdso_get_dyn(void *addr, Elf64_Dyn *dyntab, Elf64_Sxword tag)
{
	int i;

	for (i = 0; dyntab[i].d_tag != DT_NULL; i++)
		if (dyntab[i].d_tag == tag)
			return addr + dyntab[i].d_un.d_ptr;

	return NULL;
}
static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
{
	Elf64_Dyn *dyntab = vdso_get_dyntab(addr);

	symtab->elf_symtab = vdso_get_dyn(addr, dyntab, DT_SYMTAB);
	if (!symtab->elf_symtab)
		return false;

	symtab->elf_symstrtab = vdso_get_dyn(addr, dyntab, DT_STRTAB);
	if (!symtab->elf_symstrtab)
		return false;

	symtab->elf_hashtab = vdso_get_dyn(addr, dyntab, DT_HASH);
	if (!symtab->elf_hashtab)
		return false;

	return true;
}
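/* CPUID leaf 0x12 (SGX_CPUID), sub-leaf 0: bit 1 of EAX reports SGX2 support. */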
static inline int sgx2_supported(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid_count(SGX_CPUID, 0x0, eax, ebx, ecx, edx);

	return eax & 0x2;
}
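/*
 * Classic System V ELF hash, used to look up symbols in the vDSO's
 * DT_HASH table.
 */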
static unsigned long elf_sym_hash(const char *name)
{
	unsigned long h = 0, high;

	while (*name) {
		h = (h << 4) + *name++;
		high = h & 0xf0000000;

		if (high)
			h ^= high >> 24;

		h &= ~high;
	}

	return h;
}
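/*
 * DT_HASH layout: [0] = number of buckets, [1] = number of chain entries,
 * followed by the bucket array and then the chain array.
 */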
static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
{
	Elf64_Word bucketnum = symtab->elf_hashtab[0];
	Elf64_Word *buckettab = &symtab->elf_hashtab[2];
	Elf64_Word *chaintab = &symtab->elf_hashtab[2 + bucketnum];
	Elf64_Sym *sym;
	Elf64_Word i;

	for (i = buckettab[elf_sym_hash(name) % bucketnum]; i != STN_UNDEF;
	     i = chaintab[i]) {
		sym = &symtab->elf_symtab[i];
		if (!strcmp(name, &symtab->elf_symstrtab[sym->st_name]))
			return sym;
	}

	return NULL;
}
/*
 * Return the offset in the enclave where the TCS segment can be found.
 * The first RW segment loaded is the TCS.
 */
static off_t encl_get_tcs_offset(struct encl *encl)
{
	int i;

	for (i = 0; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		if (i == 0 && seg->prot == (PROT_READ | PROT_WRITE))
			return seg->offset;
	}

	return -1;
}
/*
 * Return the offset in the enclave where the data segment can be found.
 * The first RW segment loaded is the TCS, skip that to get info on the
 * data segment.
 */
static off_t encl_get_data_offset(struct encl *encl)
{
	int i;

	for (i = 1; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		if (seg->prot == (PROT_READ | PROT_WRITE))
			return seg->offset;
	}

	return -1;
}
FIXTURE(enclave) {
	struct encl encl;
	struct sgx_enclave_run run;
};
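/*
 * Load, measure and build test_encl.elf, mmap() its segments, and resolve
 * __vdso_sgx_enter_enclave() from the vDSO. On failure, log segment and
 * /proc/self/maps details to aid debugging.
 */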
static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
			    struct __test_metadata *_metadata)
{
	Elf64_Sym *sgx_enter_enclave_sym = NULL;
	struct vdso_symtab symtab;
	struct encl_segment *seg;
	char maps_line[256];
	FILE *maps_file;
	unsigned int i;
	void *addr;

	if (!encl_load("test_encl.elf", encl, heap_size)) {
		encl_delete(encl);
		TH_LOG("Failed to load the test enclave.");
		return false;
	}

	if (!encl_measure(encl))
		goto err;

	if (!encl_build(encl))
		goto err;

	/*
	 * Only an enclave consumer needs to do this.
	 */
	for (i = 0; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		addr = mmap((void *)encl->encl_base + seg->offset, seg->size,
			    seg->prot, MAP_SHARED | MAP_FIXED, encl->fd, 0);
		EXPECT_NE(addr, MAP_FAILED);
		if (addr == MAP_FAILED)
			goto err;
	}

	/* Get vDSO base address */
	addr = (void *)getauxval(AT_SYSINFO_EHDR);
	if (!addr)
		goto err;

	if (!vdso_get_symtab(addr, &symtab))
		goto err;

	sgx_enter_enclave_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave");
	if (!sgx_enter_enclave_sym)
		goto err;

	vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value;

	return true;

err:
	for (i = 0; i < encl->nr_segments; i++) {
		seg = &encl->segment_tbl[i];

		TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot);
	}

	maps_file = fopen("/proc/self/maps", "r");
	if (maps_file != NULL) {
		while (fgets(maps_line, sizeof(maps_line), maps_file) != NULL) {
			maps_line[strlen(maps_line) - 1] = '\0';

			if (strstr(maps_line, "/dev/sgx_enclave"))
				TH_LOG("%s", maps_line);
		}

		fclose(maps_file);
	}

	TH_LOG("Failed to initialize the test enclave.");

	encl_delete(encl);

	return false;
}
FIXTURE_SETUP(enclave)
{
}

FIXTURE_TEARDOWN(enclave)
{
	encl_delete(&self->encl);
}
#define ENCL_CALL(op, run, clobbered) \
	({ \
		int ret; \
		if ((clobbered)) \
			ret = vdso_sgx_enter_enclave((unsigned long)(op), 0, 0, \
						     EENTER, 0, 0, (run)); \
		else \
			ret = sgx_enter_enclave((void *)(op), NULL, 0, EENTER, NULL, NULL, \
						(run)); \
		ret; \
	})
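/*
 * Expect the previous enclave exit to be a normal EEXIT; if it was not,
 * log the exception details reported back by the vDSO.
 */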
#define EXPECT_EEXIT(run) \
	do { \
		EXPECT_EQ((run)->function, EEXIT); \
		if ((run)->function != EEXIT) \
			TH_LOG("0x%02x 0x%02x 0x%016llx", (run)->exception_vector, \
			       (run)->exception_error_code, (run)->exception_addr); \
	} while (0)
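/*
 * Basic sanity test: store MAGIC in the enclave's buffer and read it back,
 * expecting a clean EEXIT for both enclave calls.
 */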
TEST_F(enclave, unclobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
/*
 * A section metric is concatenated in a way that @low bits 12-31 define the
 * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the
 * metric.
 */
static unsigned long sgx_calc_section_metric(unsigned int low,
					     unsigned int high)
{
	return (low & GENMASK_ULL(31, 12)) +
	       ((high & GENMASK_ULL(19, 0)) << 32);
}
/*
 * Sum total available physical SGX memory across all EPC sections.
 *
 * Return: total physical SGX memory available on the system
 */
static unsigned long get_total_epc_mem(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned long total_size = 0;
	unsigned int type;
	int section = 0;

	while (true) {
		__cpuid_count(SGX_CPUID, section + SGX_CPUID_EPC, eax, ebx, ecx, edx);

		type = eax & SGX_CPUID_EPC_MASK;
		if (type == SGX_CPUID_EPC_INVALID)
			break;

		if (type != SGX_CPUID_EPC_SECTION)
			break;

		total_size += sgx_calc_section_metric(ecx, edx);

		section++;
	}

	return total_size;
}
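/*
 * Same put/get sanity test, but with a heap as large as all available EPC,
 * so that EPC pages must be reclaimed (oversubscription) while the
 * enclave runs.
 */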
TEST_F(enclave, unclobbered_vdso_oversubscribed)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;
	unsigned long total_mem;

	total_mem = get_total_epc_mem();
	ASSERT_NE(total_mem, 0);
	ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
TEST_F_TIMEOUT(enclave, unclobbered_vdso_oversubscribed_remove, 900)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_get_from_buf get_op;
	struct encl_op_eaccept eaccept_op;
	struct encl_op_put_to_buf put_op;
	struct encl_segment *heap;
	unsigned long total_mem;
	int ret, errno_save;
	unsigned long addr;
	unsigned long i;

	/*
	 * Create enclave with additional heap that is as big as all
	 * available physical SGX memory.
	 */
	total_mem = get_total_epc_mem();
	ASSERT_NE(total_mem, 0);
	TH_LOG("Creating an enclave with %lu bytes heap may take a while ...",
	       total_mem);
	ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with check that test has a chance of succeeding.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	EXPECT_EQ(ret, -1);

	/* SGX2 is supported by kernel and hardware, test can proceed. */
	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	heap = &self->encl.segment_tbl[self->encl.nr_segments - 1];

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	/* Trim entire heap. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = heap->offset;
	modt_ioc.length = heap->size;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	TH_LOG("Changing type of %zd bytes to trimmed may take a while ...",
	       heap->size);
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, heap->size);

	/* EACCEPT all removed pages. */
	addr = self->encl.encl_base + heap->offset;

	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	TH_LOG("Entering enclave to run EACCEPT for each page of %zd bytes may take a while ...",
	       heap->size);
	for (i = 0; i < heap->size; i += 4096) {
		eaccept_op.epc_addr = addr + i;

		EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

		EXPECT_EQ(self->run.exception_vector, 0);
		EXPECT_EQ(self->run.exception_error_code, 0);
		EXPECT_EQ(self->run.exception_addr, 0);
		ASSERT_EQ(eaccept_op.ret, 0);
		ASSERT_EQ(self->run.function, EEXIT);
	}

	/* Complete page removal. */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = heap->offset;
	remove_ioc.length = heap->size;

	TH_LOG("Removing %zd bytes from enclave may take a while ...",
	       heap->size);
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, heap->size);
}
TEST_F(enclave, clobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
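/*
 * Minimal user exit handler: clear user_data so the caller can verify that
 * the handler was invoked.
 */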
static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
			struct sgx_enclave_run *run)
{
	run->user_data = 0;

	return 0;
}
TEST_F(enclave, clobbered_vdso_and_user_function)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	self->run.user_handler = (__u64)test_handler;
	self->run.user_data = 0xdeadbeef;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
/*
 * Sanity check that it is possible to enter either of the two hardcoded TCS
 * entries.
 */
TEST_F(enclave, tcs_entry)
{
	struct encl_op_header op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	op.type = ENCL_OP_NOP;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Move to the next TCS. */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}
/*
 * Second page of .data segment is used to test changing PTE permissions.
 * This spans the local encl_buffer within the test enclave.
 *
 * 1) Start with a sanity check: a value is written to the target page within
 *    the enclave and read back to ensure target page can be written to.
 * 2) Change PTE permissions (RW -> RO) of target page within enclave.
 * 3) Repeat (1) - this time expecting a regular #PF communicated via the
 *    vDSO.
 * 4) Change PTE permissions of target page within enclave back to be RW.
 * 5) Repeat (1) by resuming enclave, now expected to be possible to write to
 *    and read from target page within enclave.
 */
TEST_F(enclave, pte_permissions)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	unsigned long data_start;
	int ret;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) +
		     PAGE_SIZE;

	/*
	 * Sanity check to ensure it is possible to write to page that will
	 * have its permissions manipulated.
	 */

	/* Write MAGIC to page */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory that was just written to, confirming that it is the
	 * value previously written (MAGIC).
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Change PTE permissions of target page within the enclave */
	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ);
	if (ret)
		perror("mprotect");

	/*
	 * PTE permissions of target page changed to read-only, EPCM
	 * permissions unchanged (EPCM permissions are RW), attempt to
	 * write to the page, expecting a regular #PF.
	 */

	put_addr_op.value = MAGIC2;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x7);
	EXPECT_EQ(self->run.exception_addr, data_start);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Change PTE permissions back to enable enclave to write to the
	 * target page and resume enclave - do not expect any exceptions this
	 * time.
	 */
	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE);
	if (ret)
		perror("mprotect");

	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0,
					 0, ERESUME, 0, 0, &self->run),
		  0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC2);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}
/*
 * Modifying permissions of TCS page should not be possible.
 */
TEST_F(enclave, tcs_permissions)
{
	struct sgx_enclave_restrict_permissions ioc;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	memset(&ioc, 0, sizeof(ioc));

	/*
	 * Ensure kernel supports needed ioctl() and system supports needed
	 * commands.
	 */
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
	errno_save = ret == -1 ? errno : 0;

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	EXPECT_EQ(ret, -1);

	if (errno_save == ENOTTY)
		SKIP(return,
		     "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
	else if (errno_save == ENODEV)
		SKIP(return, "System does not support SGX2");

	/*
	 * Attempt to make TCS page read-only. This is not allowed and
	 * should be prevented by the kernel.
	 */
	ioc.offset = encl_get_tcs_offset(&self->encl);
	ioc.length = PAGE_SIZE;
	ioc.permissions = SGX_SECINFO_R;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno_save, EINVAL);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, 0);
}
/*
 * Enclave page permission test.
 *
 * Modify and restore enclave page's EPCM (enclave) permissions from
 * outside enclave (ENCLS[EMODPR] via kernel) as well as from within
 * enclave (via ENCLU[EMODPE]). Check for page fault if
 * VMA allows access but EPCM permissions do not.
 */
TEST_F(enclave, epcm_permissions)
{
	struct sgx_enclave_restrict_permissions restrict_ioc;
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	struct encl_op_emodpe emodpe_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Ensure kernel supports needed ioctl() and system supports needed
	 * commands.
	 */
	memset(&restrict_ioc, 0, sizeof(restrict_ioc));

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
		    &restrict_ioc);
	errno_save = ret == -1 ? errno : 0;

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	EXPECT_EQ(ret, -1);

	if (errno_save == ENOTTY)
		SKIP(return,
		     "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
	else if (errno_save == ENODEV)
		SKIP(return, "System does not support SGX2");

	/*
	 * Page that will have its permissions changed is the second data
	 * page in the .data segment. This forms part of the local encl_buffer
	 * within the enclave.
	 *
	 * At start of test @data_start should have EPCM as well as PTE and
	 * VMA permissions of RW.
	 */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Sanity check that page at @data_start is writable before making
	 * any changes to page permissions.
	 *
	 * Start by writing MAGIC to test page.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory that was just written to, confirming that
	 * the page is writable.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Change EPCM permissions to read-only. Kernel still considers
	 * the page writable.
	 */
	memset(&restrict_ioc, 0, sizeof(restrict_ioc));

	restrict_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	restrict_ioc.length = PAGE_SIZE;
	restrict_ioc.permissions = SGX_SECINFO_R;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
		    &restrict_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(restrict_ioc.result, 0);
	EXPECT_EQ(restrict_ioc.count, 4096);

	/*
	 * EPCM permissions changed from kernel, need to EACCEPT from enclave.
	 */
	eaccept_op.epc_addr = data_start;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_REG | SGX_SECINFO_PR;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * EPCM permissions of the page are now read-only, expect #PF
	 * on EPCM when attempting to write to page from within enclave.
	 */
	put_addr_op.value = MAGIC2;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x8007);
	EXPECT_EQ(self->run.exception_addr, data_start);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Received AEX but cannot return to enclave at same entrypoint,
	 * need different TCS from where EPCM permission can be made writable
	 * again.
	 */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	/*
	 * Enter enclave at new TCS to change EPCM permissions to be
	 * writable again and thus fix the page fault that triggered the
	 * AEX.
	 */
	emodpe_op.epc_addr = data_start;
	emodpe_op.flags = SGX_SECINFO_R | SGX_SECINFO_W;
	emodpe_op.header.type = ENCL_OP_EMODPE;

	EXPECT_EQ(ENCL_CALL(&emodpe_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Attempt to return to main TCS to resume execution at faulting
	 * instruction, PTE should continue to allow writing to the page.
	 */
	self->run.tcs = self->encl.encl_base;

	/*
	 * The wrong page permissions that caused the original fault have
	 * now been fixed via EPCM permissions.
	 * Resume execution in main TCS to re-attempt the memory access.
	 */
	self->run.tcs = self->encl.encl_base;

	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
					 ERESUME, 0, 0,
					 &self->run),
		  0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC2);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}
/*
 * Test the addition of pages to an initialized enclave via writing to
 * a page that belongs to the enclave's address space but was not added
 * during enclave creation.
 */
TEST_F(enclave, augment)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	void *addr;
	int i;

	if (!sgx2_supported())
		SKIP(return, "SGX2 not supported");

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/*
	 * Actual enclave size is expected to be larger than the loaded
	 * test enclave since enclave size must be a power of 2 in bytes
	 * and test_encl does not consume it all.
	 */
	EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

	/*
	 * Create memory mapping for the page that will be added. New
	 * memory mapping is for one page right after all existing
	 * enclave pages.
	 * Kernel will allow new mapping using any permissions if it
	 * falls into the enclave's address range but not backed
	 * by existing enclave pages.
	 */
	addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
		    PROT_READ | PROT_WRITE | PROT_EXEC,
		    MAP_SHARED | MAP_FIXED, self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Attempt to write to the new page from within enclave.
	 * Expected to fail since page is not (yet) part of the enclave.
	 * The first #PF will trigger the addition of the page to the
	 * enclave, but since the new page needs an EACCEPT from within the
	 * enclave before it can be used it would not be possible
	 * to successfully return to the failing instruction. This is the
	 * cause of the second #PF captured here having the SGX bit set,
	 * it is from hardware preventing the page from being used.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)addr;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_addr, (unsigned long)addr);

	if (self->run.exception_error_code == 0x6) {
		munmap(addr, PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EQ(self->run.exception_error_code, 0x8007);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/* Handle AEX by running EACCEPT from new entry point. */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	eaccept_op.epc_addr = self->encl.encl_base + total_size;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Can now return to main TCS to resume execution. */
	self->run.tcs = self->encl.encl_base;

	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
					 ERESUME, 0, 0,
					 &self->run),
		  0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory from newly added page that was just written to,
	 * confirming that data previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)addr;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, PAGE_SIZE);
}
/*
 * Test for the addition of pages to an initialized enclave via a
 * pre-emptive run of EACCEPT on page to be added.
 */
TEST_F(enclave, augment_via_eaccept)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	void *addr;
	int i;

	if (!sgx2_supported())
		SKIP(return, "SGX2 not supported");

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/*
	 * Actual enclave size is expected to be larger than the loaded
	 * test enclave since enclave size must be a power of 2 in bytes while
	 * test_encl does not consume it all.
	 */
	EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

	/*
	 * mmap() a page at end of existing enclave to be used for dynamic
	 * EPC page.
	 *
	 * Kernel will allow new mapping using any permissions if it
	 * falls into the enclave's address range but not backed
	 * by existing enclave pages.
	 */
	addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED,
		    self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Run EACCEPT on new page to trigger the #PF->EAUG->EACCEPT(again
	 * without a #PF). All should be transparent to userspace.
	 */
	eaccept_op.epc_addr = self->encl.encl_base + total_size;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	if (self->run.exception_vector == 14 &&
	    self->run.exception_error_code == 4 &&
	    self->run.exception_addr == self->encl.encl_base + total_size) {
		munmap(addr, PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * New page should be accessible from within enclave - attempt to
	 * write to it.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)addr;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory from newly added page that was just written to,
	 * confirming that data previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)addr;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, PAGE_SIZE);
}
/*
 * SGX2 page type modification test in two phases:
 * Phase 1:
 * Create a new TCS, consisting of three new pages (stack page with regular
 * page type, SSA page with regular page type, and TCS page with TCS page
 * type) in an initialized enclave and run a simple workload within it.
 * Phase 2:
 * Remove the three pages added in phase 1, add a new regular page at the
 * same address that previously hosted the TCS page and verify that it can
 * be modified.
 */
TEST_F(enclave, tcs_create)
{
	struct encl_op_init_tcs_page init_tcs_page_op;
	struct sgx_enclave_remove_pages remove_ioc;
	struct encl_op_get_from_addr get_addr_op;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_get_from_buf get_buf_op;
	struct encl_op_put_to_buf put_buf_op;
	void *addr, *tcs, *stack_end, *ssa;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	uint64_t val_64;
	int errno_save;
	int ret, i;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
				    _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with check that test has a chance of succeeding.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	EXPECT_EQ(ret, -1);

	/*
	 * Add three regular pages via EAUG: one will be the TCS stack, one
	 * will be the TCS SSA, and one will be the new TCS. The stack and
	 * SSA will remain as regular pages, the TCS page will need its
	 * type changed after being populated with needed data.
	 */
	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/*
	 * Actual enclave size is expected to be larger than the loaded
	 * test enclave since enclave size must be a power of 2 in bytes while
	 * test_encl does not consume it all.
	 */
	EXPECT_LT(total_size + 3 * PAGE_SIZE, self->encl.encl_size);

	/*
	 * mmap() three pages at end of existing enclave to be used for the
	 * three new pages.
	 */
	addr = mmap((void *)self->encl.encl_base + total_size, 3 * PAGE_SIZE,
		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
		    self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	stack_end = (void *)self->encl.encl_base + total_size;
	tcs = (void *)self->encl.encl_base + total_size + PAGE_SIZE;
	ssa = (void *)self->encl.encl_base + total_size + 2 * PAGE_SIZE;

	/*
	 * Run EACCEPT on each new page to trigger the
	 * EACCEPT->(#PF)->EAUG->EACCEPT(again without a #PF) flow.
	 */
	eaccept_op.epc_addr = (unsigned long)stack_end;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	if (self->run.exception_vector == 14 &&
	    self->run.exception_error_code == 4 &&
	    self->run.exception_addr == (unsigned long)stack_end) {
		munmap(addr, 3 * PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	eaccept_op.epc_addr = (unsigned long)ssa;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	eaccept_op.epc_addr = (unsigned long)tcs;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * Three new pages added to enclave. Now populate the TCS page with
	 * needed data. This should be done from within enclave. Provide
	 * the function that will do the actual data population with needed
	 * data.
	 */

	/*
	 * New TCS will use the "encl_dyn_entry" entrypoint that expects
	 * stack to begin in page before TCS page.
	 */
	val_64 = encl_get_entry(&self->encl, "encl_dyn_entry");
	EXPECT_NE(val_64, 0);

	init_tcs_page_op.tcs_page = (unsigned long)tcs;
	init_tcs_page_op.ssa = (unsigned long)total_size + 2 * PAGE_SIZE;
	init_tcs_page_op.entry = val_64;
	init_tcs_page_op.header.type = ENCL_OP_INIT_TCS_PAGE;

	EXPECT_EQ(ENCL_CALL(&init_tcs_page_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Change TCS page type to TCS. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = total_size + PAGE_SIZE;
	modt_ioc.length = PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TCS;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 4096);

	/* EACCEPT new TCS page from enclave. */
	eaccept_op.epc_addr = (unsigned long)tcs;
	eaccept_op.flags = SGX_SECINFO_TCS | SGX_SECINFO_MODIFIED;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Run workload from new TCS. */
	self->run.tcs = (unsigned long)tcs;

	/*
	 * Simple workload to write to data buffer and read value back.
	 */
	put_buf_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_buf_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_buf_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_buf_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_buf_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_buf_op, &self->run, true), 0);

	EXPECT_EQ(get_buf_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Phase 2 of test:
	 * Remove pages associated with new TCS, create a regular page
	 * where TCS page used to be and verify it can be used as a regular
	 * page.
	 */

	/* Start page removal by requesting change of page type to PT_TRIM. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = total_size;
	modt_ioc.length = 3 * PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 3 * PAGE_SIZE);

	/*
	 * Enter enclave via TCS #1 and approve page removal by sending
	 * EACCEPT for each of three removed pages.
	 */
	self->run.tcs = self->encl.encl_base;

	eaccept_op.epc_addr = (unsigned long)stack_end;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	eaccept_op.epc_addr = (unsigned long)tcs;
	eaccept_op.ret = 0;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	eaccept_op.epc_addr = (unsigned long)ssa;
	eaccept_op.ret = 0;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Send final ioctl() to complete page removal. */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = total_size;
	remove_ioc.length = 3 * PAGE_SIZE;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, 3 * PAGE_SIZE);

	/*
	 * Enter enclave via TCS #1 and access location where TCS #3 was to
	 * trigger dynamic add of regular page at that location.
	 */
	eaccept_op.epc_addr = (unsigned long)tcs;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * New page should be accessible from within enclave - write to it.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)tcs;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory from newly added page that was just written to,
	 * confirming that data previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)tcs;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, 3 * PAGE_SIZE);
}
/*
 * Ensure sane behavior if user requests page removal, does not run
 * EACCEPT from within enclave but still attempts to finalize page removal
 * with the SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl(). The latter should fail
 * because the removal was not EACCEPTed from within the enclave.
 */
TEST_F(enclave, remove_added_page_no_eaccept)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct encl_op_get_from_addr get_addr_op;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_put_to_addr put_addr_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with check that test has a chance of succeeding.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	EXPECT_EQ(ret, -1);

	/*
	 * Page that will be removed is the second data page in the .data
	 * segment. This forms part of the local encl_buffer within the
	 * enclave.
	 */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Sanity check that page at @data_start is writable before
	 * removing it.
	 *
	 * Start by writing MAGIC to test page.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory that was just written to, confirming that data
	 * previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Start page removal by requesting change of page type to PT_TRIM */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	modt_ioc.length = PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 4096);

	/* Skip EACCEPT */

	/* Send final ioctl() to complete page removal */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	remove_ioc.length = PAGE_SIZE;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	/* Operation not permitted since EACCEPT was omitted. */
	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno_save, EPERM);
	EXPECT_EQ(remove_ioc.count, 0);
}
/*
 * Request enclave page removal, but instead of correctly following with
 * EACCEPT, a read attempt to the page is made from within the enclave.
 */
TEST_F(enclave, remove_added_page_invalid_access)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct sgx_enclave_modify_types ioc;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with check that test has a chance of succeeding.
	 */
	memset(&ioc, 0, sizeof(ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	EXPECT_EQ(ret, -1);

	/*
	 * Page that will be removed is the second data page in the .data
	 * segment. This forms part of the local encl_buffer within the
	 * enclave.
	 */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Sanity check that page at @data_start is writable before
	 * removing it.
	 *
	 * Start by writing MAGIC to test page.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory that was just written to, confirming that data
	 * previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Start page removal by requesting change of page type to PT_TRIM. */
	memset(&ioc, 0, sizeof(ioc));

	ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	ioc.length = PAGE_SIZE;
	ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, 4096);

	/*
	 * Read from page that was just removed.
	 */
	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	/*
	 * From kernel perspective the page is present but according to SGX the
	 * page should not be accessible so a #PF with SGX bit set is
	 * expected.
	 */
	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x8005);
	EXPECT_EQ(self->run.exception_addr, data_start);
}
/*
 * Request enclave page removal and correctly follow with EACCEPT, but skip
 * the final removal ioctl(); instead, a read attempt to the removed page is
 * made from within the enclave.
 */
TEST_F(enclave, remove_added_page_invalid_access_after_eaccept)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct sgx_enclave_modify_types ioc;
	struct encl_op_eaccept eaccept_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with check that test has a chance of succeeding.
	 */
	memset(&ioc, 0, sizeof(ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	EXPECT_EQ(ret, -1);

	/*
	 * Page that will be removed is the second data page in the .data
	 * segment. This forms part of the local encl_buffer within the
	 * enclave.
	 */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Sanity check that page at @data_start is writable before
	 * removing it.
	 *
	 * Start by writing MAGIC to test page.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory that was just written to, confirming that data
	 * previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Start page removal by requesting change of page type to PT_TRIM. */
	memset(&ioc, 0, sizeof(ioc));

	ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	ioc.length = PAGE_SIZE;
	ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, 4096);

	eaccept_op.epc_addr = (unsigned long)data_start;
	eaccept_op.ret = 0;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Skip ioctl() to remove page. */

	/*
	 * Read from page that was just removed.
	 */
	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	/*
	 * From kernel perspective the page is present but according to SGX the
	 * page should not be accessible so a #PF with SGX bit set is
	 * expected.
	 */
	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x8005);
	EXPECT_EQ(self->run.exception_addr, data_start);
}
TEST_F(enclave, remove_untouched_page)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_eaccept eaccept_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with check that test has a chance of succeeding.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	EXPECT_EQ(ret, -1);

	/* SGX2 is supported by kernel and hardware, test can proceed. */
	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	modt_ioc.length = PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 4096);

	/*
	 * Enter enclave via TCS #1 and approve page removal by sending
	 * EACCEPT for removed page.
	 */
	eaccept_op.epc_addr = data_start;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	remove_ioc.length = PAGE_SIZE;
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, 4096);
}

TEST_HARNESS_MAIN