0001
0002
0003
0004 #include <cpuid.h>
0005 #include <elf.h>
0006 #include <errno.h>
0007 #include <fcntl.h>
0008 #include <stdbool.h>
0009 #include <stdio.h>
0010 #include <stdint.h>
0011 #include <stdlib.h>
0012 #include <string.h>
0013 #include <unistd.h>
0014 #include <sys/ioctl.h>
0015 #include <sys/mman.h>
0016 #include <sys/stat.h>
0017 #include <sys/time.h>
0018 #include <sys/types.h>
0019 #include <sys/auxv.h>
0020 #include "defines.h"
0021 #include "../kselftest_harness.h"
0022 #include "main.h"
0023
/* Magic payload values written into and read back out of the enclave buffer. */
static const uint64_t MAGIC = 0x1122334455667788ULL;
static const uint64_t MAGIC2 = 0x8877665544332211ULL;
/* Entry point of __vdso_sgx_enter_enclave(), resolved in setup_test_encl(). */
vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;
0027
0028
0029
0030
0031
0032
0033
/*
 * SECINFO flag bits passed to EACCEPT to match a page's state after an
 * SGX2 modification: PENDING for freshly EAUG'd pages, MODIFIED after
 * an EMODT (e.g. trim), PR after a permission restriction.
 */
enum sgx_secinfo_page_state {
	SGX_SECINFO_PENDING = (1 << 3),
	SGX_SECINFO_MODIFIED = (1 << 4),
	SGX_SECINFO_PR = (1 << 5),
};
0039
/* Symbol-lookup tables parsed from the vDSO's dynamic section. */
struct vdso_symtab {
	Elf64_Sym *elf_symtab;		/* DT_SYMTAB: symbol table */
	const char *elf_symstrtab;	/* DT_STRTAB: symbol name strings */
	Elf64_Word *elf_hashtab;	/* DT_HASH: SysV hash table */
};
0045
/*
 * vdso_get_dyntab() - locate the dynamic section of an ELF image.
 * @addr: base address of the (vDSO) mapping
 *
 * Walk the program header table and return a pointer to the PT_DYNAMIC
 * segment's contents, or NULL if the image has no dynamic segment.
 */
static Elf64_Dyn *vdso_get_dyntab(void *addr)
{
	Elf64_Ehdr *ehdr = addr;
	Elf64_Phdr *phdr = addr + ehdr->e_phoff;
	int remaining = ehdr->e_phnum;

	while (remaining--) {
		if (phdr->p_type == PT_DYNAMIC)
			return addr + phdr->p_offset;
		phdr++;
	}

	return NULL;
}
0058
/*
 * vdso_get_dyn() - look up one dynamic-section entry by tag.
 * @addr:   base address the entry's d_ptr is relative to
 * @dyntab: DT_NULL-terminated dynamic table
 * @tag:    dynamic tag to search for (e.g. DT_SYMTAB)
 *
 * Return the entry's target address, or NULL if @tag is not present.
 */
static void *vdso_get_dyn(void *addr, Elf64_Dyn *dyntab, Elf64_Sxword tag)
{
	Elf64_Dyn *entry;

	for (entry = dyntab; entry->d_tag != DT_NULL; entry++) {
		if (entry->d_tag == tag)
			return addr + entry->d_un.d_ptr;
	}

	return NULL;
}
0069
0070 static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
0071 {
0072 Elf64_Dyn *dyntab = vdso_get_dyntab(addr);
0073
0074 symtab->elf_symtab = vdso_get_dyn(addr, dyntab, DT_SYMTAB);
0075 if (!symtab->elf_symtab)
0076 return false;
0077
0078 symtab->elf_symstrtab = vdso_get_dyn(addr, dyntab, DT_STRTAB);
0079 if (!symtab->elf_symstrtab)
0080 return false;
0081
0082 symtab->elf_hashtab = vdso_get_dyn(addr, dyntab, DT_HASH);
0083 if (!symtab->elf_hashtab)
0084 return false;
0085
0086 return true;
0087 }
0088
/*
 * Report SGX2 support: bit 1 of EAX from CPUID leaf SGX_CPUID,
 * sub-leaf 0. Non-zero means the SGX2 instruction set is available.
 */
static inline int sgx2_supported(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid_count(SGX_CPUID, 0x0, eax, ebx, ecx, edx);

	return eax & 0x2;
}
0097
/*
 * Classic System V ABI ELF hash of @name, used to index the vDSO's
 * DT_HASH bucket array.
 */
static unsigned long elf_sym_hash(const char *name)
{
	unsigned long hash = 0;

	for (; *name; name++) {
		unsigned long top;

		hash = (hash << 4) + *name;
		top = hash & 0xf0000000;
		if (top)
			hash ^= top >> 24;
		/* Clear the top nibble (no-op when it was already zero). */
		hash &= ~top;
	}

	return hash;
}
0114
0115 static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
0116 {
0117 Elf64_Word bucketnum = symtab->elf_hashtab[0];
0118 Elf64_Word *buckettab = &symtab->elf_hashtab[2];
0119 Elf64_Word *chaintab = &symtab->elf_hashtab[2 + bucketnum];
0120 Elf64_Sym *sym;
0121 Elf64_Word i;
0122
0123 for (i = buckettab[elf_sym_hash(name) % bucketnum]; i != STN_UNDEF;
0124 i = chaintab[i]) {
0125 sym = &symtab->elf_symtab[i];
0126 if (!strcmp(name, &symtab->elf_symstrtab[sym->st_name]))
0127 return sym;
0128 }
0129
0130 return NULL;
0131 }
0132
0133
0134
0135
0136
0137 static off_t encl_get_tcs_offset(struct encl *encl)
0138 {
0139 int i;
0140
0141 for (i = 0; i < encl->nr_segments; i++) {
0142 struct encl_segment *seg = &encl->segment_tbl[i];
0143
0144 if (i == 0 && seg->prot == (PROT_READ | PROT_WRITE))
0145 return seg->offset;
0146 }
0147
0148 return -1;
0149 }
0150
0151
0152
0153
0154
0155
0156 static off_t encl_get_data_offset(struct encl *encl)
0157 {
0158 int i;
0159
0160 for (i = 1; i < encl->nr_segments; i++) {
0161 struct encl_segment *seg = &encl->segment_tbl[i];
0162
0163 if (seg->prot == (PROT_READ | PROT_WRITE))
0164 return seg->offset;
0165 }
0166
0167 return -1;
0168 }
0169
/* Per-test state: the loaded enclave and its vDSO run descriptor. */
FIXTURE(enclave) {
	struct encl encl;
	struct sgx_enclave_run run;
};
0174
0175 static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
0176 struct __test_metadata *_metadata)
0177 {
0178 Elf64_Sym *sgx_enter_enclave_sym = NULL;
0179 struct vdso_symtab symtab;
0180 struct encl_segment *seg;
0181 char maps_line[256];
0182 FILE *maps_file;
0183 unsigned int i;
0184 void *addr;
0185
0186 if (!encl_load("test_encl.elf", encl, heap_size)) {
0187 encl_delete(encl);
0188 TH_LOG("Failed to load the test enclave.");
0189 return false;
0190 }
0191
0192 if (!encl_measure(encl))
0193 goto err;
0194
0195 if (!encl_build(encl))
0196 goto err;
0197
0198
0199
0200
0201 for (i = 0; i < encl->nr_segments; i++) {
0202 struct encl_segment *seg = &encl->segment_tbl[i];
0203
0204 addr = mmap((void *)encl->encl_base + seg->offset, seg->size,
0205 seg->prot, MAP_SHARED | MAP_FIXED, encl->fd, 0);
0206 EXPECT_NE(addr, MAP_FAILED);
0207 if (addr == MAP_FAILED)
0208 goto err;
0209 }
0210
0211
0212 addr = (void *)getauxval(AT_SYSINFO_EHDR);
0213 if (!addr)
0214 goto err;
0215
0216 if (!vdso_get_symtab(addr, &symtab))
0217 goto err;
0218
0219 sgx_enter_enclave_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave");
0220 if (!sgx_enter_enclave_sym)
0221 goto err;
0222
0223 vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value;
0224
0225 return true;
0226
0227 err:
0228 for (i = 0; i < encl->nr_segments; i++) {
0229 seg = &encl->segment_tbl[i];
0230
0231 TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot);
0232 }
0233
0234 maps_file = fopen("/proc/self/maps", "r");
0235 if (maps_file != NULL) {
0236 while (fgets(maps_line, sizeof(maps_line), maps_file) != NULL) {
0237 maps_line[strlen(maps_line) - 1] = '\0';
0238
0239 if (strstr(maps_line, "/dev/sgx_enclave"))
0240 TH_LOG("%s", maps_line);
0241 }
0242
0243 fclose(maps_file);
0244 }
0245
0246 TH_LOG("Failed to initialize the test enclave.");
0247
0248 encl_delete(encl);
0249
0250 return false;
0251 }
0252
/* Nothing to do here: each test builds its own enclave via setup_test_encl(). */
FIXTURE_SETUP(enclave)
{
}
0256
/* Release the enclave's resources after each test. */
FIXTURE_TEARDOWN(enclave)
{
	encl_delete(&self->encl);
}
0261
/*
 * Enter the enclave at TCS (run)->tcs with operation @op via EENTER.
 * With @clobbered set, call the vDSO entry point directly; otherwise go
 * through the sgx_enter_enclave() wrapper (declared in main.h, which
 * presumably preserves more register state -- the "unclobbered" path).
 */
#define ENCL_CALL(op, run, clobbered) \
	({ \
		int ret; \
		if ((clobbered)) \
			ret = vdso_sgx_enter_enclave((unsigned long)(op), 0, 0, \
						     EENTER, 0, 0, (run)); \
		else \
			ret = sgx_enter_enclave((void *)(op), NULL, 0, EENTER, NULL, NULL, \
						(run)); \
		ret; \
	})
0273
/*
 * Expect the last enclave exit to be a synchronous EEXIT; on an
 * asynchronous exit, log the exception vector, error code and address.
 */
#define EXPECT_EEXIT(run) \
	do { \
		EXPECT_EQ((run)->function, EEXIT); \
		if ((run)->function != EEXIT) \
			TH_LOG("0x%02x 0x%02x 0x%016llx", (run)->exception_vector, \
			       (run)->exception_error_code, (run)->exception_addr); \
	} while (0)
0281
/*
 * Sanity check: write a value into the enclave's buffer and read it
 * back, using the register-preserving ("unclobbered") entry path.
 */
TEST_F(enclave, unclobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	/* The value stored above must round-trip through the enclave. */
	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
0309
0310
0311
0312
0313
0314
/*
 * sgx_calc_section_metric() - combine a split EPC section metric from
 * CPUID: bits 31:12 of @low form the low part, bits 19:0 of @high form
 * bits 51:32 of the byte count.
 */
static unsigned long sgx_calc_section_metric(unsigned int low,
					     unsigned int high)
{
	/* 0xfffff000 == GENMASK_ULL(31, 12); 0xfffff == GENMASK_ULL(19, 0). */
	return (low & 0xfffff000ULL) +
	       ((high & 0xfffffULL) << 32);
}
0321
0322
0323
0324
0325
0326
0327 static unsigned long get_total_epc_mem(void)
0328 {
0329 unsigned int eax, ebx, ecx, edx;
0330 unsigned long total_size = 0;
0331 unsigned int type;
0332 int section = 0;
0333
0334 while (true) {
0335 __cpuid_count(SGX_CPUID, section + SGX_CPUID_EPC, eax, ebx, ecx, edx);
0336
0337 type = eax & SGX_CPUID_EPC_MASK;
0338 if (type == SGX_CPUID_EPC_INVALID)
0339 break;
0340
0341 if (type != SGX_CPUID_EPC_SECTION)
0342 break;
0343
0344 total_size += sgx_calc_section_metric(ecx, edx);
0345
0346 section++;
0347 }
0348
0349 return total_size;
0350 }
0351
/*
 * Same round-trip as unclobbered_vdso, but with a heap as large as all
 * physical EPC memory, forcing the kernel to oversubscribe/reclaim.
 */
TEST_F(enclave, unclobbered_vdso_oversubscribed)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;
	unsigned long total_mem;

	total_mem = get_total_epc_mem();
	ASSERT_NE(total_mem, 0);
	ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
0382
/*
 * Remove all heap pages of a maximally-sized enclave: trim the heap
 * with SGX_IOC_ENCLAVE_MODIFY_TYPES, EACCEPT every trimmed page from
 * within the enclave, then complete removal with
 * SGX_IOC_ENCLAVE_REMOVE_PAGES.
 */
TEST_F_TIMEOUT(enclave, unclobbered_vdso_oversubscribed_remove, 900)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_get_from_buf get_op;
	struct encl_op_eaccept eaccept_op;
	struct encl_op_put_to_buf put_op;
	struct encl_segment *heap;
	unsigned long total_mem;
	int ret, errno_save;
	unsigned long addr;
	unsigned long i;

	/*
	 * Create an enclave with a heap as big as all available physical
	 * SGX (EPC) memory.
	 */
	total_mem = get_total_epc_mem();
	ASSERT_NE(total_mem, 0);
	TH_LOG("Creating an enclave with %lu bytes heap may take a while ...",
	       total_mem);
	ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

	/*
	 * SGX2 hardware and kernel support are needed. Probe with an
	 * all-zero (invalid) ioctl argument to learn whether the ioctl
	 * exists at all.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during the sanity check;
	 * expect the command to fail.
	 */
	EXPECT_EQ(ret, -1);

	/* SGX2 is supported; proceed with the round-trip sanity check. */
	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* The heap is the last segment of the enclave. */
	heap = &self->encl.segment_tbl[self->encl.nr_segments - 1];

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	/* Change the type of the entire heap to trimmed. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = heap->offset;
	modt_ioc.length = heap->size;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	TH_LOG("Changing type of %zd bytes to trimmed may take a while ...",
	       heap->size);
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, heap->size);

	/* EACCEPT the type change for every heap page from inside the enclave. */
	addr = self->encl.encl_base + heap->offset;

	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	TH_LOG("Entering enclave to run EACCEPT for each page of %zd bytes may take a while ...",
	       heap->size);
	for (i = 0; i < heap->size; i += 4096) {
		eaccept_op.epc_addr = addr + i;
		eaccept_op.ret = 0;

		EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

		EXPECT_EQ(self->run.exception_vector, 0);
		EXPECT_EQ(self->run.exception_error_code, 0);
		EXPECT_EQ(self->run.exception_addr, 0);
		ASSERT_EQ(eaccept_op.ret, 0);
		ASSERT_EQ(self->run.function, EEXIT);
	}

	/* Complete the page removal. */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = heap->offset;
	remove_ioc.length = heap->size;

	TH_LOG("Removing %zd bytes from enclave may take a while ...",
	       heap->size);
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, heap->size);
}
0503
/*
 * Same buffer round-trip as unclobbered_vdso, but entering through the
 * direct vDSO path (caller-saved registers may be clobbered).
 */
TEST_F(enclave, clobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
0531
/*
 * User exit handler installed via run->user_handler: clears user_data
 * so the test can verify the handler actually ran, and returns 0
 * (presumably making __vdso_sgx_enter_enclave() return success -- see
 * the vDSO API documentation).
 */
static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
			struct sgx_enclave_run *run)
{
	run->user_data = 0;

	return 0;
}
0539
/*
 * Buffer round-trip via the direct vDSO path with a user exit handler
 * installed. user_data is seeded with a sentinel; the handler clearing
 * it to 0 proves the handler was invoked.
 */
TEST_F(enclave, clobbered_vdso_and_user_function)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	self->run.user_handler = (__u64)test_handler;
	self->run.user_data = 0xdeadbeef;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	/* Sentinel cleared: the handler ran. */
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
0570
0571
0572
0573
/*
 * Sanity check that it is possible to enter the enclave through either
 * of its two hardcoded TCS pages.
 */
TEST_F(enclave, tcs_entry)
{
	struct encl_op_header op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	op.type = ENCL_OP_NOP;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Move to the next TCS. */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}
0602
0603
0604
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
/*
 * Enclave memory access is gated by both EPCM and page-table (PTE)
 * permissions. Verify PTE permissions alone can block access: write to
 * a data page, mprotect() it read-only, confirm the next write takes a
 * page fault, then restore the PTE permissions and ERESUME the
 * interrupted write.
 */
TEST_F(enclave, pte_permissions)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	unsigned long data_start;
	int ret;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Second page of the data segment is the target of the test. */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) +
		     PAGE_SIZE;

	/*
	 * Sanity check: ensure it is possible to write to the page that
	 * will have its permissions manipulated.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Read back the value to confirm the write landed. */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Change PTE permissions of the target page within the enclave. */
	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ);
	if (ret)
		perror("mprotect");

	/*
	 * EPCM permissions still allow writing, but the PTE is now
	 * read-only: the write must fault with a #PF (vector 14,
	 * error code 0x7: present | write | user).
	 */
	put_addr_op.value = MAGIC2;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x7);
	EXPECT_EQ(self->run.exception_addr, data_start);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Restore the PTE permissions and ERESUME the enclave so the
	 * faulted write can complete.
	 */
	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE);
	if (ret)
		perror("mprotect");

	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0,
					 0, ERESUME, 0, 0, &self->run),
		  0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* The resumed write must have stored MAGIC2. */
	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC2);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}
0716
0717
0718
0719
/*
 * Attempting to restrict the permissions of a TCS page must be
 * rejected by the kernel with EINVAL.
 */
TEST_F(enclave, tcs_permissions)
{
	struct sgx_enclave_restrict_permissions ioc;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	memset(&ioc, 0, sizeof(ioc));

	/*
	 * Probe with an all-zero (invalid) argument to learn whether the
	 * SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl exists before running
	 * the actual test.
	 */
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
	errno_save = ret == -1 ? errno : 0;

	/*
	 * Invalid parameters were provided during the sanity check;
	 * expect the command to fail.
	 */
	ASSERT_EQ(ret, -1);

	if (errno_save == ENOTTY)
		SKIP(return,
		     "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
	else if (errno_save == ENODEV)
		SKIP(return, "System does not support SGX2");

	/*
	 * Attempt to make the (first) TCS page read-only. This is not
	 * allowed and the ioctl must fail with EINVAL.
	 */
	ioc.offset = encl_get_tcs_offset(&self->encl);
	ioc.length = PAGE_SIZE;
	ioc.permissions = SGX_SECINFO_R;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno_save, EINVAL);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, 0);
}
0769
0770
0771
0772
0773
0774
0775
0776
0777
/*
 * Verify the EPCM-permission side of enclave access control: restrict
 * a data page to read-only via SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
 * EACCEPT the change, confirm a write now faults even though the PTE
 * still allows it, relax the permissions again from within the enclave
 * via EMODPE, and ERESUME the interrupted write.
 */
TEST_F(enclave, epcm_permissions)
{
	struct sgx_enclave_restrict_permissions restrict_ioc;
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	struct encl_op_emodpe emodpe_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Probe with an all-zero (invalid) argument to learn whether the
	 * SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl exists.
	 */
	memset(&restrict_ioc, 0, sizeof(restrict_ioc));

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
		    &restrict_ioc);
	errno_save = ret == -1 ? errno : 0;

	/*
	 * Invalid parameters were provided during the sanity check;
	 * expect the command to fail.
	 */
	ASSERT_EQ(ret, -1);

	if (errno_save == ENOTTY)
		SKIP(return,
		     "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
	else if (errno_save == ENODEV)
		SKIP(return, "System does not support SGX2");

	/*
	 * Target: the second page of the data segment, currently RW in
	 * both the EPCM and the page tables.
	 */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Sanity check: ensure it is possible to write to the page that
	 * will have its permissions manipulated.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Read back the value to confirm the write landed. */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Restrict the EPCM permissions of the target page to read-only.
	 */
	memset(&restrict_ioc, 0, sizeof(restrict_ioc));

	restrict_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	restrict_ioc.length = PAGE_SIZE;
	restrict_ioc.permissions = SGX_SECINFO_R;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
		    &restrict_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(restrict_ioc.result, 0);
	EXPECT_EQ(restrict_ioc.count, 4096);

	/*
	 * EACCEPT the permission restriction (PR) from within the enclave.
	 */
	eaccept_op.epc_addr = data_start;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_REG | SGX_SECINFO_PR;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * The PTE still allows writing, but the EPCM no longer does: a
	 * write must fault with an SGX-induced #PF (error code 0x8007)
	 * and leave the run in ERESUME state.
	 */
	put_addr_op.value = MAGIC2;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x8007);
	EXPECT_EQ(self->run.exception_addr, data_start);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * The faulted entry point cannot be re-entered directly; use the
	 * second TCS to run EMODPE and make the page writable again.
	 */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	/*
	 * EMODPE from within the enclave relaxes the EPCM permissions
	 * back to RW; no EACCEPT is needed for permission extension.
	 */
	emodpe_op.epc_addr = data_start;
	emodpe_op.flags = SGX_SECINFO_R | SGX_SECINFO_W;
	emodpe_op.header.type = ENCL_OP_EMODPE;

	EXPECT_EQ(ENCL_CALL(&emodpe_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Return to the original TCS to resume the interrupted write. */
	self->run.tcs = self->encl.encl_base;

	/*
	 * NOTE(review): this assignment duplicates the one directly
	 * above; harmless but redundant.
	 */
	self->run.tcs = self->encl.encl_base;

	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
					 ERESUME, 0, 0,
					 &self->run),
		  0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* The resumed write must have stored MAGIC2. */
	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC2);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}
0971
0972
0973
0974
0975
0976
/*
 * Add a page to an initialized enclave by touching it first from
 * within the enclave: the resulting page fault makes the kernel EAUG
 * the page, after which the enclave EACCEPTs it and the interrupted
 * write is resumed.
 */
TEST_F(enclave, augment)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	void *addr;
	int i;

	if (!sgx2_supported())
		SKIP(return, "SGX2 not supported");

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Size of all existing segments = offset of the first free page. */
	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/*
	 * The enclave's address range must have room for at least one
	 * more page beyond the loaded segments.
	 */
	EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

	/*
	 * Map one page just past the highest segment, backed by the
	 * enclave fd; the page itself is only EAUG'd by the kernel on
	 * first access from within the enclave.
	 */
	addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
		    PROT_READ | PROT_WRITE | PROT_EXEC,
		    MAP_SHARED | MAP_FIXED, self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Write to the new page from within the enclave. The page is not
	 * yet EACCEPTed, so the write faults (#PF, SGX error code
	 * 0x8007) and the run is left in ERESUME state.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)addr;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_addr, (unsigned long)addr);

	/* Error code 0x6 means the kernel never EAUG'd the page. */
	if (self->run.exception_error_code == 0x6) {
		munmap(addr, PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EQ(self->run.exception_error_code, 0x8007);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/* EACCEPT the new (PENDING) page via the second TCS. */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	eaccept_op.epc_addr = self->encl.encl_base + total_size;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Return to the first TCS and resume the interrupted write. */
	self->run.tcs = self->encl.encl_base;

	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
					 ERESUME, 0, 0,
					 &self->run),
		  0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read back from the new page to confirm the resumed write
	 * stored MAGIC.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)addr;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, PAGE_SIZE);
}
1102
1103
1104
1105
1106
/*
 * Add a page to an initialized enclave by running EACCEPT on it first:
 * the EACCEPT itself faults the page in (kernel EAUGs it from the page
 * fault handler), after which the page is immediately usable.
 */
TEST_F(enclave, augment_via_eaccept)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	void *addr;
	int i;

	if (!sgx2_supported())
		SKIP(return, "SGX2 not supported");

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Size of all existing segments = offset of the first free page. */
	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/*
	 * The enclave's address range must have room for at least one
	 * more page beyond the loaded segments.
	 */
	EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

	/*
	 * Map one page just past the highest segment, backed by the
	 * enclave fd; the kernel EAUGs it lazily on first access (here:
	 * the EACCEPT below).
	 */
	addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED,
		    self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * EACCEPT the new page. The access from within the enclave
	 * triggers the fault that makes the kernel EAUG the page.
	 */
	eaccept_op.epc_addr = self->encl.encl_base + total_size;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	/* An unresolved #PF (error code 4) means no kernel EAUG support. */
	if (self->run.exception_vector == 14 &&
	    self->run.exception_error_code == 4 &&
	    self->run.exception_addr == self->encl.encl_base + total_size) {
		munmap(addr, PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* The accepted page is usable right away: write MAGIC to it. */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)addr;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Read the value back to confirm the write landed. */
	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)addr;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, PAGE_SIZE);
}
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
/*
 * Exercise the dynamic (post-initialization) creation of a new TCS:
 * dynamically add three pages to an initialized enclave (stack, TCS, SSA),
 * convert one of them to a TCS page via SGX_IOC_ENCLAVE_MODIFY_TYPES, run
 * an enclave operation using the new TCS, then trim, remove, and finally
 * re-add one of the pages as a regular page.
 */
TEST_F(enclave, tcs_create)
{
	struct encl_op_init_tcs_page init_tcs_page_op;
	struct sgx_enclave_remove_pages remove_ioc;
	struct encl_op_get_from_addr get_addr_op;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_get_from_buf get_buf_op;
	struct encl_op_put_to_buf put_buf_op;
	void *addr, *tcs, *stack_end, *ssa;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	uint64_t val_64;
	int errno_save;
	int ret, i;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
				    _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Probe kernel support for SGX_IOC_ENCLAVE_MODIFY_TYPES with an
	 * all-zero (invalid) argument: ENOTTY means no kernel support,
	 * ENODEV means no SGX2 hardware; anything else should be the
	 * expected -1 from parameter validation.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during the probe above; the
	 * ioctl() itself must have failed.
	 */
	EXPECT_EQ(ret, -1);

	/*
	 * Add three pages to the initialized enclave, placed immediately
	 * after the pages created from the enclave binary's segments.
	 * Compute where that free region starts.
	 */
	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/*
	 * The enclave address range must be able to accommodate the three
	 * new pages beyond the existing segments.
	 */
	EXPECT_LT(total_size + 3 * PAGE_SIZE, self->encl.encl_size);

	/*
	 * Map the region where the new pages will live; the mapping is
	 * backed by the enclave fd so accesses fault into the SGX driver.
	 */
	addr = mmap((void *)self->encl.encl_base + total_size, 3 * PAGE_SIZE,
		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
		    self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/* Layout of the three new pages: stack, then TCS, then SSA. */
	stack_end = (void *)self->encl.encl_base + total_size;
	tcs = (void *)self->encl.encl_base + total_size + PAGE_SIZE;
	ssa = (void *)self->encl.encl_base + total_size + 2 * PAGE_SIZE;

	/*
	 * EACCEPT the new stack page from within the enclave. The first
	 * touch of the not-yet-present page is expected to make the kernel
	 * dynamically add (EAUG) it; the PENDING flag matches that state.
	 */
	eaccept_op.epc_addr = (unsigned long)stack_end;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	/*
	 * An unhandled #PF (vector 14) with error code 4 (user-mode access
	 * to a non-present page) at the new page means the kernel cannot
	 * add pages to an initialized enclave (no SGX2/EAUG support).
	 */
	if (self->run.exception_vector == 14 &&
	    self->run.exception_error_code == 4 &&
	    self->run.exception_addr == (unsigned long)stack_end) {
		munmap(addr, 3 * PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* EACCEPT the new SSA page, same flags as the stack page. */
	eaccept_op.epc_addr = (unsigned long)ssa;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* EACCEPT the future TCS page, still as a regular R/W page. */
	eaccept_op.epc_addr = (unsigned long)tcs;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * Fill the TCS page's fields from within the enclave (it is still
	 * a regular writable page at this point). The entry point is the
	 * enclave's dynamic entry symbol; the SSA is given as an offset
	 * from the enclave base.
	 */
	val_64 = encl_get_entry(&self->encl, "encl_dyn_entry");
	EXPECT_NE(val_64, 0);

	init_tcs_page_op.tcs_page = (unsigned long)tcs;
	init_tcs_page_op.ssa = (unsigned long)total_size + 2 * PAGE_SIZE;
	init_tcs_page_op.entry = val_64;
	init_tcs_page_op.header.type = ENCL_OP_INIT_TCS_PAGE;

	EXPECT_EQ(ENCL_CALL(&init_tcs_page_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Ask the kernel to change the page type of the TCS page to TCS. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = total_size + PAGE_SIZE;
	modt_ioc.length = PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TCS;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 4096);

	/* The enclave must EACCEPT the page-type change (MODIFIED state). */
	eaccept_op.epc_addr = (unsigned long)tcs;
	eaccept_op.flags = SGX_SECINFO_TCS | SGX_SECINFO_MODIFIED;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Switch to the newly created TCS for the next enclave entries. */
	self->run.tcs = (unsigned long)tcs;

	/*
	 * Sanity-check the new TCS: run a put/get round trip through the
	 * enclave's internal buffer.
	 */
	put_buf_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_buf_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_buf_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_buf_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_buf_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_buf_op, &self->run, true), 0);

	EXPECT_EQ(get_buf_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Tear the three dynamic pages down again: mark all of them for
	 * trimming in a single MODIFY_TYPES call.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = total_size;
	modt_ioc.length = 3 * PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 3 * PAGE_SIZE);

	/*
	 * The TCS being removed is the one just used, so switch back to
	 * the enclave's original TCS before re-entering.
	 */
	self->run.tcs = self->encl.encl_base;

	/* EACCEPT the trim of each of the three pages from the enclave. */
	eaccept_op.epc_addr = (unsigned long)stack_end;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	eaccept_op.epc_addr = (unsigned long)tcs;
	eaccept_op.ret = 0;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	eaccept_op.epc_addr = (unsigned long)ssa;
	eaccept_op.ret = 0;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* With the trim accepted, the pages can actually be removed. */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = total_size;
	remove_ioc.length = 3 * PAGE_SIZE;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, 3 * PAGE_SIZE);

	/*
	 * The page that used to be a TCS should be usable as a regular
	 * page again: EACCEPT it back in as a dynamically-added R/W page.
	 */
	eaccept_op.epc_addr = (unsigned long)tcs;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Verify the re-added page is writable from within the enclave. */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)tcs;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* ... and readable: the stored value must round-trip. */
	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)tcs;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, 3 * PAGE_SIZE);
}
1555
1556
1557
1558
1559
1560
1561
/*
 * Ensure sane behavior if user requests page removal, does not run
 * EACCEPT from within the enclave, but still attempts to finalize the
 * page removal with SGX_IOC_ENCLAVE_REMOVE_PAGES. The latter must fail
 * (EPERM) because the enclave never accepted the trim.
 */
TEST_F(enclave, remove_added_page_no_eaccept)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct encl_op_get_from_addr get_addr_op;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_put_to_addr put_addr_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Probe kernel support for SGX_IOC_ENCLAVE_MODIFY_TYPES with an
	 * all-zero (invalid) argument: ENOTTY means no kernel support,
	 * ENODEV means no SGX2 hardware.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * The probe used invalid parameters, so the ioctl() itself must
	 * have failed.
	 */
	EXPECT_EQ(ret, -1);

	/*
	 * Page that will be removed is the second data page in the .data
	 * segment (first page after the data-segment start).
	 */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Touch the page from within the enclave first so it is resident
	 * in the EPC before the removal attempt: write a value ...
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* ... and read it back to confirm the write stuck. */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Start page removal by requesting the page be trimmed. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	modt_ioc.length = PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 4096);

	/*
	 * Skip the required EACCEPT from within the enclave and attempt
	 * to finalize the removal directly.
	 */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	remove_ioc.length = PAGE_SIZE;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	/* Removal must be rejected: nothing removed, EPERM reported. */
	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno_save, EPERM);
	EXPECT_EQ(remove_ioc.count, 0);
}
1669
1670
1671
1672
1673
/*
 * Request page removal (trim) on an in-use enclave page and, without
 * running EACCEPT or SGX_IOC_ENCLAVE_REMOVE_PAGES, attempt to access the
 * page from within the enclave. The access must fault with an
 * SGX-flagged page fault.
 */
TEST_F(enclave, remove_added_page_invalid_access)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct sgx_enclave_modify_types ioc;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Probe kernel support for SGX_IOC_ENCLAVE_MODIFY_TYPES with an
	 * all-zero (invalid) argument: ENOTTY means no kernel support,
	 * ENODEV means no SGX2 hardware.
	 */
	memset(&ioc, 0, sizeof(ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * The probe used invalid parameters, so the ioctl() itself must
	 * have failed.
	 */
	EXPECT_EQ(ret, -1);

	/*
	 * Page that will be removed is the second data page in the .data
	 * segment (first page after the data-segment start).
	 */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Confirm the page is usable before the trim: write a value from
	 * within the enclave ...
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* ... and read it back. */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Request the page be trimmed. */
	memset(&ioc, 0, sizeof(ioc));

	ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	ioc.length = PAGE_SIZE;
	ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, 4096);

	/*
	 * Accessing the trimmed page from within the enclave must now
	 * fault (no EACCEPT or REMOVE_PAGES has been run).
	 */
	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	/*
	 * Expect a page fault (vector 14) at the trimmed page with the
	 * SGX flag (bit 15) set in the error code: 0x8005 = SGX | user |
	 * present. The enclave was re-entered via ERESUME when the fault
	 * was reported.
	 */
	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x8005);
	EXPECT_EQ(self->run.exception_addr, data_start);
}
1782
1783
1784
1785
1786
1787
/*
 * Like remove_added_page_invalid_access, but here the enclave also runs
 * EACCEPT on the trimmed page before the access attempt. Even with the
 * trim accepted (but SGX_IOC_ENCLAVE_REMOVE_PAGES not yet run), accessing
 * the page from within the enclave must fault with an SGX-flagged page
 * fault.
 */
TEST_F(enclave, remove_added_page_invalid_access_after_eaccept)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct sgx_enclave_modify_types ioc;
	struct encl_op_eaccept eaccept_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Probe kernel support for SGX_IOC_ENCLAVE_MODIFY_TYPES with an
	 * all-zero (invalid) argument: ENOTTY means no kernel support,
	 * ENODEV means no SGX2 hardware.
	 */
	memset(&ioc, 0, sizeof(ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * The probe used invalid parameters, so the ioctl() itself must
	 * have failed.
	 */
	EXPECT_EQ(ret, -1);

	/*
	 * Page that will be removed is the second data page in the .data
	 * segment (first page after the data-segment start).
	 */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Confirm the page is usable before the trim: write a value from
	 * within the enclave ...
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* ... and read it back. */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Request the page be trimmed. */
	memset(&ioc, 0, sizeof(ioc));

	ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	ioc.length = PAGE_SIZE;
	ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, 4096);

	/* This time the enclave does EACCEPT the trim. */
	eaccept_op.epc_addr = (unsigned long)data_start;
	eaccept_op.ret = 0;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * Even after EACCEPT, accessing the trimmed page from within the
	 * enclave must fault (REMOVE_PAGES has still not been run).
	 */
	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	/*
	 * Expect a page fault (vector 14) at the trimmed page with the
	 * SGX flag (bit 15) set in the error code: 0x8005 = SGX | user |
	 * present. The enclave was re-entered via ERESUME when the fault
	 * was reported.
	 */
	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x8005);
	EXPECT_EQ(self->run.exception_addr, data_start);
}
1912
/*
 * Remove a page from an initialized enclave that was never accessed:
 * run the full trim -> EACCEPT -> SGX_IOC_ENCLAVE_REMOVE_PAGES sequence
 * on a page that no enclave operation has touched, and expect it all to
 * succeed.
 */
TEST_F(enclave, remove_untouched_page)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_eaccept eaccept_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	/*
	 * Probe kernel support for SGX_IOC_ENCLAVE_MODIFY_TYPES with an
	 * all-zero (invalid) argument: ENOTTY means no kernel support,
	 * ENODEV means no SGX2 hardware.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * The probe used invalid parameters, so the ioctl() itself must
	 * have failed.
	 */
	EXPECT_EQ(ret, -1);

	/* The EACCEPT step still requires entering the enclave. */
	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Target: second data page; never touched by any enclave op. */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	modt_ioc.length = PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 4096);

	/*
	 * Enter the enclave to EACCEPT the trim — required even though
	 * the page itself was never used.
	 */
	eaccept_op.epc_addr = data_start;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Finalize the removal; the full page must be removed. */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	remove_ioc.length = PAGE_SIZE;
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, 4096);
}
1992
/* kselftest harness entry point: expands to main() and runs all TEST_F() cases above. */
TEST_HARNESS_MAIN