#include <assert.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include "defines.h"
#include "main.h"

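/*
 * Tear down an enclave: unmap the enclave range and the mapped binary,
 * close the /dev/sgx_enclave fd, unmap the anonymous heap backing (the
 * last entry in the segment table) and free the segment table itself.
 */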
void encl_delete(struct encl *encl)
{
	struct encl_segment *heap_seg;

	if (encl->encl_base)
		munmap((void *)encl->encl_base, encl->encl_size);

	if (encl->bin)
		munmap(encl->bin, encl->bin_size);

	if (encl->fd)
		close(encl->fd);

	if (encl->segment_tbl) {
		heap_seg = &encl->segment_tbl[encl->nr_segments - 1];
		munmap(heap_seg->src, heap_seg->size);
		free(encl->segment_tbl);
	}

	memset(encl, 0, sizeof(*encl));
}

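/*
 * Map the enclave binary at @path read-only so its ELF headers can be
 * parsed and its segments copied into the enclave.
 */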
static bool encl_map_bin(const char *path, struct encl *encl)
{
	struct stat sb;
	void *bin;
	int ret;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd == -1) {
		perror("enclave executable open()");
		return false;
	}

	ret = stat(path, &sb);
	if (ret) {
		perror("enclave executable stat()");
		goto err;
	}

	bin = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (bin == MAP_FAILED) {
		perror("enclave executable mmap()");
		goto err;
	}

	encl->bin = bin;
	encl->bin_size = sb.st_size;

	close(fd);
	return true;

err:
	close(fd);
	return false;
}

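/*
 * Fill in a minimal SECS (SSA frame size of one page, 64-bit mode, XFRM
 * covering x87 and SSE state) and create the enclave with
 * SGX_IOC_ENCLAVE_CREATE.
 */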
static bool encl_ioc_create(struct encl *encl)
{
	struct sgx_secs *secs = &encl->secs;
	struct sgx_enclave_create ioc;
	int rc;

	assert(encl->encl_base != 0);

	memset(secs, 0, sizeof(*secs));
	secs->ssa_frame_size = 1;
	secs->attributes = SGX_ATTR_MODE64BIT;
	secs->xfrm = 3;
	secs->base = encl->encl_base;
	secs->size = encl->encl_size;

	ioc.src = (unsigned long)secs;
	rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_CREATE, &ioc);
	if (rc) {
		perror("SGX_IOC_ENCLAVE_CREATE failed");
		munmap((void *)secs->base, encl->encl_size);
		return false;
	}

	return true;
}

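/*
 * Add one segment's pages to the enclave with SGX_IOC_ENCLAVE_ADD_PAGES.
 * Measured segments are extended into the enclave measurement with
 * SGX_PAGE_MEASURE; the heap segment is added unmeasured.
 */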
static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
{
	struct sgx_enclave_add_pages ioc;
	struct sgx_secinfo secinfo;
	int rc;

	memset(&secinfo, 0, sizeof(secinfo));
	secinfo.flags = seg->flags;

	ioc.src = (uint64_t)seg->src;
	ioc.offset = seg->offset;
	ioc.length = seg->size;
	ioc.secinfo = (unsigned long)&secinfo;
	if (seg->measure)
		ioc.flags = SGX_PAGE_MEASURE;
	else
		ioc.flags = 0;

	rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_ADD_PAGES, &ioc);
	if (rc < 0) {
		perror("SGX_IOC_ENCLAVE_ADD_PAGES failed");
		return false;
	}

	return true;
}

/*
 * Parse the enclave binary's symbol table and return the value of the
 * requested symbol (its address relative to the enclave base), or 0 if
 * the symbol cannot be found.
 */
uint64_t encl_get_entry(struct encl *encl, const char *symbol)
{
	Elf64_Sym *symtab = NULL;
	char *sym_names = NULL;
	Elf64_Shdr *sections;
	Elf64_Ehdr *ehdr;
	int num_sym = 0;
	int i;

	ehdr = encl->bin;
	sections = encl->bin + ehdr->e_shoff;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sections[i].sh_type == SHT_SYMTAB) {
			symtab = (Elf64_Sym *)((char *)encl->bin + sections[i].sh_offset);
			num_sym = sections[i].sh_size / sections[i].sh_entsize;
			break;
		}
	}

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sections[i].sh_type == SHT_STRTAB) {
			sym_names = (char *)encl->bin + sections[i].sh_offset;
			break;
		}
	}

	/* Avoid dereferencing uninitialized pointers if either table is absent. */
	if (!symtab || !sym_names)
		return 0;

	for (i = 0; i < num_sym; i++) {
		Elf64_Sym *sym = &symtab[i];

		if (!strcmp(symbol, sym_names + sym->st_name))
			return (uint64_t)sym->st_value;
	}

	return 0;
}

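/*
 * Load the enclave binary at @path: verify that /dev/sgx_enclave can be
 * mapped both readable and executable, map the binary, build the segment
 * table from its PT_LOAD headers, append an unmeasured heap segment of
 * @heap_size bytes, and round the enclave size up to a power of two.
 */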
bool encl_load(const char *path, struct encl *encl, unsigned long heap_size)
{
	const char device_path[] = "/dev/sgx_enclave";
	struct encl_segment *seg;
	Elf64_Phdr *phdr_tbl;
	off_t src_offset;
	Elf64_Ehdr *ehdr;
	struct stat sb;
	void *ptr;
	int i, j;
	int ret;
	int fd = -1;

	memset(encl, 0, sizeof(*encl));

	fd = open(device_path, O_RDWR);
	if (fd < 0) {
		perror("Unable to open /dev/sgx_enclave");
		goto err;
	}

	ret = stat(device_path, &sb);
	if (ret) {
		perror("device file stat()");
		goto err;
	}

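	/*
	 * Probe /dev/sgx_enclave: it must be mappable PROT_READ, and it must
	 * also be mappable PROT_EXEC, which fails when /dev is mounted noexec.
	 */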
	ptr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
	if (ptr == MAP_FAILED) {
		perror("mmap for read");
		goto err;
	}
	munmap(ptr, PAGE_SIZE);

#define ERR_MSG \
"mmap() succeeded for PROT_READ, but failed for PROT_EXEC.\n" \
" Check that /dev does not have noexec set:\n" \
" \tmount | grep \"/dev .*noexec\"\n" \
" If so, remount it executable: mount -o remount,exec /dev\n\n"

	ptr = mmap(NULL, PAGE_SIZE, PROT_EXEC, MAP_SHARED, fd, 0);
	if (ptr == MAP_FAILED) {
		fprintf(stderr, ERR_MSG);
		goto err;
	}
	munmap(ptr, PAGE_SIZE);

	encl->fd = fd;

	if (!encl_map_bin(path, encl))
		goto err;

	ehdr = encl->bin;
	phdr_tbl = encl->bin + ehdr->e_phoff;

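	/* Reserve one extra segment table entry for the heap. */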
	encl->nr_segments = 1;

	for (i = 0; i < ehdr->e_phnum; i++) {
		Elf64_Phdr *phdr = &phdr_tbl[i];

		if (phdr->p_type == PT_LOAD)
			encl->nr_segments++;
	}

	encl->segment_tbl = calloc(encl->nr_segments,
				   sizeof(struct encl_segment));
	if (!encl->segment_tbl)
		goto err;

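	/*
	 * Walk the PT_LOAD headers. The first loadable segment must be the
	 * read-write TCS segment; the rest become regular enclave pages with
	 * protections derived from the ELF segment flags.
	 */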
	for (i = 0, j = 0; i < ehdr->e_phnum; i++) {
		Elf64_Phdr *phdr = &phdr_tbl[i];
		unsigned int flags = phdr->p_flags;

		if (phdr->p_type != PT_LOAD)
			continue;

		seg = &encl->segment_tbl[j];

		if (!!(flags & ~(PF_R | PF_W | PF_X))) {
			fprintf(stderr,
				"%d has invalid segment flags 0x%02x.\n", i,
				phdr->p_flags);
			goto err;
		}

		if (j == 0 && flags != (PF_R | PF_W)) {
			fprintf(stderr,
				"TCS has invalid segment flags 0x%02x.\n",
				phdr->p_flags);
			goto err;
		}

		if (j == 0) {
			src_offset = phdr->p_offset & PAGE_MASK;
			encl->src = encl->bin + src_offset;

			seg->prot = PROT_READ | PROT_WRITE;
			seg->flags = SGX_PAGE_TYPE_TCS << 8;
		} else {
			seg->prot = (phdr->p_flags & PF_R) ? PROT_READ : 0;
			seg->prot |= (phdr->p_flags & PF_W) ? PROT_WRITE : 0;
			seg->prot |= (phdr->p_flags & PF_X) ? PROT_EXEC : 0;
			seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot;
		}

		seg->offset = (phdr->p_offset & PAGE_MASK) - src_offset;
		seg->size = (phdr->p_filesz + PAGE_SIZE - 1) & PAGE_MASK;
		seg->src = encl->src + seg->offset;
		seg->measure = true;

		j++;
	}

	assert(j == encl->nr_segments - 1);

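	/* Append the unmeasured heap segment directly after the last ELF segment. */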
	seg = &encl->segment_tbl[j];
	seg->offset = encl->segment_tbl[j - 1].offset + encl->segment_tbl[j - 1].size;
	seg->size = heap_size;
	seg->src = mmap(NULL, heap_size, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	seg->prot = PROT_READ | PROT_WRITE;
	seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot;
	seg->measure = false;

	if (seg->src == MAP_FAILED)
		goto err;

	encl->src_size = encl->segment_tbl[j].offset + encl->segment_tbl[j].size;

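	/* SGX requires the enclave size to be a power of two. */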
	for (encl->encl_size = 4096; encl->encl_size < encl->src_size; )
		encl->encl_size <<= 1;

	return true;

err:
	/* Once encl->fd owns the descriptor, let encl_delete() close it. */
	if (fd != -1 && !encl->fd)
		close(fd);

	encl_delete(encl);

	return false;
}

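/*
 * Reserve virtual address space for the enclave. SGX requires the base to
 * be naturally aligned to the (power-of-two) enclave size, so reserve twice
 * the size with PROT_NONE, pick an aligned base inside the reservation, and
 * unmap the slack on both sides.
 */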
static bool encl_map_area(struct encl *encl)
{
	size_t encl_size = encl->encl_size;
	void *area;

	area = mmap(NULL, encl_size * 2, PROT_NONE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) {
		perror("reservation mmap()");
		return false;
	}

	encl->encl_base = ((uint64_t)area + encl_size - 1) & ~(encl_size - 1);

	munmap(area, encl->encl_base - (uint64_t)area);
	munmap((void *)(encl->encl_base + encl_size),
	       (uint64_t)area + encl_size - encl->encl_base);

	return true;
}

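/*
 * Build the enclave: reserve its address range, create it, add every
 * segment, and initialize it with encl->sigstruct.
 */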
bool encl_build(struct encl *encl)
{
	struct sgx_enclave_init ioc;
	int ret;
	int i;

	if (!encl_map_area(encl))
		return false;

	if (!encl_ioc_create(encl))
		return false;

	/*
	 * Pages must be added before mapping VMAs because their permissions
	 * cap the VMA permissions.
	 */
	for (i = 0; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		if (!encl_ioc_add_pages(encl, seg))
			return false;
	}

	ioc.sigstruct = (uint64_t)&encl->sigstruct;
	ret = ioctl(encl->fd, SGX_IOC_ENCLAVE_INIT, &ioc);
	if (ret) {
		perror("SGX_IOC_ENCLAVE_INIT failed");
		return false;
	}

	return true;
}