0001
0002
0003
0004 #include <asm/mman.h>
0005 #include <asm/sgx.h>
0006 #include <linux/mman.h>
0007 #include <linux/delay.h>
0008 #include <linux/file.h>
0009 #include <linux/hashtable.h>
0010 #include <linux/highmem.h>
0011 #include <linux/ratelimit.h>
0012 #include <linux/sched/signal.h>
0013 #include <linux/shmem_fs.h>
0014 #include <linux/slab.h>
0015 #include <linux/suspend.h>
0016 #include "driver.h"
0017 #include "encl.h"
0018 #include "encls.h"
0019
0020 struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl, bool reclaim)
0021 {
0022 struct sgx_va_page *va_page = NULL;
0023 void *err;
0024
0025 BUILD_BUG_ON(SGX_VA_SLOT_COUNT !=
0026 (SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3) + 1);
0027
0028 if (!(encl->page_cnt % SGX_VA_SLOT_COUNT)) {
0029 va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
0030 if (!va_page)
0031 return ERR_PTR(-ENOMEM);
0032
0033 va_page->epc_page = sgx_alloc_va_page(reclaim);
0034 if (IS_ERR(va_page->epc_page)) {
0035 err = ERR_CAST(va_page->epc_page);
0036 kfree(va_page);
0037 return err;
0038 }
0039
0040 WARN_ON_ONCE(encl->page_cnt % SGX_VA_SLOT_COUNT);
0041 }
0042 encl->page_cnt++;
0043 return va_page;
0044 }
0045
0046 void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
0047 {
0048 encl->page_cnt--;
0049
0050 if (va_page) {
0051 sgx_encl_free_epc_page(va_page->epc_page);
0052 list_del(&va_page->list);
0053 kfree(va_page);
0054 }
0055 }
0056
/*
 * sgx_encl_create() - build an enclave from a userspace-provided SECS
 * @encl: enclave object being created
 * @secs: SECS page contents copied from userspace
 *
 * Allocates the first VA page, the shmem file that backs reclaimed
 * pages, and the SECS EPC page, then runs ENCLS[ECREATE].  On success
 * the enclave is flagged SGX_ENCL_CREATED, which gates the other
 * ioctls.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
{
	struct sgx_epc_page *secs_epc;
	struct sgx_va_page *va_page;
	struct sgx_pageinfo pginfo;
	struct sgx_secinfo secinfo;
	unsigned long encl_size;
	struct file *backing;
	long ret;

	va_page = sgx_encl_grow(encl, true);
	if (IS_ERR(va_page))
		return PTR_ERR(va_page);
	else if (va_page)
		list_add(&va_page->list, &encl->va_pages);

	/* The extra page goes to the SECS. */
	encl_size = secs->size + PAGE_SIZE;

	/*
	 * The backing store holds every enclave page plus encl_size >> 5
	 * extra bytes of per-page metadata (presumably PCMD — confirm
	 * against the swapping code in encl.c).
	 */
	backing = shmem_file_setup("SGX backing", encl_size + (encl_size >> 5),
				   VM_NORESERVE);
	if (IS_ERR(backing)) {
		ret = PTR_ERR(backing);
		goto err_out_shrink;
	}

	encl->backing = backing;

	secs_epc = sgx_alloc_epc_page(&encl->secs, true);
	if (IS_ERR(secs_epc)) {
		ret = PTR_ERR(secs_epc);
		goto err_out_backing;
	}

	encl->secs.epc_page = secs_epc;

	pginfo.addr = 0;
	pginfo.contents = (unsigned long)secs;
	pginfo.metadata = (unsigned long)&secinfo;
	pginfo.secs = 0;
	memset(&secinfo, 0, sizeof(secinfo));

	ret = __ecreate((void *)&pginfo, sgx_get_epc_virt_addr(secs_epc));
	if (ret) {
		/* Any ECREATE failure is collapsed into -EIO. */
		ret = -EIO;
		goto err_out;
	}

	if (secs->attributes & SGX_ATTR_DEBUG)
		set_bit(SGX_ENCL_DEBUG, &encl->flags);

	encl->secs.encl = encl;
	encl->secs.type = SGX_PAGE_TYPE_SECS;
	encl->base = secs->base;
	encl->size = secs->size;
	encl->attributes = secs->attributes;
	/* Baseline of attributes userspace may use without provisioning. */
	encl->attributes_mask = SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT | SGX_ATTR_KSS;

	/* Set only after the enclave is fully set up for the other ioctls. */
	set_bit(SGX_ENCL_CREATED, &encl->flags);

	return 0;

err_out:
	sgx_encl_free_epc_page(encl->secs.epc_page);
	encl->secs.epc_page = NULL;

err_out_backing:
	fput(encl->backing);
	encl->backing = NULL;

err_out_shrink:
	sgx_encl_shrink(encl, va_page);

	return ret;
}
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147 static long sgx_ioc_enclave_create(struct sgx_encl *encl, void __user *arg)
0148 {
0149 struct sgx_enclave_create create_arg;
0150 void *secs;
0151 int ret;
0152
0153 if (test_bit(SGX_ENCL_CREATED, &encl->flags))
0154 return -EINVAL;
0155
0156 if (copy_from_user(&create_arg, arg, sizeof(create_arg)))
0157 return -EFAULT;
0158
0159 secs = kmalloc(PAGE_SIZE, GFP_KERNEL);
0160 if (!secs)
0161 return -ENOMEM;
0162
0163 if (copy_from_user(secs, (void __user *)create_arg.src, PAGE_SIZE))
0164 ret = -EFAULT;
0165 else
0166 ret = sgx_encl_create(encl, secs);
0167
0168 kfree(secs);
0169 return ret;
0170 }
0171
0172 static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
0173 {
0174 u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
0175 u64 pt = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
0176
0177 if (pt != SGX_SECINFO_REG && pt != SGX_SECINFO_TCS)
0178 return -EINVAL;
0179
0180 if ((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R))
0181 return -EINVAL;
0182
0183
0184
0185
0186
0187 if (pt == SGX_SECINFO_TCS && perm)
0188 return -EINVAL;
0189
0190 if (secinfo->flags & SGX_SECINFO_RESERVED_MASK)
0191 return -EINVAL;
0192
0193 if (memchr_inv(secinfo->reserved, 0, sizeof(secinfo->reserved)))
0194 return -EINVAL;
0195
0196 return 0;
0197 }
0198
/*
 * Copy one page of enclave contents from userspace into an EPC page
 * with ENCLS[EADD].
 *
 * The caller (sgx_encl_add_page()) holds mmap_read_lock() and
 * encl->lock, so the VMA lookup below is stable.
 */
static int __sgx_encl_add_page(struct sgx_encl *encl,
			       struct sgx_encl_page *encl_page,
			       struct sgx_epc_page *epc_page,
			       struct sgx_secinfo *secinfo, unsigned long src)
{
	struct sgx_pageinfo pginfo;
	struct vm_area_struct *vma;
	struct page *src_page;
	int ret;

	/* The source address must live in an existing mapping ... */
	vma = find_vma(current->mm, src);
	if (!vma)
		return -EFAULT;

	/*
	 * ... that is allowed to become executable; presumably an
	 * indirect noexec-mount check — confirm intent against upstream.
	 */
	if (!(vma->vm_flags & VM_MAYEXEC))
		return -EACCES;

	ret = get_user_pages(src, 1, 0, &src_page, NULL);
	if (ret < 1)
		return -EFAULT;

	pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
	pginfo.addr = encl_page->desc & PAGE_MASK;
	pginfo.metadata = (unsigned long)secinfo;
	pginfo.contents = (unsigned long)kmap_atomic(src_page);

	ret = __eadd(&pginfo, sgx_get_epc_virt_addr(epc_page));

	/* Unmap the source before dropping the page reference. */
	kunmap_atomic((void *)pginfo.contents);
	put_page(src_page);

	/* Collapse all EADD failures into -EIO. */
	return ret ? -EIO : 0;
}
0233
0234
0235
0236
0237
0238
0239 static int __sgx_encl_extend(struct sgx_encl *encl,
0240 struct sgx_epc_page *epc_page)
0241 {
0242 unsigned long offset;
0243 int ret;
0244
0245 for (offset = 0; offset < PAGE_SIZE; offset += SGX_EEXTEND_BLOCK_SIZE) {
0246 ret = __eextend(sgx_get_epc_virt_addr(encl->secs.epc_page),
0247 sgx_get_epc_virt_addr(epc_page) + offset);
0248 if (ret) {
0249 if (encls_failed(ret))
0250 ENCLS_WARN(ret, "EEXTEND");
0251
0252 return -EIO;
0253 }
0254 }
0255
0256 return 0;
0257 }
0258
/*
 * Allocate the tracking structure and EPC page for one enclave page,
 * EADD it from @src, optionally measure it (EEXTEND when
 * SGX_PAGE_MEASURE is in @flags), and finally make it reclaimable.
 *
 * mmap_read_lock() and encl->lock are held around the EADD so the
 * source VMA and the enclave state stay stable.
 */
static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
			     unsigned long offset, struct sgx_secinfo *secinfo,
			     unsigned long flags)
{
	struct sgx_encl_page *encl_page;
	struct sgx_epc_page *epc_page;
	struct sgx_va_page *va_page;
	int ret;

	encl_page = sgx_encl_page_alloc(encl, offset, secinfo->flags);
	if (IS_ERR(encl_page))
		return PTR_ERR(encl_page);

	epc_page = sgx_alloc_epc_page(encl_page, true);
	if (IS_ERR(epc_page)) {
		kfree(encl_page);
		return PTR_ERR(epc_page);
	}

	va_page = sgx_encl_grow(encl, true);
	if (IS_ERR(va_page)) {
		ret = PTR_ERR(va_page);
		goto err_out_free;
	}

	mmap_read_lock(current->mm);
	mutex_lock(&encl->lock);

	/*
	 * The VA page list is manipulated only under encl->lock; ditto
	 * for sgx_encl_shrink() on the error paths below.
	 */
	if (va_page)
		list_add(&va_page->list, &encl->va_pages);

	/*
	 * Insert into the page array prior to EADD: the insert can fail
	 * (e.g. a duplicate offset) before any irreversible EPC work —
	 * EADD modifies MRENCLAVE and cannot be undone — has been done.
	 */
	ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
			encl_page, GFP_KERNEL);
	if (ret)
		goto err_out_unlock;

	ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
				  src);
	if (ret)
		goto err_out;

	/*
	 * Complete the "add" before doing the "extend": from this point
	 * on the page is fully wired into the enclave.
	 */
	encl_page->encl = encl;
	encl_page->epc_page = epc_page;
	/* SECINFO.FLAGS carries the page type in bits 15:8. */
	encl_page->type = (secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK) >> 8;
	encl->secs_child_cnt++;

	if (flags & SGX_PAGE_MEASURE) {
		ret = __sgx_encl_extend(encl, epc_page);
		if (ret)
			goto err_out;
	}

	sgx_mark_page_reclaimable(encl_page->epc_page);
	mutex_unlock(&encl->lock);
	mmap_read_unlock(current->mm);
	return ret;

err_out:
	xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));

err_out_unlock:
	sgx_encl_shrink(encl, va_page);
	mutex_unlock(&encl->lock);
	mmap_read_unlock(current->mm);

err_out_free:
	sgx_encl_free_epc_page(epc_page);
	kfree(encl_page);

	return ret;
}
0344
0345
0346
0347
0348
0349 static int sgx_validate_offset_length(struct sgx_encl *encl,
0350 unsigned long offset,
0351 unsigned long length)
0352 {
0353 if (!IS_ALIGNED(offset, PAGE_SIZE))
0354 return -EINVAL;
0355
0356 if (!length || !IS_ALIGNED(length, PAGE_SIZE))
0357 return -EINVAL;
0358
0359 if (offset + length - PAGE_SIZE >= encl->size)
0360 return -EINVAL;
0361
0362 return 0;
0363 }
0364
0365
0366
0367
0368
0369
0370
0371
0372
0373
0374
0375
0376
0377
0378
0379
0380
0381
0382
0383
0384
0385
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398
0399
0400
0401
0402
0403
0404 static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
0405 {
0406 struct sgx_enclave_add_pages add_arg;
0407 struct sgx_secinfo secinfo;
0408 unsigned long c;
0409 int ret;
0410
0411 if (!test_bit(SGX_ENCL_CREATED, &encl->flags) ||
0412 test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
0413 return -EINVAL;
0414
0415 if (copy_from_user(&add_arg, arg, sizeof(add_arg)))
0416 return -EFAULT;
0417
0418 if (!IS_ALIGNED(add_arg.src, PAGE_SIZE))
0419 return -EINVAL;
0420
0421 if (sgx_validate_offset_length(encl, add_arg.offset, add_arg.length))
0422 return -EINVAL;
0423
0424 if (copy_from_user(&secinfo, (void __user *)add_arg.secinfo,
0425 sizeof(secinfo)))
0426 return -EFAULT;
0427
0428 if (sgx_validate_secinfo(&secinfo))
0429 return -EINVAL;
0430
0431 for (c = 0 ; c < add_arg.length; c += PAGE_SIZE) {
0432 if (signal_pending(current)) {
0433 if (!c)
0434 ret = -ERESTARTSYS;
0435
0436 break;
0437 }
0438
0439 if (need_resched())
0440 cond_resched();
0441
0442 ret = sgx_encl_add_page(encl, add_arg.src + c, add_arg.offset + c,
0443 &secinfo, add_arg.flags);
0444 if (ret)
0445 break;
0446 }
0447
0448 add_arg.count = c;
0449
0450 if (copy_to_user(arg, &add_arg, sizeof(add_arg)))
0451 return -EFAULT;
0452
0453 return ret;
0454 }
0455
0456 static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
0457 void *hash)
0458 {
0459 SHASH_DESC_ON_STACK(shash, tfm);
0460
0461 shash->tfm = tfm;
0462
0463 return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
0464 }
0465
0466 static int sgx_get_key_hash(const void *modulus, void *hash)
0467 {
0468 struct crypto_shash *tfm;
0469 int ret;
0470
0471 tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
0472 if (IS_ERR(tfm))
0473 return PTR_ERR(tfm);
0474
0475 ret = __sgx_get_key_hash(tfm, modulus, hash);
0476
0477 crypto_free_shash(tfm);
0478 return ret;
0479 }
0480
/*
 * sgx_encl_init() - execute ENCLS[EINIT] against the enclave's SECS
 * @encl:      enclave to finalize
 * @sigstruct: SIGSTRUCT copied from userspace
 * @token:     launch token (zeroed by the caller, unused by this driver)
 *
 * Return: 0 on success, -EACCES for disallowed attributes, -EPERM when
 * EINIT refuses the enclave, -EIO on an unexpected ENCLS fault,
 * -ERESTARTSYS on a pending signal, other -errno on failure.
 */
static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
			 void *token)
{
	u64 mrsigner[4];
	int i, j;
	void *addr;
	int ret;

	/*
	 * Deny attributes (e.g. provisioning) that were not explicitly
	 * allowed for this enclave (see encl->attributes_mask setup in
	 * sgx_encl_create() and sgx_ioc_enclave_provision()).
	 */
	if (encl->attributes & ~encl->attributes_mask)
		return -EACCES;

	/*
	 * Reserved bits are only rejected when the corresponding mask
	 * bit is also set: a bit whose mask bit is clear is not enforced
	 * by EINIT, so the enclave stays loadable on platforms lacking
	 * that feature.
	 */
	if (sigstruct->body.attributes & sigstruct->body.attributes_mask &
	    sgx_attributes_reserved_mask)
		return -EINVAL;

	if (sigstruct->body.miscselect & sigstruct->body.misc_mask &
	    sgx_misc_reserved_mask)
		return -EINVAL;

	if (sigstruct->body.xfrm & sigstruct->body.xfrm_mask &
	    sgx_xfrm_reserved_mask)
		return -EINVAL;

	/* MRSIGNER: SHA-256 of the signer's public key modulus. */
	ret = sgx_get_key_hash(sigstruct->modulus, mrsigner);
	if (ret)
		return ret;

	mutex_lock(&encl->lock);

	/*
	 * EINIT has a very high latency and may fail with
	 * SGX_UNMASKED_EVENT when an interrupt becomes pending during
	 * the instruction.  Spin a bounded number of times per round,
	 * then sleep between rounds before retrying.
	 */
	for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
		for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
			addr = sgx_get_epc_virt_addr(encl->secs.epc_page);

			/*
			 * Keep preemption disabled so that EINIT runs on
			 * the same CPU whose launch-key hash state
			 * sgx_update_lepubkeyhash() just programmed.
			 */
			preempt_disable();

			sgx_update_lepubkeyhash(mrsigner);

			ret = __einit(sigstruct, token, addr);

			preempt_enable();

			if (ret == SGX_UNMASKED_EVENT)
				continue;
			else
				break;
		}

		if (ret != SGX_UNMASKED_EVENT)
			break;

		msleep_interruptible(SGX_EINIT_SLEEP_TIME);

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			goto err_out;
		}
	}

	if (encls_faulted(ret)) {
		if (encls_failed(ret))
			ENCLS_WARN(ret, "EINIT");

		ret = -EIO;
	} else if (ret) {
		/* A non-fault SGX error code: the enclave was refused. */
		pr_debug("EINIT returned %d\n", ret);
		ret = -EPERM;
	} else {
		set_bit(SGX_ENCL_INITIALIZED, &encl->flags);
	}

err_out:
	mutex_unlock(&encl->lock);
	return ret;
}
0573
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584
0585
0586
0587
0588
/**
 * sgx_ioc_enclave_init() - handler for %SGX_IOC_ENCLAVE_INIT
 * @encl:	an enclave pointer
 * @arg:	userspace pointer to a struct sgx_enclave_init instance
 *
 * Copies the SIGSTRUCT from userspace and finalizes the enclave with
 * EINIT via sgx_encl_init().  Initializing an enclave twice, or before
 * %SGX_IOC_ENCLAVE_CREATE, is rejected.
 *
 * Return: 0 on success, -errno otherwise.
 */
static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
{
	struct sgx_sigstruct *sigstruct;
	struct sgx_enclave_init init_arg;
	void *token;
	int ret;

	if (!test_bit(SGX_ENCL_CREATED, &encl->flags) ||
	    test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
		return -EINVAL;

	if (copy_from_user(&init_arg, arg, sizeof(init_arg)))
		return -EFAULT;

	/*
	 * EINIT consumes both a SIGSTRUCT and a launch token.  Carve
	 * both out of a single page: the SIGSTRUCT in the first half,
	 * and a zeroed (unused) token at PAGE_SIZE / 2.
	 */
	sigstruct = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sigstruct)
		return -ENOMEM;

	token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2);
	memset(token, 0, SGX_LAUNCH_TOKEN_SIZE);

	if (copy_from_user(sigstruct, (void __user *)init_arg.sigstruct,
			   sizeof(*sigstruct))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * The vendor field is a legacy marker used with Intel-signed
	 * enclaves; the CPU only accepts 0x0000 and 0x8086, and the
	 * values carry no other meaning.  Reject anything else.
	 */
	if (sigstruct->header.vendor != 0x0000 &&
	    sigstruct->header.vendor != 0x8086) {
		ret = -EINVAL;
		goto out;
	}

	ret = sgx_encl_init(encl, sigstruct, token);

out:
	kfree(sigstruct);
	return ret;
}
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653 static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg)
0654 {
0655 struct sgx_enclave_provision params;
0656
0657 if (copy_from_user(¶ms, arg, sizeof(params)))
0658 return -EFAULT;
0659
0660 return sgx_set_attribute(&encl->attributes_mask, params.fd);
0661 }
0662
0663
0664
0665
0666
0667
0668 static int sgx_ioc_sgx2_ready(struct sgx_encl *encl)
0669 {
0670 if (!(cpu_feature_enabled(X86_FEATURE_SGX2)))
0671 return -ENODEV;
0672
0673 if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
0674 return -EINVAL;
0675
0676 return 0;
0677 }
0678
0679
0680
0681
0682
0683
0684
0685
0686
0687
0688
0689
/*
 * Run ENCLS[ETRACK] on the enclave's SECS to start a new tracking
 * epoch, then IPI every CPU that may be executing inside the enclave
 * (sgx_encl_cpumask()) so stale TLB entries are flushed and the epoch
 * can complete.
 */
static int sgx_enclave_etrack(struct sgx_encl *encl)
{
	void *epc_virt;
	int ret;

	epc_virt = sgx_get_epc_virt_addr(encl->secs.epc_page);
	ret = __etrack(epc_virt);
	if (ret) {
		/*
		 * ETRACK can fail, presumably when a previous tracking
		 * cycle is still incomplete — log once for diagnosis.
		 */
		pr_err_once("ETRACK returned %d (0x%x)", ret, ret);

		/*
		 * Kick the CPUs out of the enclave to let the previous
		 * cycle finish, then retry exactly once.
		 */
		on_each_cpu_mask(sgx_encl_cpumask(encl), sgx_ipi_cb, NULL, 1);
		ret = __etrack(epc_virt);
		if (ret) {
			pr_err_once("ETRACK repeat returned %d (0x%x)",
				    ret, ret);
			return -EFAULT;
		}
	}
	on_each_cpu_mask(sgx_encl_cpumask(encl), sgx_ipi_cb, NULL, 1);

	return 0;
}
0720
0721
0722
0723
0724
0725
0726
0727
0728
0729
0730
/*
 * Restrict the EPCM permissions of every page in the given range with
 * ENCLS[EMODPR], flushing stale TLB entries via sgx_enclave_etrack()
 * after each page.  encl->lock is taken and released per page so that
 * the loop never holds it across sgx_reclaim_direct().
 *
 * On failure @modp->count holds the bytes successfully processed and,
 * for EMODPR failures, @modp->result holds the raw ENCLS return code.
 */
static long
sgx_enclave_restrict_permissions(struct sgx_encl *encl,
				 struct sgx_enclave_restrict_permissions *modp)
{
	struct sgx_encl_page *entry;
	struct sgx_secinfo secinfo;
	unsigned long addr;
	unsigned long c;
	void *epc_virt;
	int ret;

	memset(&secinfo, 0, sizeof(secinfo));
	secinfo.flags = modp->permissions & SGX_SECINFO_PERMISSION_MASK;

	for (c = 0 ; c < modp->length; c += PAGE_SIZE) {
		addr = encl->base + modp->offset + c;

		/* Give reclaim a chance to run before taking encl->lock. */
		sgx_reclaim_direct();

		mutex_lock(&encl->lock);

		/* May fault the page back in; -EBUSY maps to "try again". */
		entry = sgx_encl_load_page(encl, addr);
		if (IS_ERR(entry)) {
			ret = PTR_ERR(entry) == -EBUSY ? -EAGAIN : -EFAULT;
			goto out_unlock;
		}

		/*
		 * Changing EPCM permissions is only supported on regular
		 * SGX pages.
		 */
		if (entry->type != SGX_PAGE_TYPE_REG) {
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * Run EMODPR with the already-sanitized permissions (see
		 * the checks in sgx_ioc_enclave_restrict_permissions()).
		 * A fault here is unexpected and reported loudly once.
		 */
		epc_virt = sgx_get_epc_virt_addr(entry->epc_page);
		ret = __emodpr(&secinfo, epc_virt);
		if (encls_faulted(ret)) {
			pr_err_once("EMODPR encountered exception %d\n",
				    ENCLS_TRAPNR(ret));
			ret = -EFAULT;
			goto out_unlock;
		}
		if (encls_failed(ret)) {
			/* Hand the raw ENCLS code back to userspace. */
			modp->result = ret;
			ret = -EFAULT;
			goto out_unlock;
		}

		/* Flush stale cached permissions from the TLBs. */
		ret = sgx_enclave_etrack(encl);
		if (ret) {
			ret = -EFAULT;
			goto out_unlock;
		}

		mutex_unlock(&encl->lock);
	}

	ret = 0;
	goto out;

out_unlock:
	mutex_unlock(&encl->lock);
out:
	modp->count = c;

	return ret;
}
0818
0819
0820
0821
0822
0823
0824
0825
0826
0827
0828
0829
0830
0831
0832
0833
0834
0835
0836
0837
0838
0839 static long sgx_ioc_enclave_restrict_permissions(struct sgx_encl *encl,
0840 void __user *arg)
0841 {
0842 struct sgx_enclave_restrict_permissions params;
0843 long ret;
0844
0845 ret = sgx_ioc_sgx2_ready(encl);
0846 if (ret)
0847 return ret;
0848
0849 if (copy_from_user(¶ms, arg, sizeof(params)))
0850 return -EFAULT;
0851
0852 if (sgx_validate_offset_length(encl, params.offset, params.length))
0853 return -EINVAL;
0854
0855 if (params.permissions & ~SGX_SECINFO_PERMISSION_MASK)
0856 return -EINVAL;
0857
0858
0859
0860
0861
0862 if ((params.permissions & SGX_SECINFO_W) &&
0863 !(params.permissions & SGX_SECINFO_R))
0864 return -EINVAL;
0865
0866 if (params.result || params.count)
0867 return -EINVAL;
0868
0869 ret = sgx_enclave_restrict_permissions(encl, ¶ms);
0870
0871 if (copy_to_user(arg, ¶ms, sizeof(params)))
0872 return -EFAULT;
0873
0874 return ret;
0875 }
0876
0877
0878
0879
0880
0881
0882
0883
0884
0885
0886
/*
 * Change the SGX page type of every page in the given range with
 * ENCLS[EMODT].  Only conversions to TCS or TRIM are supported; a TCS
 * page can only be converted to TRIM.  After each EMODT, stale TLB
 * entries are flushed via sgx_enclave_etrack().
 *
 * On failure @modt->count holds the bytes successfully processed and,
 * for EMODT failures, @modt->result holds the raw ENCLS return code.
 */
static long sgx_enclave_modify_types(struct sgx_encl *encl,
				     struct sgx_enclave_modify_types *modt)
{
	unsigned long max_prot_restore;
	enum sgx_page_type page_type;
	struct sgx_encl_page *entry;
	struct sgx_secinfo secinfo;
	unsigned long prot;
	unsigned long addr;
	unsigned long c;
	void *epc_virt;
	int ret;

	page_type = modt->page_type & SGX_PAGE_TYPE_MASK;

	/* Only TCS and TRIM are valid targets. */
	if (page_type != SGX_PAGE_TYPE_TCS && page_type != SGX_PAGE_TYPE_TRIM)
		return -EINVAL;

	memset(&secinfo, 0, sizeof(secinfo));

	/* SECINFO.FLAGS stores the page type in bits 15:8. */
	secinfo.flags = page_type << 8;

	for (c = 0 ; c < modt->length; c += PAGE_SIZE) {
		addr = encl->base + modt->offset + c;

		/* Give reclaim a chance to run before taking encl->lock. */
		sgx_reclaim_direct();

		mutex_lock(&encl->lock);

		entry = sgx_encl_load_page(encl, addr);
		if (IS_ERR(entry)) {
			ret = PTR_ERR(entry) == -EBUSY ? -EAGAIN : -EFAULT;
			goto out_unlock;
		}

		/*
		 * Legal transitions: REG -> TCS, REG -> TRIM, TCS -> TRIM.
		 * Anything else is rejected.
		 */
		if (!(entry->type == SGX_PAGE_TYPE_REG ||
		      (entry->type == SGX_PAGE_TYPE_TCS &&
		       page_type == SGX_PAGE_TYPE_TRIM))) {
			ret = -EINVAL;
			goto out_unlock;
		}

		/* Saved so a later failure can undo the change below. */
		max_prot_restore = entry->vm_max_prot_bits;

		/*
		 * Converting a regular page to a TCS: the page must have
		 * been mappable RW (TCS setup needs it), after which its
		 * maximum protection is pinned to RW and all existing
		 * PTEs are zapped so new mappings honor the new limits.
		 */
		if (entry->type == SGX_PAGE_TYPE_REG &&
		    page_type == SGX_PAGE_TYPE_TCS) {
			if (~entry->vm_max_prot_bits & (VM_READ | VM_WRITE)) {
				ret = -EPERM;
				goto out_unlock;
			}
			prot = PROT_READ | PROT_WRITE;
			entry->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);

			/*
			 * Keep the reclaimer away from the page while its
			 * PTEs are being zapped.
			 */
			if (sgx_unmark_page_reclaimable(entry->epc_page)) {
				ret = -EAGAIN;
				goto out_entry_changed;
			}

			/*
			 * sgx_zap_enclave_ptes() must run without
			 * encl->lock held, so drop and re-take it.
			 */
			mutex_unlock(&encl->lock);

			sgx_zap_enclave_ptes(encl, addr);

			mutex_lock(&encl->lock);

			sgx_mark_page_reclaimable(entry->epc_page);
		}

		/* Change EPC page type with EMODT. */
		epc_virt = sgx_get_epc_virt_addr(entry->epc_page);
		ret = __emodt(&secinfo, epc_virt);
		if (encls_faulted(ret)) {
			/*
			 * A fault here is unexpected after the checks
			 * above; report once and undo the prot change.
			 */
			pr_err_once("EMODT encountered exception %d\n",
				    ENCLS_TRAPNR(ret));
			ret = -EFAULT;
			goto out_entry_changed;
		}
		if (encls_failed(ret)) {
			/* Hand the raw ENCLS code back to userspace. */
			modt->result = ret;
			ret = -EFAULT;
			goto out_entry_changed;
		}

		/* Flush stale cached page-type data from the TLBs. */
		ret = sgx_enclave_etrack(encl);
		if (ret) {
			ret = -EFAULT;
			goto out_unlock;
		}

		entry->type = page_type;

		mutex_unlock(&encl->lock);
	}

	ret = 0;
	goto out;

out_entry_changed:
	entry->vm_max_prot_bits = max_prot_restore;
out_unlock:
	mutex_unlock(&encl->lock);
out:
	modt->count = c;

	return ret;
}
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048 static long sgx_ioc_enclave_modify_types(struct sgx_encl *encl,
1049 void __user *arg)
1050 {
1051 struct sgx_enclave_modify_types params;
1052 long ret;
1053
1054 ret = sgx_ioc_sgx2_ready(encl);
1055 if (ret)
1056 return ret;
1057
1058 if (copy_from_user(¶ms, arg, sizeof(params)))
1059 return -EFAULT;
1060
1061 if (sgx_validate_offset_length(encl, params.offset, params.length))
1062 return -EINVAL;
1063
1064 if (params.page_type & ~SGX_PAGE_TYPE_MASK)
1065 return -EINVAL;
1066
1067 if (params.result || params.count)
1068 return -EINVAL;
1069
1070 ret = sgx_enclave_modify_types(encl, ¶ms);
1071
1072 if (copy_to_user(arg, ¶ms, sizeof(params)))
1073 return -EFAULT;
1074
1075 return ret;
1076 }
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
/*
 * Remove every page in the given range from an initialized enclave.
 * Each page must previously have been converted to the TRIM type and
 * EACCEPTed from within the enclave; the EMODPR probe below verifies
 * that before the page's EPC backing and tracking state are torn down.
 *
 * On failure @params->count holds the bytes successfully processed.
 */
static long sgx_encl_remove_pages(struct sgx_encl *encl,
				  struct sgx_enclave_remove_pages *params)
{
	struct sgx_encl_page *entry;
	struct sgx_secinfo secinfo;
	unsigned long addr;
	unsigned long c;
	void *epc_virt;
	int ret;

	memset(&secinfo, 0, sizeof(secinfo));
	secinfo.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X;

	for (c = 0 ; c < params->length; c += PAGE_SIZE) {
		addr = encl->base + params->offset + c;

		/* Give reclaim a chance to run before taking encl->lock. */
		sgx_reclaim_direct();

		mutex_lock(&encl->lock);

		entry = sgx_encl_load_page(encl, addr);
		if (IS_ERR(entry)) {
			ret = PTR_ERR(entry) == -EBUSY ? -EAGAIN : -EFAULT;
			goto out_unlock;
		}

		/* Only pages already converted to TRIM may be removed. */
		if (entry->type != SGX_PAGE_TYPE_TRIM) {
			ret = -EPERM;
			goto out_unlock;
		}

		/*
		 * Probe whether ENCLU[EACCEPT] was run from within the
		 * enclave: EMODPR with RWX on a trimmed-but-not-yet-
		 * accepted page raises #PF, so anything other than that
		 * exact fault means the page is not ready for removal.
		 */
		epc_virt = sgx_get_epc_virt_addr(entry->epc_page);
		ret = __emodpr(&secinfo, epc_virt);
		if (!encls_faulted(ret) || ENCLS_TRAPNR(ret) != X86_TRAP_PF) {
			ret = -EPERM;
			goto out_unlock;
		}

		/* Keep the reclaimer away while the page is dismantled. */
		if (sgx_unmark_page_reclaimable(entry->epc_page)) {
			ret = -EBUSY;
			goto out_unlock;
		}

		/*
		 * sgx_zap_enclave_ptes() must run without encl->lock
		 * held, so drop and re-take it around the zap.
		 */
		mutex_unlock(&encl->lock);

		sgx_zap_enclave_ptes(encl, addr);

		mutex_lock(&encl->lock);

		/* Tear down EPC backing and all tracking state. */
		sgx_encl_free_epc_page(entry->epc_page);
		encl->secs_child_cnt--;
		entry->epc_page = NULL;
		xa_erase(&encl->page_array, PFN_DOWN(entry->desc));
		sgx_encl_shrink(encl, NULL);
		kfree(entry);

		mutex_unlock(&encl->lock);
	}

	ret = 0;
	goto out;

out_unlock:
	mutex_unlock(&encl->lock);
out:
	params->count = c;

	return ret;
}
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195 static long sgx_ioc_enclave_remove_pages(struct sgx_encl *encl,
1196 void __user *arg)
1197 {
1198 struct sgx_enclave_remove_pages params;
1199 long ret;
1200
1201 ret = sgx_ioc_sgx2_ready(encl);
1202 if (ret)
1203 return ret;
1204
1205 if (copy_from_user(¶ms, arg, sizeof(params)))
1206 return -EFAULT;
1207
1208 if (sgx_validate_offset_length(encl, params.offset, params.length))
1209 return -EINVAL;
1210
1211 if (params.count)
1212 return -EINVAL;
1213
1214 ret = sgx_encl_remove_pages(encl, ¶ms);
1215
1216 if (copy_to_user(arg, ¶ms, sizeof(params)))
1217 return -EFAULT;
1218
1219 return ret;
1220 }
1221
/*
 * sgx_ioctl() - entry point for all enclave ioctls
 * @filep: open enclave file; private_data holds the struct sgx_encl
 * @cmd:   ioctl command
 * @arg:   command-specific userspace pointer
 *
 * The SGX_ENCL_IOCTL bit serializes ioctls per enclave: a concurrent
 * call on the same enclave returns -EBUSY instead of blocking.
 */
long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct sgx_encl *encl = filep->private_data;
	int ret;

	if (test_and_set_bit(SGX_ENCL_IOCTL, &encl->flags))
		return -EBUSY;

	switch (cmd) {
	case SGX_IOC_ENCLAVE_CREATE:
		ret = sgx_ioc_enclave_create(encl, (void __user *)arg);
		break;
	case SGX_IOC_ENCLAVE_ADD_PAGES:
		ret = sgx_ioc_enclave_add_pages(encl, (void __user *)arg);
		break;
	case SGX_IOC_ENCLAVE_INIT:
		ret = sgx_ioc_enclave_init(encl, (void __user *)arg);
		break;
	case SGX_IOC_ENCLAVE_PROVISION:
		ret = sgx_ioc_enclave_provision(encl, (void __user *)arg);
		break;
	case SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS:
		ret = sgx_ioc_enclave_restrict_permissions(encl,
							   (void __user *)arg);
		break;
	case SGX_IOC_ENCLAVE_MODIFY_TYPES:
		ret = sgx_ioc_enclave_modify_types(encl, (void __user *)arg);
		break;
	case SGX_IOC_ENCLAVE_REMOVE_PAGES:
		ret = sgx_ioc_enclave_remove_pages(encl, (void __user *)arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}

	clear_bit(SGX_ENCL_IOCTL, &encl->flags);
	return ret;
}