/*
 * Device driver ("/dev/sgx_vepc") exposing SGX enclave page cache (EPC)
 * memory to hypervisors (KVM) for virtualization of SGX in guests.
 */
0008 #include <linux/miscdevice.h>
0009 #include <linux/mm.h>
0010 #include <linux/mman.h>
0011 #include <linux/sched/mm.h>
0012 #include <linux/sched/signal.h>
0013 #include <linux/slab.h>
0014 #include <linux/xarray.h>
0015 #include <asm/sgx.h>
0016 #include <uapi/asm/sgx.h>
0017
0018 #include "encls.h"
0019 #include "sgx.h"
0020
/*
 * struct sgx_vepc - per-fd context of one virtual EPC instance
 * @page_array: EPC pages backing this instance, indexed by vma page offset
 * @lock:       serializes faults that populate @page_array
 */
struct sgx_vepc {
	struct xarray page_array;
	struct mutex lock;
};

/*
 * SECS pages whose EREMOVE failed at release time because child pages were
 * still present (SGX_CHILD_PRESENT) are parked on this "zombie" list and
 * retried whenever another vEPC instance is released — see
 * sgx_vepc_release().
 */
static struct mutex zombie_secs_pages_lock;
static struct list_head zombie_secs_pages;
0032
/*
 * Populate the EPC page backing @addr in @vma: allocate an EPC page, record
 * it in the instance's page_array, and install the PTE.
 *
 * Returns 0 on success or a negative errno.  Caller must hold @vepc->lock.
 */
static int __sgx_vepc_fault(struct sgx_vepc *vepc,
			    struct vm_area_struct *vma, unsigned long addr)
{
	struct sgx_epc_page *epc_page;
	unsigned long index, pfn;
	int ret;

	WARN_ON(!mutex_is_locked(&vepc->lock));

	/* Calculate index of EPC page in virtual EPC's page_array */
	index = vma->vm_pgoff + PFN_DOWN(addr - vma->vm_start);

	/* Nothing to do if this offset was already populated. */
	epc_page = xa_load(&vepc->page_array, index);
	if (epc_page)
		return 0;

	epc_page = sgx_alloc_epc_page(vepc, false);
	if (IS_ERR(epc_page))
		return PTR_ERR(epc_page);

	/* Record the page before mapping it so teardown can always find it. */
	ret = xa_err(xa_store(&vepc->page_array, index, epc_page, GFP_KERNEL));
	if (ret)
		goto err_free;

	pfn = PFN_DOWN(sgx_get_epc_phys_addr(epc_page));

	ret = vmf_insert_pfn(vma, addr, pfn);
	if (ret != VM_FAULT_NOPAGE) {
		ret = -EFAULT;
		goto err_delete;
	}

	return 0;

err_delete:
	xa_erase(&vepc->page_array, index);
err_free:
	sgx_free_epc_page(epc_page);
	return ret;
}
0073
0074 static vm_fault_t sgx_vepc_fault(struct vm_fault *vmf)
0075 {
0076 struct vm_area_struct *vma = vmf->vma;
0077 struct sgx_vepc *vepc = vma->vm_private_data;
0078 int ret;
0079
0080 mutex_lock(&vepc->lock);
0081 ret = __sgx_vepc_fault(vepc, vma, vmf->address);
0082 mutex_unlock(&vepc->lock);
0083
0084 if (!ret)
0085 return VM_FAULT_NOPAGE;
0086
0087 if (ret == -EBUSY && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) {
0088 mmap_read_unlock(vma->vm_mm);
0089 return VM_FAULT_RETRY;
0090 }
0091
0092 return VM_FAULT_SIGBUS;
0093 }
0094
/* Only .fault is needed: pages are populated lazily on first access. */
static const struct vm_operations_struct sgx_vepc_vm_ops = {
	.fault = sgx_vepc_fault,
};
0098
0099 static int sgx_vepc_mmap(struct file *file, struct vm_area_struct *vma)
0100 {
0101 struct sgx_vepc *vepc = file->private_data;
0102
0103 if (!(vma->vm_flags & VM_SHARED))
0104 return -EINVAL;
0105
0106 vma->vm_ops = &sgx_vepc_vm_ops;
0107
0108 vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY;
0109 vma->vm_private_data = vepc;
0110
0111 return 0;
0112 }
0113
static int sgx_vepc_remove_page(struct sgx_epc_page *epc_page)
{
	/*
	 * Take a previously guest-owned EPC page and return it to the
	 * general EPC page pool.
	 *
	 * Guests can not be trusted to have left this page in a good
	 * state, so run EREMOVE on the page unconditionally.  In the
	 * case that a guest properly EREMOVE'd this page, a superfluous
	 * EREMOVE is harmless.
	 */
	return __eremove(sgx_get_epc_virt_addr(epc_page));
}
0127
/*
 * EREMOVE @epc_page and, on success, return it to the free pool.
 * Returns the raw EREMOVE error (e.g. SGX_CHILD_PRESENT) on failure,
 * in which case the page is NOT freed.
 */
static int sgx_vepc_free_page(struct sgx_epc_page *epc_page)
{
	int ret = sgx_vepc_remove_page(epc_page);
	if (ret) {
		/*
		 * Only SGX_CHILD_PRESENT is expected, which happens when
		 * EREMOVE'ing an SECS page that still has children.  That is
		 * handled by EREMOVE'ing the SECS again after all pages in
		 * the virtual EPC have been EREMOVE'd — see the retry loops
		 * in sgx_vepc_release().
		 *
		 * NOTE(review): any other error (e.g. a logical processor
		 * still running in the enclave) cannot be handled here and
		 * triggers the WARN below.
		 */
		WARN_ONCE(ret != SGX_CHILD_PRESENT, EREMOVE_ERROR_MESSAGE,
			  ret, ret);
		return ret;
	}

	sgx_free_epc_page(epc_page);
	return 0;
}
0152
/*
 * EREMOVE every page in @vepc without freeing them (backing storage and
 * page_array entries are kept).  Used by SGX_IOC_VEPC_REMOVE_ALL.
 *
 * Returns the number of pages that could not be removed because a child
 * page was still present (userspace is expected to retry), or -EBUSY on
 * any other EREMOVE failure.
 */
static long sgx_vepc_remove_all(struct sgx_vepc *vepc)
{
	struct sgx_epc_page *entry;
	unsigned long index;
	long failures = 0;

	xa_for_each(&vepc->page_array, index, entry) {
		int ret = sgx_vepc_remove_page(entry);
		if (ret) {
			if (ret == SGX_CHILD_PRESENT) {
				/* The page is a SECS, userspace will retry.  */
				failures++;
			} else {
				/*
				 * Do not WARN on a faulting EREMOVE here, as
				 * userspace can induce such failures, e.g. by
				 * invoking the ioctl concurrently on multiple
				 * vEPCs or while a vEPC is being released.
				 * Only an unexpected trap number is worth a
				 * one-time warning.
				 */
				WARN_ON_ONCE(encls_faulted(ret) &&
					     ENCLS_TRAPNR(ret) != X86_TRAP_GP);
				return -EBUSY;
			}
		}
		/* Potentially long loop over the whole instance. */
		cond_resched();
	}

	/*
	 * Return the number of SECS pages that failed to be removed, so
	 * userspace knows that it has to retry.
	 */
	return failures;
}
0187
/*
 * Release handler: free all EPC pages of the instance.  SECS pages that
 * still have children elsewhere are parked on the global zombie list so a
 * later release can finish them off.
 */
static int sgx_vepc_release(struct inode *inode, struct file *file)
{
	struct sgx_vepc *vepc = file->private_data;
	struct sgx_epc_page *epc_page, *tmp, *entry;
	unsigned long index;

	LIST_HEAD(secs_pages);

	xa_for_each(&vepc->page_array, index, entry) {
		/*
		 * Remove all normal, child pages.  sgx_vepc_free_page()
		 * will fail if EREMOVE fails, but this is OK and expected on
		 * SECS pages.  SECS pages can only be EREMOVE'd *after* all
		 * of their child pages; the retry loop below cleans them up.
		 */
		if (sgx_vepc_free_page(entry))
			continue;

		xa_erase(&vepc->page_array, index);
	}

	/*
	 * Retry EREMOVE'ing the remaining pages.  This cleans up any SECS
	 * pages whose children all lived in this instance.
	 */
	xa_for_each(&vepc->page_array, index, entry) {
		epc_page = entry;
		/*
		 * A failure here means the SECS page still has children in
		 * some other instance; keep it on the local list so it gets
		 * spliced onto the global zombie list below.
		 */
		if (sgx_vepc_free_page(epc_page))
			list_add_tail(&epc_page->list, &secs_pages);

		xa_erase(&vepc->page_array, index);
	}

	/*
	 * SECS pages are "pinned" by child pages and "unpinned" once all
	 * children have been EREMOVE'd.  A child page in this instance may
	 * have pinned an SECS page encountered in an earlier release(),
	 * creating a zombie.  Since some children were EREMOVE'd above,
	 * retry all zombies in the hope that one was unpinned.
	 */
	mutex_lock(&zombie_secs_pages_lock);
	list_for_each_entry_safe(epc_page, tmp, &zombie_secs_pages, list) {
		/*
		 * Speculatively remove the page from the zombie list: if
		 * EREMOVE succeeds the page is freed; if it fails, put the
		 * page on the local list, which is spliced back onto the
		 * zombie list at the end.
		 */
		list_del(&epc_page->list);

		if (sgx_vepc_free_page(epc_page))
			list_add_tail(&epc_page->list, &secs_pages);
	}

	if (!list_empty(&secs_pages))
		list_splice_tail(&secs_pages, &zombie_secs_pages);
	mutex_unlock(&zombie_secs_pages_lock);

	xa_destroy(&vepc->page_array);
	kfree(vepc);

	return 0;
}
0257
0258 static int sgx_vepc_open(struct inode *inode, struct file *file)
0259 {
0260 struct sgx_vepc *vepc;
0261
0262 vepc = kzalloc(sizeof(struct sgx_vepc), GFP_KERNEL);
0263 if (!vepc)
0264 return -ENOMEM;
0265 mutex_init(&vepc->lock);
0266 xa_init(&vepc->page_array);
0267
0268 file->private_data = vepc;
0269
0270 return 0;
0271 }
0272
0273 static long sgx_vepc_ioctl(struct file *file,
0274 unsigned int cmd, unsigned long arg)
0275 {
0276 struct sgx_vepc *vepc = file->private_data;
0277
0278 switch (cmd) {
0279 case SGX_IOC_VEPC_REMOVE_ALL:
0280 if (arg)
0281 return -EINVAL;
0282 return sgx_vepc_remove_all(vepc);
0283
0284 default:
0285 return -ENOTTY;
0286 }
0287 }
0288
/* File operations for /dev/sgx_vepc; compat_ioctl reuses the native path. */
static const struct file_operations sgx_vepc_fops = {
	.owner = THIS_MODULE,
	.open = sgx_vepc_open,
	.unlocked_ioctl = sgx_vepc_ioctl,
	.compat_ioctl = sgx_vepc_ioctl,
	.release = sgx_vepc_release,
	.mmap = sgx_vepc_mmap,
};
0297
/* Misc device registered as /dev/sgx_vepc with a dynamic minor. */
static struct miscdevice sgx_vepc_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgx_vepc",
	.nodename = "sgx_vepc",
	.fops = &sgx_vepc_fops,
};
0304
/*
 * Register the virtual EPC device.  Returns 0 on success, -ENODEV when the
 * CPU cannot host guests, or the misc_register() error.
 */
int __init sgx_vepc_init(void)
{
	/* SGX virtualization requires KVM to work */
	if (!cpu_feature_enabled(X86_FEATURE_VMX))
		return -ENODEV;

	INIT_LIST_HEAD(&zombie_secs_pages);
	mutex_init(&zombie_secs_pages_lock);

	return misc_register(&sgx_vepc_dev);
}
0316
/**
 * sgx_virt_ecreate() - Run ECREATE on behalf of guest
 * @pageinfo:	Pointer to PAGEINFO structure
 * @secs:	Userspace pointer to SECS page
 * @trapnr:	trap number injected to guest in case of ECREATE error
 *
 * Run ECREATE on behalf of a guest (exported for KVM) for the purpose of
 * EPC virtualization.
 *
 * Return:
 * -  0:	ECREATE was successful.
 * - -EINVAL:	@secs is not a valid userspace pointer.
 * - -EFAULT:	ECREATE faulted; *@trapnr holds the trap number.
 */
int sgx_virt_ecreate(struct sgx_pageinfo *pageinfo, void __user *secs,
		     int *trapnr)
{
	int ret;

	/*
	 * @secs is an untrusted, caller-provided userspace address.  The
	 * access below can fault and invoke fault handlers if the mapping
	 * does not exist.
	 *
	 * WARN to make sure @secs is a valid userspace pointer, which keeps
	 * this driver from being misused to clobber kernel memory.
	 */
	if (WARN_ON_ONCE(!access_ok(secs, PAGE_SIZE)))
		return -EINVAL;

	__uaccess_begin();
	ret = __ecreate(pageinfo, (void *)secs);
	__uaccess_end();

	if (encls_faulted(ret)) {
		*trapnr = ENCLS_TRAPNR(ret);
		return -EFAULT;
	}

	/* ECREATE doesn't return an error code, it faults or succeeds. */
	WARN_ON_ONCE(ret);
	return 0;
}
EXPORT_SYMBOL_GPL(sgx_virt_ecreate);
0364
/*
 * Validate the userspace pointers and run EINIT.  Returns the raw ENCLS
 * return value, or -EINVAL when a pointer fails access_ok().
 */
static int __sgx_virt_einit(void __user *sigstruct, void __user *token,
			    void __user *secs)
{
	int ret;

	/*
	 * All three pointers are untrusted userspace addresses from the
	 * caller.  WARN and bail if any of them is not a valid userspace
	 * pointer, so this driver cannot be misused against kernel memory.
	 */
	/* EINITTOKEN is 304 bytes — NOTE(review): per SDM, confirm. */
#define SGX_EINITTOKEN_SIZE 304
	if (WARN_ON_ONCE(!access_ok(sigstruct, sizeof(struct sgx_sigstruct)) ||
			 !access_ok(token, SGX_EINITTOKEN_SIZE) ||
			 !access_ok(secs, PAGE_SIZE)))
		return -EINVAL;

	__uaccess_begin();
	ret = __einit((void *)sigstruct, (void *)token, (void *)secs);
	__uaccess_end();

	return ret;
}
0387
/**
 * sgx_virt_einit() - Run EINIT on behalf of guest
 * @sigstruct:		Userspace pointer to SIGSTRUCT structure
 * @token:		Userspace pointer to EINITTOKEN structure
 * @secs:		Userspace pointer to SECS page
 * @lepubkeyhash:	Pointer to the guest's virtual SGX_LEPUBKEYHASH MSR
 *			values (used only when X86_FEATURE_SGX_LC is enabled)
 * @trapnr:		trap number injected to guest in case of EINIT error
 *
 * Run EINIT on behalf of a guest (exported for KVM).  When the launch
 * control MSRs are writable, they are loaded with the guest's values
 * before EINIT so the launch enclave check matches the guest's view.
 *
 * Return:
 * -  0:	EINIT was successful.
 * - -EINVAL:	a pointer failed validation in __sgx_virt_einit().
 * - -EFAULT:	EINIT faulted; *@trapnr holds the trap number.
 * - >0:	EINIT's error code.
 */
int sgx_virt_einit(void __user *sigstruct, void __user *token,
		   void __user *secs, u64 *lepubkeyhash, int *trapnr)
{
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) {
		ret = __sgx_virt_einit(sigstruct, token, secs);
	} else {
		/*
		 * Disable preemption so the LE pubkey hash MSRs cannot be
		 * changed between the update below and EINIT on this CPU.
		 */
		preempt_disable();

		sgx_update_lepubkeyhash(lepubkeyhash);

		ret = __sgx_virt_einit(sigstruct, token, secs);
		preempt_enable();
	}

	/* Propagate up the error from the WARN_ON_ONCE in __sgx_virt_einit() */
	if (ret == -EINVAL)
		return ret;

	if (encls_faulted(ret)) {
		*trapnr = ENCLS_TRAPNR(ret);
		return -EFAULT;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sgx_virt_einit);