// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2022 Linaro Limited
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"

static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
	size_t n;

	for (n = 0; n < page_count; n++)
		put_page(pages[n]);
}

static int shm_get_kernel_pages(unsigned long start, size_t page_count,
				struct page **pages)
{
	size_t n;
	int rc;

	if (is_vmalloc_addr((void *)start)) {
		struct page *page;

		/* vmalloc memory: look up and take a reference on each page */
		for (n = 0; n < page_count; n++) {
			page = vmalloc_to_page((void *)(start + PAGE_SIZE * n));
			if (!page)
				return -ENOMEM;

			get_page(page);
			pages[n] = page;
		}
		rc = page_count;
	} else {
		struct kvec *kiov;

		/* linearly mapped memory: pin the pages via a kvec */
		kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
		if (!kiov)
			return -ENOMEM;

		for (n = 0; n < page_count; n++) {
			kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
			kiov[n].iov_len = PAGE_SIZE;
		}

		rc = get_kernel_pages(kiov, page_count, 0, pages);
		kfree(kiov);
	}

	return rc;
}

static void release_registered_pages(struct tee_shm *shm)
{
	if (shm->pages) {
		if (shm->flags & TEE_SHM_USER_MAPPED)
			unpin_user_pages(shm->pages, shm->num_pages);
		else
			shm_put_kernel_pages(shm->pages, shm->num_pages);

		kfree(shm->pages);
	}
}

static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_POOL) {
		teedev->pool->ops->free(teedev->pool, shm);
	} else if (shm->flags & TEE_SHM_DYNAMIC) {
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d", shm, rc);

		release_registered_pages(shm);
	}

	teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}

static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
					size_t align, u32 flags, int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->id = id;

	/*
	 * We're assigning this as it is needed if the shm is to be
	 * registered. If this function returns OK then the caller expected
	 * to call teedev_ctx_get() or clear shm->ctx in case it's not
	 * expected to retain a reference to the context.
	 */
	shm->ctx = ctx;

	rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	teedev_ctx_get(ctx);
	return shm;
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_alloc_user_buf() - Allocate shared memory for user space
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * Memory allocated as user space shared memory is automatically freed when
 * the TEE file pointer is closed. The primary usage of this function is
 * when the TEE driver doesn't support registering ordinary user space
 * memory.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int id;

	/* Reserve an id now, the shm is inserted with idr_replace() below */
	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;

	return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
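
/*
 * Illustrative sketch (not part of the original file): typical pairing of
 * tee_shm_alloc_kernel_buf() with tee_shm_get_va() and tee_shm_free() in a
 * kernel TEE client. The context would normally come from
 * tee_client_open_context(); the "example" name below is hypothetical.
 */
static int __maybe_unused tee_shm_alloc_example(struct tee_context *ctx)
{
	struct tee_shm *shm;
	void *va;

	shm = tee_shm_alloc_kernel_buf(ctx, PAGE_SIZE);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va)) {
		tee_shm_free(shm);
		return PTR_ERR(va);
	}

	/* ... fill the buffer at va, pass shm to tee_client_invoke_func() ... */

	tee_shm_free(shm);
	return 0;
}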
/**
 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
 *			      kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * This function returns similar shared memory as
 * tee_shm_alloc_kernel_buf(), but with the difference that the memory
 * isn't registered in secure world.
 *
 * The allocated memory is supposed to be used for driver-internal
 * communication with secure world, for instance to carry a message
 * structure, where registration in secure world isn't needed or wanted.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL;

	return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
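
/*
 * Illustrative sketch (not part of the original file): a TEE driver could
 * use tee_shm_alloc_priv_buf() for a driver-private argument structure
 * shared with secure world. "struct example_msg_arg" is hypothetical.
 */
struct example_msg_arg {
	u32 cmd;
	u32 ret;
};

static struct tee_shm *__maybe_unused
example_alloc_msg_arg(struct tee_context *ctx)
{
	/* Note the smaller, sizeof(long) * 2, alignment of private buffers */
	return tee_shm_alloc_priv_buf(ctx, sizeof(struct example_msg_arg));
}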

static struct tee_shm *
register_shm_helper(struct tee_context *ctx, unsigned long addr,
		    size_t length, u32 flags, int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	unsigned long start;
	size_t num_pages;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		ret = ERR_PTR(-ENOTSUPP);
		goto err_dev_put;
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_ctx_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->ctx = ctx;
	shm->id = id;
	addr = untagged_addr(addr);
	/* Pages are pinned on page granularity, record the in-page offset */
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err_free_shm;
	}

	if (flags & TEE_SHM_USER_MAPPED)
		rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
					 shm->pages);
	else
		rc = shm_get_kernel_pages(start, num_pages, shm->pages);
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		/* Only part of the range could be pinned */
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err_put_shm_pages;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_put_shm_pages;
	}

	return shm;
err_put_shm_pages:
	if (flags & TEE_SHM_USER_MAPPED)
		unpin_user_pages(shm->pages, shm->num_pages);
	else
		shm_put_kernel_pages(shm->pages, shm->num_pages);
	kfree(shm->pages);
err_free_shm:
	kfree(shm);
err_ctx_put:
	teedev_ctx_put(ctx);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_register_user_buf() - Register a userspace shared memory buffer
 * @ctx:	Context that registers the shared memory
 * @addr:	The userspace address of the shared buffer
 * @length:	Length of the shared buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
					  unsigned long addr, size_t length)
{
	u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int id;

	if (!access_ok((void __user *)addr, length))
		return ERR_PTR(-EFAULT);

	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	shm = register_shm_helper(ctx, addr, length, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}

/**
 * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
 *				   secure world
 * @ctx:	Context that registers the shared memory
 * @addr:	The buffer
 * @length:	Length of the buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
					    void *addr, size_t length)
{
	u32 flags = TEE_SHM_DYNAMIC;

	return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
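
/*
 * Illustrative sketch (not part of the original file): registering an
 * existing kernel buffer instead of allocating one from the pool. Both
 * kmalloc()'d (linearly mapped) and vmalloc()'d buffers work, since
 * shm_get_kernel_pages() handles the two cases separately.
 */
static int __maybe_unused example_register_buf(struct tee_context *ctx)
{
	struct tee_shm *shm;
	void *buf;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	shm = tee_shm_register_kernel_buf(ctx, buf, PAGE_SIZE);
	if (IS_ERR(shm)) {
		kfree(buf);
		return PTR_ERR(shm);
	}

	/* ... pass shm to the TEE; keep buf alive until tee_shm_free() ... */

	tee_shm_free(shm);	/* drops the page references taken above */
	kfree(buf);
	return 0;
}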

static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
	tee_shm_put(filp->private_data);
	return 0;
}

static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct tee_shm *shm = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;

	/* Check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct file_operations tee_shm_fops = {
	.owner = THIS_MODULE,
	.release = tee_shm_fop_release,
	.mmap = tee_shm_fop_mmap,
};

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 *
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (shm->id < 0)
		return -EINVAL;

	/* Matched by tee_shm_put() in tee_shm_fop_release() */
	refcount_inc(&shm->refcount);
	fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
	if (fd < 0)
		tee_shm_put(shm);
	return fd;
}
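
/*
 * Illustrative sketch (not part of the original file): the file descriptor
 * returned via tee_shm_get_fd() reaches user space through the TEE ioctl
 * interface (e.g. TEE_IOC_SHM_ALLOC), where the buffer is then mapped with
 * mmap() and handled by tee_shm_fop_mmap() above:
 *
 *	struct tee_ioctl_shm_alloc_data data = { .size = 4096 };
 *	int fd = ioctl(tee_fd, TEE_IOC_SHM_ALLOC, &data);
 *	void *p = mmap(NULL, data.size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 */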

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 *
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!shm->kaddr)
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 *
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
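
/*
 * Illustrative sketch (not part of the original file): retrieving the
 * physical address of the second page of a shared memory object, e.g. to
 * hand it to firmware. The -EINVAL bounds check in tee_shm_get_pa() above
 * fails if the shm is smaller than two pages.
 */
static int __maybe_unused
example_get_second_page_pa(struct tee_shm *shm, phys_addr_t *pa)
{
	return tee_shm_get_pa(shm, PAGE_SIZE, pa);
}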

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 *			   count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 *
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	/*
	 * If the tee_shm was found in the IDR it must have a refcount
	 * larger than 0 due to the guarantee in tee_shm_put() below. So
	 * it's safe to use refcount_inc().
	 */
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else
		refcount_inc(&shm->refcount);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
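
/*
 * Illustrative sketch (not part of the original file): a lookup by id must
 * always be balanced with tee_shm_put(), which is what keeps the refcount
 * guarantee described above intact.
 */
static int __maybe_unused example_use_shm_by_id(struct tee_context *ctx, int id)
{
	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);

	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... use shm while holding the reference ... */

	tee_shm_put(shm);
	return 0;
}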

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->ctx->teedev;
	bool do_release = false;

	mutex_lock(&teedev->mutex);
	if (refcount_dec_and_test(&shm->refcount)) {
		/*
		 * refcount has reached 0, we must now remove it from the
		 * IDR before releasing the mutex. This will guarantee that
		 * the refcount_inc() in tee_shm_get_from_id() never starts
		 * from 0.
		 */
		if (shm->id >= 0)
			idr_remove(&teedev->idr, shm->id);
		do_release = true;
	}
	mutex_unlock(&teedev->mutex);

	if (do_release)
		tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);