// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

MODULE_IMPORT_NS(DMA_BUF);

struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer (only for dma-buf export). */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};
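
/*
 * Note: one such context is allocated per open gntdev file (see
 * gntdev_dmabuf_init() below), so the lists above track only the exported
 * and imported buffers owned by a single user of the gntdev device.
 */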

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the wait list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);

	/*
	 * Try to find the DMA buffer: if not found then either the buffer
	 * has already been released or the file descriptor provided is
	 * wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and is reference count locked by us now,
	 * so prepare to wait: allocate wait object and add it to the wait
	 * list, so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}
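
/*
 * The wait protocol above is subtle: dmabuf_exp_wait_obj_get_dmabuf() takes
 * a temporary reference so the buffer cannot disappear while the wait object
 * is being set up, and dmabuf_exp_wait_obj_new() drops that reference again
 * under priv->lock. From then on only the dma-buf's own users keep the
 * buffer alive, and the final kref_put() runs dmabuf_exp_release(), which
 * signals the completion this waiter sleeps on.
 */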

/* The implementation of the Xen dma-buf exporter. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}
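
/*
 * The scatter-gather table built above starts at offset 0 and covers exactly
 * nr_pages << PAGE_SHIFT bytes; the importer side below relies on the same
 * layout when it checks sgt->sgl->offset and the overall buffer size.
 */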

static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sgtable(attach->dev, sgt,
						  gntdev_dmabuf_attach->dir,
						  DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};
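
/*
 * Exporter life cycle, as implemented by the ops above: attach() allocates
 * per-attachment state, the first map_dma_buf() builds and caches an sg
 * table (unmap_dma_buf() is intentionally a no-op), detach() performs the
 * real DMA unmap, and release() runs once the last importer closes the
 * dma-buf fd, dropping the grant map and waking any
 * IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED waiters via dmabuf_exp_release().
 */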

struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1;

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

/* DMA buffer import support. */

static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}
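
/*
 * Each page of the imported buffer is granted to @domid with read/write
 * access (the last argument of gnttab_grant_foreign_access_ref() is the
 * read-only flag, passed as 0 here), so the foreign domain can map the
 * backing pages by reference. On a partial failure only the batch of
 * still-unclaimed references is freed here; references that were already
 * granted are revoked by the caller via dmabuf_imp_end_foreign_access().
 */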

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != INVALID_GRANT_REF)
			gnttab_end_foreign_access(refs[i], NULL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	/* Mark all refs as invalid so that cleanup can skip them. */
	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = INVALID_GRANT_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check number of pages that imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu bytes, user-space expects %d pages\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sgtable_page(sgt, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		/*
		 * Check if page is valid: this can fail if we are given
		 * a page from VRAM or other resources which are not backed
		 * by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}
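
/*
 * Import flow in short: take a reference to the dma-buf by fd, attach this
 * device, map the attachment to get an sg table, validate that the buffer
 * starts at offset zero and has the expected size, collect the backing
 * struct pages, and finally grant the foreign domain access to each page.
 * The error paths unwind these steps in reverse order.
 */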

/*
 * Find the imported dma-buf by its file descriptor and remove it from the
 * import list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
					 DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}
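
/*
 * A minimal user-space sketch of the export path (illustrative only; the
 * structure and macro names are from the xen/gntdev.h uapi header, while
 * grant_refs[], nr_refs, remote_domid and gntdev_fd are hypothetical values
 * obtained beforehand):
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
 *
 *	op = calloc(1, sizeof(*op) + nr_refs * sizeof(op->refs[0]));
 *	op->flags = GNTDEV_DMA_FLAG_WC;
 *	op->count = nr_refs;
 *	op->domid = remote_domid;
 *	memcpy(op->refs, grant_refs, nr_refs * sizeof(op->refs[0]));
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
 *		dmabuf_fd = op->fd;	// ready to pass to an importer
 */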

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}
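
/*
 * The matching import call from user space, again as an illustrative sketch
 * (uapi names from xen/gntdev.h; dmabuf_fd is a hypothetical fd received
 * from an exporter, e.g. over a unix domain socket):
 *
 *	struct ioctl_gntdev_dmabuf_imp_to_refs *op;
 *
 *	op = calloc(1, sizeof(*op) + nr_pages * sizeof(op->refs[0]));
 *	op->fd = dmabuf_fd;
 *	op->count = nr_pages;
 *	op->domid = remote_domid;
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_IMP_TO_REFS, op) == 0)
 *		;	// op->refs[] now holds grants for remote_domid
 */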

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}