0001
0002
0003
0004
0005
0006
0007 #include <linux/dma-buf.h>
0008 #include <linux/highmem.h>
0009
0010 #include <drm/drm_prime.h>
0011
0012 #include "omap_drv.h"
0013
0014 MODULE_IMPORT_NS(DMA_BUF);
0015
0016
0017
0018
0019
0020 static struct sg_table *omap_gem_map_dma_buf(
0021 struct dma_buf_attachment *attachment,
0022 enum dma_data_direction dir)
0023 {
0024 struct drm_gem_object *obj = attachment->dmabuf->priv;
0025 struct sg_table *sg;
0026 sg = omap_gem_get_sg(obj, dir);
0027 if (IS_ERR(sg))
0028 return sg;
0029
0030 return sg;
0031 }
0032
0033 static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
0034 struct sg_table *sg, enum dma_data_direction dir)
0035 {
0036 struct drm_gem_object *obj = attachment->dmabuf->priv;
0037 omap_gem_put_sg(obj, sg);
0038 }
0039
/*
 * Prepare the buffer for CPU access: pin the backing pages so the
 * importer can touch them.  Tiled buffers are refused outright.
 */
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
		/* TODO: CPU access to a tiled buffer would need at least a
		 * partial pin to get a de-tiled view; until that exists,
		 * reject the request.  NOTE(review): -ENOMEM looks odd for
		 * "unsupported" — -EINVAL may fit better, confirm importers
		 * don't depend on this value before changing it.
		 */
		return -ENOMEM;
	}

	/* Make sure the pages are resident and pinned (true = remap). */
	return omap_gem_get_pages(obj, &pages, true);
}
0054
0055 static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
0056 enum dma_data_direction dir)
0057 {
0058 struct drm_gem_object *obj = buffer->priv;
0059 omap_gem_put_pages(obj);
0060 return 0;
0061 }
0062
0063 static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
0064 struct vm_area_struct *vma)
0065 {
0066 struct drm_gem_object *obj = buffer->priv;
0067 int ret = 0;
0068
0069 ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
0070 if (ret < 0)
0071 return ret;
0072
0073 return omap_gem_mmap_obj(obj, vma);
0074 }
0075
/* dma_buf callbacks for buffers exported by omapdrm GEM. */
static const struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.mmap = omap_gem_dmabuf_mmap,
};
0084
0085 struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
0086 {
0087 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
0088
0089 exp_info.ops = &omap_dmabuf_ops;
0090 exp_info.size = omap_gem_mmap_size(obj);
0091 exp_info.flags = flags;
0092 exp_info.priv = obj;
0093 exp_info.resv = obj->resv;
0094
0095 return drm_gem_dmabuf_export(obj->dev, &exp_info);
0096 }
0097
0098
0099
0100
0101
/*
 * Import a dma-buf as a GEM object.
 *
 * Self-imports (a buffer we exported ourselves, coming back to the same
 * device) short-circuit to the original GEM object.  Foreign buffers are
 * attached, mapped for DMA, and wrapped in a new omap GEM object.
 *
 * Returns the GEM object (with a reference held) or an ERR_PTR().
 */
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
		struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Keep the dma_buf alive for as long as the import exists. */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

	/* Error unwind: undo map, then attach + dma_buf ref, in order. */
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}