/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>
#include <linux/module.h>

#include <asm/smp.h>

#include "gem/i915_gem_dmabuf.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

MODULE_IMPORT_NS(DMA_BUF);

I915_SELFTEST_DECLARE(static bool force_different_devices;)

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

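/*
 * Build and DMA-map a copy of the exporter's scatterlist for the importing
 * device. The copy is independent of the object's own mapping, so each
 * attachment gets its own DMA addresses.
 */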
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	/*
	 * Copy the CPU-side entries. Use orig_nents here: the object's own
	 * table may already be DMA-mapped for the GPU, in which case nents
	 * can be smaller than the number of page-backed entries to copy.
	 */
	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->orig_nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		goto err_free_sg;

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err:
	return ERR_PTR(ret);
}

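/*
 * vmap/vunmap: pin the object's pages and expose a cached (WB) kernel
 * mapping through the iosys_map interface; the flush on vunmap makes CPU
 * writes visible before the mapping is unpinned.
 */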
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf,
				struct iosys_map *map)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf,
				   struct iosys_map *map)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

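/*
 * CPU mmap of the exported buffer. On platforms with local memory (LMEM)
 * this goes through drm_gem_prime_mmap() and the driver's fault handlers;
 * otherwise shmem-backed objects forward to the shmem file's mmap and the
 * VMA is retargeted at that file.
 */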
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (HAS_LMEM(i915))
		return drm_gem_prime_mmap(&obj->base, vma);

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	vma_set_file(vma, obj->base.filp);

	return 0;
}

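/*
 * begin_cpu_access: move the object to the CPU domain (flushing GPU caches
 * as needed) under the ww lock, retrying on -EDEADLK via the usual backoff
 * dance, so the importer sees coherent data before touching it on the CPU.
 */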
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);
	if (!err) {
		err = i915_gem_object_set_to_cpu_domain(obj, write);
		i915_gem_object_unpin_pages(obj);
	}
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

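/*
 * end_cpu_access: flush any CPU writes by moving the object back to the GTT
 * domain, again with ww-lock backoff on contention.
 */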
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);
	if (!err) {
		err = i915_gem_object_set_to_gtt_domain(obj, false);
		i915_gem_object_unpin_pages(obj);
	}
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

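/*
 * On attach the object is migrated to system memory and its pages are
 * pinned for the lifetime of the attachment, so map_dma_buf can hand out a
 * stable scatterlist. Objects that cannot be placed in SMEM refuse the
 * attachment with -EOPNOTSUPP.
 */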
static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
	struct i915_gem_ww_ctx ww;
	int err;

	if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
		return -EOPNOTSUPP;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
		if (err)
			continue;

		err = i915_gem_object_wait_migration(obj, 0);
		if (err)
			continue;

		err = i915_gem_object_pin_pages(obj);
	}

	return err;
}

static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
				   struct dma_buf_attachment *attach)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);

	i915_gem_object_unpin_pages(obj);
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.attach = i915_gem_dmabuf_attach,
	.detach = i915_gem_dmabuf_detach,
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

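/*
 * Export an i915 GEM object as a dma-buf. The object's own reservation
 * object is shared with the dma-buf so fences stay visible to importers,
 * and the backend gets a chance to veto or prepare the export through its
 * dmabuf_export() hook.
 */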
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);

		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

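/*
 * get_pages for an imported (foreign) dma-buf object: map the attachment to
 * obtain a scatterlist and install it as the object's backing pages.
 */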
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	assert_object_held(obj);

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Flush CPU caches where the imported pages may end up incoherent:
	 * either the object is allowed to bypass the LLC, or the platform
	 * has no LLC at all (DG1 is excluded since it still snoops). A
	 * heavy-handed wbinvd is used because the imported scatterlist may
	 * not be backed by struct pages, so the per-page clflush helpers
	 * cannot be used here.
	 */
	if (i915_gem_object_can_bypass_llc(obj) ||
	    (!HAS_LLC(i915) && !IS_DG1(i915)))
		wbinvd_on_all_cpus();

	sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.name = "i915_gem_object_dmabuf",
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

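/*
 * Import a dma-buf as an i915 GEM object. A buffer that was exported by
 * this same device is short-circuited back to the original object; anything
 * else gets a new proxy object whose pages come from the attachment mapping.
 */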
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev &&
		    !I915_SELFTEST_ONLY(force_different_devices)) {
			/*
			 * Importing a dma-buf exported from our own gem
			 * just takes another reference on the underlying
			 * object instead of the dma-buf's file count.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	if (i915_gem_object_size_2big(dma_buf->size))
		return ERR_PTR(-E2BIG);

	/* need to attach to the foreign dma-buf before we can map it */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/*
	 * We use the GTT domain as shorthand for a coherent domain: the
	 * buffer is neither in the GPU caches nor in the CPU caches, and
	 * writes are expected to be visible in memory without further
	 * flushing. Imported buffers therefore start in the GTT read domain
	 * with no write domain set.
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif