0001
0002
0003
0004 #include <linux/dma-fence-array.h>
0005 #include <linux/dma-mapping.h>
0006 #include <linux/file.h>
0007 #include <linux/host1x.h>
0008 #include <linux/iommu.h>
0009 #include <linux/kref.h>
0010 #include <linux/list.h>
0011 #include <linux/nospec.h>
0012 #include <linux/pm_runtime.h>
0013 #include <linux/scatterlist.h>
0014 #include <linux/slab.h>
0015 #include <linux/sync_file.h>
0016
0017 #include <drm/drm_drv.h>
0018 #include <drm/drm_file.h>
0019 #include <drm/drm_syncobj.h>
0020
0021 #include "drm.h"
0022 #include "gem.h"
0023 #include "submit.h"
0024 #include "uapi.h"
0025
/*
 * Rate-limited error log for job-submission failures, emitted against the
 * engine's device and prefixed with the submitting task's name so the
 * offending process can be identified from the kernel log.
 */
#define SUBMIT_ERR(context, fmt, ...) \
	dev_err_ratelimited(context->client->base.dev, \
			    "%s: job submission failed: " fmt "\n", \
			    current->comm, ##__VA_ARGS__)
0030
/*
 * struct gather_bo - refcounted host1x buffer object wrapping a
 * kernel-owned, DMA-coherent buffer that holds the command gather words
 * copied from userspace for one job submission.
 */
struct gather_bo {
	struct host1x_bo base;		/* embedded host1x BO; ops = gather_bo_ops */

	struct kref ref;		/* released via gather_bo_release() */

	struct device *dev;		/* device the buffer was allocated for */
	u32 *gather_data;		/* CPU pointer to the gather words */
	dma_addr_t gather_data_dma;	/* DMA address of gather_data */
	size_t gather_data_words;	/* buffer length in 32-bit words */
};
0041
0042 static struct host1x_bo *gather_bo_get(struct host1x_bo *host_bo)
0043 {
0044 struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
0045
0046 kref_get(&bo->ref);
0047
0048 return host_bo;
0049 }
0050
0051 static void gather_bo_release(struct kref *ref)
0052 {
0053 struct gather_bo *bo = container_of(ref, struct gather_bo, ref);
0054
0055 dma_free_attrs(bo->dev, bo->gather_data_words * 4, bo->gather_data, bo->gather_data_dma,
0056 0);
0057 kfree(bo);
0058 }
0059
0060 static void gather_bo_put(struct host1x_bo *host_bo)
0061 {
0062 struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
0063
0064 kref_put(&bo->ref, gather_bo_release);
0065 }
0066
/*
 * Map the gather buffer for DMA access by @dev: build a scatterlist for
 * the DMA-coherent allocation and map it in the given direction. Returns
 * a refcounted mapping (torn down by gather_bo_unpin()) or an ERR_PTR.
 */
static struct host1x_bo_mapping *
gather_bo_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction)
{
	struct gather_bo *gather = container_of(bo, struct gather_bo, base);
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	/* The mapping holds its own reference on the BO until unpin. */
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		/*
		 * NOTE(review): the 'free' path does not drop the BO reference
		 * taken above via host1x_bo_get() — looks like a leak; confirm.
		 */
		goto free;
	}

	err = dma_get_sgtable(gather->dev, map->sgt, gather->gather_data, gather->gather_data_dma,
			      gather->gather_data_words * 4);
	if (err)
		goto free_sgt;

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

	map->phys = sg_dma_address(map->sgt->sgl);
	map->size = gather->gather_data_words * 4;
	/*
	 * NOTE(review): err is necessarily 0 here (dma_map_sgtable() returns
	 * 0 on success), so chunks is always 0 — confirm this is intended.
	 */
	map->chunks = err;

	return map;

free_sgt:
	sg_free_table(map->sgt);
	kfree(map->sgt);
free:
	kfree(map);
	return ERR_PTR(err);
}
0111
0112 static void gather_bo_unpin(struct host1x_bo_mapping *map)
0113 {
0114 if (!map)
0115 return;
0116
0117 dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
0118 sg_free_table(map->sgt);
0119 kfree(map->sgt);
0120 host1x_bo_put(map->bo);
0121
0122 kfree(map);
0123 }
0124
0125 static void *gather_bo_mmap(struct host1x_bo *host_bo)
0126 {
0127 struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
0128
0129 return bo->gather_data;
0130 }
0131
/* Nothing to do: gather_bo_mmap() hands out the long-lived kernel mapping. */
static void gather_bo_munmap(struct host1x_bo *host_bo, void *addr)
{
}
0135
/* host1x buffer-object vtable for kernel-owned gather buffers. */
const struct host1x_bo_ops gather_bo_ops = {
	.get = gather_bo_get,
	.put = gather_bo_put,
	.pin = gather_bo_pin,
	.unpin = gather_bo_unpin,
	.mmap = gather_bo_mmap,
	.munmap = gather_bo_munmap,
};
0144
0145 static struct tegra_drm_mapping *
0146 tegra_drm_mapping_get(struct tegra_drm_context *context, u32 id)
0147 {
0148 struct tegra_drm_mapping *mapping;
0149
0150 xa_lock(&context->mappings);
0151
0152 mapping = xa_load(&context->mappings, id);
0153 if (mapping)
0154 kref_get(&mapping->ref);
0155
0156 xa_unlock(&context->mappings);
0157
0158 return mapping;
0159 }
0160
0161 static void *alloc_copy_user_array(void __user *from, size_t count, size_t size)
0162 {
0163 size_t copy_len;
0164 void *data;
0165
0166 if (check_mul_overflow(count, size, ©_len))
0167 return ERR_PTR(-EINVAL);
0168
0169 if (copy_len > 0x4000)
0170 return ERR_PTR(-E2BIG);
0171
0172 data = kvmalloc(copy_len, GFP_KERNEL);
0173 if (!data)
0174 return ERR_PTR(-ENOMEM);
0175
0176 if (copy_from_user(data, from, copy_len)) {
0177 kvfree(data);
0178 return ERR_PTR(-EFAULT);
0179 }
0180
0181 return data;
0182 }
0183
0184 static int submit_copy_gather_data(struct gather_bo **pbo, struct device *dev,
0185 struct tegra_drm_context *context,
0186 struct drm_tegra_channel_submit *args)
0187 {
0188 struct gather_bo *bo;
0189 size_t copy_len;
0190
0191 if (args->gather_data_words == 0) {
0192 SUBMIT_ERR(context, "gather_data_words cannot be zero");
0193 return -EINVAL;
0194 }
0195
0196 if (check_mul_overflow((size_t)args->gather_data_words, (size_t)4, ©_len)) {
0197 SUBMIT_ERR(context, "gather_data_words is too large");
0198 return -EINVAL;
0199 }
0200
0201 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
0202 if (!bo) {
0203 SUBMIT_ERR(context, "failed to allocate memory for bo info");
0204 return -ENOMEM;
0205 }
0206
0207 host1x_bo_init(&bo->base, &gather_bo_ops);
0208 kref_init(&bo->ref);
0209 bo->dev = dev;
0210
0211 bo->gather_data = dma_alloc_attrs(dev, copy_len, &bo->gather_data_dma,
0212 GFP_KERNEL | __GFP_NOWARN, 0);
0213 if (!bo->gather_data) {
0214 SUBMIT_ERR(context, "failed to allocate memory for gather data");
0215 kfree(bo);
0216 return -ENOMEM;
0217 }
0218
0219 if (copy_from_user(bo->gather_data, u64_to_user_ptr(args->gather_data_ptr), copy_len)) {
0220 SUBMIT_ERR(context, "failed to copy gather data from userspace");
0221 dma_free_attrs(dev, copy_len, bo->gather_data, bo->gather_data_dma, 0);
0222 kfree(bo);
0223 return -EFAULT;
0224 }
0225
0226 bo->gather_data_words = args->gather_data_words;
0227
0228 *pbo = bo;
0229
0230 return 0;
0231 }
0232
/*
 * Patch one relocation into the gather data: compute the target IOVA
 * from the mapping, shift it as requested and store it at the gather
 * word index given by the reloc. Returns 0, or -EINVAL if the gather
 * offset is out of bounds.
 */
static int submit_write_reloc(struct tegra_drm_context *context, struct gather_bo *bo,
			      struct drm_tegra_submit_buf *buf, struct tegra_drm_mapping *mapping)
{
	/* Base IOVA of the mapping plus the reloc's offset into it. */
	dma_addr_t iova = mapping->iova + buf->reloc.target_offset;
	u32 written_ptr;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/*
	 * NOTE(review): bit 39 presumably selects the sector-layout view of
	 * the address on 64-bit-DMA Tegra — confirm against the TRM.
	 */
	if (buf->flags & DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT)
		iova |= BIT_ULL(39);
#endif

	written_ptr = iova >> buf->reloc.shift;

	if (buf->reloc.gather_offset_words >= bo->gather_data_words) {
		SUBMIT_ERR(context,
			   "relocation has too large gather offset (%u vs gather length %zu)",
			   buf->reloc.gather_offset_words, bo->gather_data_words);
		return -EINVAL;
	}

	/* Clamp the index against Spectre-v1 speculation past the bounds check. */
	buf->reloc.gather_offset_words = array_index_nospec(buf->reloc.gather_offset_words,
							    bo->gather_data_words);

	bo->gather_data[buf->reloc.gather_offset_words] = written_ptr;

	return 0;
}
0261
/*
 * Process the userspace buffer table: copy it in, look up the mapping
 * for each entry, patch the relocation it describes into the gather and
 * record the mapping (with one reference held) in @job_data for release
 * when the job completes.
 *
 * Returns 0 with job_data->used_mappings owning one reference per
 * buffer; on failure all references taken so far are dropped and
 * job_data->used_mappings is left NULL.
 */
static int submit_process_bufs(struct tegra_drm_context *context, struct gather_bo *bo,
			       struct drm_tegra_channel_submit *args,
			       struct tegra_drm_submit_data *job_data)
{
	struct tegra_drm_used_mapping *mappings;
	struct drm_tegra_submit_buf *bufs;
	int err;
	u32 i;

	bufs = alloc_copy_user_array(u64_to_user_ptr(args->bufs_ptr), args->num_bufs,
				     sizeof(*bufs));
	if (IS_ERR(bufs)) {
		SUBMIT_ERR(context, "failed to copy bufs array from userspace");
		return PTR_ERR(bufs);
	}

	mappings = kcalloc(args->num_bufs, sizeof(*mappings), GFP_KERNEL);
	if (!mappings) {
		SUBMIT_ERR(context, "failed to allocate memory for mapping info");
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < args->num_bufs; i++) {
		struct drm_tegra_submit_buf *buf = &bufs[i];
		struct tegra_drm_mapping *mapping;

		if (buf->flags & ~DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT) {
			SUBMIT_ERR(context, "invalid flag specified for buffer");
			err = -EINVAL;
			goto drop_refs;
		}

		/* Takes a reference; stored in mappings[] below or put on error. */
		mapping = tegra_drm_mapping_get(context, buf->mapping);
		if (!mapping) {
			SUBMIT_ERR(context, "invalid mapping ID '%u' for buffer", buf->mapping);
			err = -EINVAL;
			goto drop_refs;
		}

		err = submit_write_reloc(context, bo, buf, mapping);
		if (err) {
			tegra_drm_mapping_put(mapping);
			goto drop_refs;
		}

		mappings[i].mapping = mapping;
		mappings[i].flags = buf->flags;
	}

	job_data->used_mappings = mappings;
	job_data->num_used_mappings = i;

	err = 0;

	goto done;

drop_refs:
	/* Drop the references of all fully-recorded entries before index i. */
	while (i--)
		tegra_drm_mapping_put(mappings[i].mapping);

	kfree(mappings);
	job_data->used_mappings = NULL;

done:
	kvfree(bufs);

	return err;
}
0331
0332 static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,
0333 struct xarray *syncpoints, struct drm_tegra_channel_submit *args)
0334 {
0335 struct host1x_syncpt *sp;
0336
0337 if (args->syncpt.flags) {
0338 SUBMIT_ERR(context, "invalid flag specified for syncpt");
0339 return -EINVAL;
0340 }
0341
0342
0343 sp = xa_load(syncpoints, args->syncpt.id);
0344 if (!sp) {
0345 SUBMIT_ERR(context, "syncpoint specified in syncpt was not allocated");
0346 return -EINVAL;
0347 }
0348
0349 job->syncpt = host1x_syncpt_get(sp);
0350 job->syncpt_incrs = args->syncpt.increments;
0351
0352 return 0;
0353 }
0354
/*
 * Append one GATHER_UPTR command to @job: validate the reserved fields,
 * word count and bounds against the copied gather data, run the optional
 * firewall validation, add the gather and advance *offset past the
 * consumed words. Returns 0 or -EINVAL.
 */
static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,
				 struct drm_tegra_submit_cmd_gather_uptr *cmd,
				 struct gather_bo *bo, u32 *offset,
				 struct tegra_drm_submit_data *job_data,
				 u32 *class)
{
	u32 next_offset;

	if (cmd->reserved[0] || cmd->reserved[1] || cmd->reserved[2]) {
		SUBMIT_ERR(context, "non-zero reserved field in GATHER_UPTR command");
		return -EINVAL;
	}

	/*
	 * NOTE(review): 16383 looks like the maximum count encodable in a
	 * host1x GATHER opcode (14-bit field) — confirm against the TRM.
	 */
	if (cmd->words > 16383) {
		SUBMIT_ERR(context, "too many words in GATHER_UPTR command");
		return -EINVAL;
	}

	if (check_add_overflow(*offset, cmd->words, &next_offset)) {
		SUBMIT_ERR(context, "too many total words in job");
		return -EINVAL;
	}

	/* Successive gathers consume consecutive ranges of the gather data. */
	if (next_offset > bo->gather_data_words) {
		SUBMIT_ERR(context, "GATHER_UPTR command overflows gather data");
		return -EINVAL;
	}

	if (tegra_drm_fw_validate(context->client, bo->gather_data, *offset,
				  cmd->words, job_data, class)) {
		SUBMIT_ERR(context, "job was rejected by firewall");
		return -EINVAL;
	}

	/* host1x expects the gather offset in bytes, hence * 4. */
	host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);

	*offset = next_offset;

	return 0;
}
0396
/*
 * Build a host1x_job from the userspace command list: GATHER_UPTR
 * commands reference consecutive ranges of the copied gather data, and
 * WAIT_SYNCPT / WAIT_SYNCPT_RELATIVE commands are translated into
 * host1x waits. Returns the job with one reference held, or an
 * ERR_PTR on failure.
 */
static struct host1x_job *
submit_create_job(struct tegra_drm_context *context, struct gather_bo *bo,
		  struct drm_tegra_channel_submit *args, struct tegra_drm_submit_data *job_data,
		  struct xarray *syncpoints)
{
	struct drm_tegra_submit_cmd *cmds;
	u32 i, gather_offset = 0, class;
	struct host1x_job *job;
	int err;

	/* Set initial class for firewall; may be updated as commands are parsed. */
	class = context->client->base.class;

	cmds = alloc_copy_user_array(u64_to_user_ptr(args->cmds_ptr), args->num_cmds,
				     sizeof(*cmds));
	if (IS_ERR(cmds)) {
		SUBMIT_ERR(context, "failed to copy cmds array from userspace");
		return ERR_CAST(cmds);
	}

	job = host1x_job_alloc(context->channel, args->num_cmds, 0, true);
	if (!job) {
		SUBMIT_ERR(context, "failed to allocate memory for job");
		job = ERR_PTR(-ENOMEM);
		goto done;
	}

	err = submit_get_syncpt(context, job, syncpoints, args);
	if (err < 0)
		goto free_job;

	job->client = &context->client->base;
	job->class = context->client->base.class;
	job->serialize = true;

	for (i = 0; i < args->num_cmds; i++) {
		struct drm_tegra_submit_cmd *cmd = &cmds[i];

		if (cmd->flags) {
			SUBMIT_ERR(context, "unknown flags given for cmd");
			err = -EINVAL;
			goto free_job;
		}

		if (cmd->type == DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR) {
			err = submit_job_add_gather(job, context, &cmd->gather_uptr, bo,
						    &gather_offset, job_data, &class);
			if (err)
				goto free_job;
		} else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT) {
			if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
				SUBMIT_ERR(context, "non-zero reserved value");
				err = -EINVAL;
				goto free_job;
			}

			/* Absolute wait: threshold is used as-is. */
			host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
					    false, class);
		} else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE) {
			if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
				SUBMIT_ERR(context, "non-zero reserved value");
				err = -EINVAL;
				goto free_job;
			}

			/* Relative waits only make sense against the job's own syncpoint. */
			if (cmd->wait_syncpt.id != args->syncpt.id) {
				SUBMIT_ERR(context, "syncpoint ID in CMD_WAIT_SYNCPT_RELATIVE is not used by the job");
				err = -EINVAL;
				goto free_job;
			}

			host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
					    true, class);
		} else {
			SUBMIT_ERR(context, "unknown cmd type");
			err = -EINVAL;
			goto free_job;
		}
	}

	/* A job that executes nothing is rejected. */
	if (gather_offset == 0) {
		SUBMIT_ERR(context, "job must have at least one gather");
		err = -EINVAL;
		goto free_job;
	}

	goto done;

free_job:
	host1x_job_put(job);
	job = ERR_PTR(err);

done:
	kvfree(cmds);

	return job;
}
0494
/*
 * host1x job release callback, invoked once the job has completed or
 * been torn down: drop the memory context and all mapping references
 * taken at submit time, free the per-job data, and release the engine's
 * runtime-PM reference taken in tegra_drm_ioctl_channel_submit().
 */
static void release_job(struct host1x_job *job)
{
	struct tegra_drm_client *client = container_of(job->client, struct tegra_drm_client, base);
	struct tegra_drm_submit_data *job_data = job->user_data;
	u32 i;

	if (job->memory_context)
		host1x_memory_context_put(job->memory_context);

	for (i = 0; i < job_data->num_used_mappings; i++)
		tegra_drm_mapping_put(job_data->used_mappings[i].mapping);

	kfree(job_data->used_mappings);
	kfree(job_data);

	/* Let the engine autosuspend now that its last job is done. */
	pm_runtime_mark_last_busy(client->base.dev);
	pm_runtime_put_autosuspend(client->base.dev);
}
0513
0514 int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
0515 struct drm_file *file)
0516 {
0517 struct tegra_drm_file *fpriv = file->driver_priv;
0518 struct drm_tegra_channel_submit *args = data;
0519 struct tegra_drm_submit_data *job_data;
0520 struct drm_syncobj *syncobj = NULL;
0521 struct tegra_drm_context *context;
0522 struct host1x_job *job;
0523 struct gather_bo *bo;
0524 u32 i;
0525 int err;
0526
0527 mutex_lock(&fpriv->lock);
0528
0529 context = xa_load(&fpriv->contexts, args->context);
0530 if (!context) {
0531 mutex_unlock(&fpriv->lock);
0532 pr_err_ratelimited("%s: %s: invalid channel context '%#x'", __func__,
0533 current->comm, args->context);
0534 return -EINVAL;
0535 }
0536
0537 if (args->syncobj_in) {
0538 struct dma_fence *fence;
0539
0540 err = drm_syncobj_find_fence(file, args->syncobj_in, 0, 0, &fence);
0541 if (err) {
0542 SUBMIT_ERR(context, "invalid syncobj_in '%#x'", args->syncobj_in);
0543 goto unlock;
0544 }
0545
0546 err = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(10000));
0547 dma_fence_put(fence);
0548 if (err) {
0549 SUBMIT_ERR(context, "wait for syncobj_in timed out");
0550 goto unlock;
0551 }
0552 }
0553
0554 if (args->syncobj_out) {
0555 syncobj = drm_syncobj_find(file, args->syncobj_out);
0556 if (!syncobj) {
0557 SUBMIT_ERR(context, "invalid syncobj_out '%#x'", args->syncobj_out);
0558 err = -ENOENT;
0559 goto unlock;
0560 }
0561 }
0562
0563
0564 err = submit_copy_gather_data(&bo, drm->dev, context, args);
0565 if (err)
0566 goto unlock;
0567
0568 job_data = kzalloc(sizeof(*job_data), GFP_KERNEL);
0569 if (!job_data) {
0570 SUBMIT_ERR(context, "failed to allocate memory for job data");
0571 err = -ENOMEM;
0572 goto put_bo;
0573 }
0574
0575
0576 err = submit_process_bufs(context, bo, args, job_data);
0577 if (err)
0578 goto free_job_data;
0579
0580
0581 job = submit_create_job(context, bo, args, job_data, &fpriv->syncpoints);
0582 if (IS_ERR(job)) {
0583 err = PTR_ERR(job);
0584 goto free_job_data;
0585 }
0586
0587
0588 err = host1x_job_pin(job, context->client->base.dev);
0589 if (err) {
0590 SUBMIT_ERR(context, "failed to pin job: %d", err);
0591 goto put_job;
0592 }
0593
0594 if (context->client->ops->get_streamid_offset) {
0595 err = context->client->ops->get_streamid_offset(
0596 context->client, &job->engine_streamid_offset);
0597 if (err) {
0598 SUBMIT_ERR(context, "failed to get streamid offset: %d", err);
0599 goto unpin_job;
0600 }
0601 }
0602
0603 if (context->memory_context && context->client->ops->can_use_memory_ctx) {
0604 bool supported;
0605
0606 err = context->client->ops->can_use_memory_ctx(context->client, &supported);
0607 if (err) {
0608 SUBMIT_ERR(context, "failed to detect if engine can use memory context: %d", err);
0609 goto unpin_job;
0610 }
0611
0612 if (supported) {
0613 job->memory_context = context->memory_context;
0614 host1x_memory_context_get(job->memory_context);
0615 }
0616 } else if (context->client->ops->get_streamid_offset) {
0617 #ifdef CONFIG_IOMMU_API
0618 struct iommu_fwspec *spec;
0619
0620
0621
0622
0623
0624 spec = dev_iommu_fwspec_get(context->client->base.dev);
0625 if (spec && spec->num_ids > 0)
0626 job->engine_fallback_streamid = spec->ids[0] & 0xffff;
0627 else
0628 job->engine_fallback_streamid = 0x7f;
0629 #else
0630 job->engine_fallback_streamid = 0x7f;
0631 #endif
0632 }
0633
0634
0635 err = pm_runtime_resume_and_get(context->client->base.dev);
0636 if (err < 0) {
0637 SUBMIT_ERR(context, "could not power up engine: %d", err);
0638 goto put_memory_context;
0639 }
0640
0641 job->user_data = job_data;
0642 job->release = release_job;
0643 job->timeout = 10000;
0644
0645
0646
0647
0648
0649 job_data = NULL;
0650
0651
0652 err = host1x_job_submit(job);
0653 if (err) {
0654 SUBMIT_ERR(context, "host1x job submission failed: %d", err);
0655 goto unpin_job;
0656 }
0657
0658
0659 args->syncpt.value = job->syncpt_end;
0660
0661 if (syncobj) {
0662 struct dma_fence *fence = host1x_fence_create(job->syncpt, job->syncpt_end);
0663 if (IS_ERR(fence)) {
0664 err = PTR_ERR(fence);
0665 SUBMIT_ERR(context, "failed to create postfence: %d", err);
0666 }
0667
0668 drm_syncobj_replace_fence(syncobj, fence);
0669 }
0670
0671 goto put_job;
0672
0673 put_memory_context:
0674 if (job->memory_context)
0675 host1x_memory_context_put(job->memory_context);
0676 unpin_job:
0677 host1x_job_unpin(job);
0678 put_job:
0679 host1x_job_put(job);
0680 free_job_data:
0681 if (job_data && job_data->used_mappings) {
0682 for (i = 0; i < job_data->num_used_mappings; i++)
0683 tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
0684
0685 kfree(job_data->used_mappings);
0686 }
0687
0688 if (job_data)
0689 kfree(job_data);
0690 put_bo:
0691 gather_bo_put(&bo->base);
0692 unlock:
0693 if (syncobj)
0694 drm_syncobj_put(syncobj);
0695
0696 mutex_unlock(&fpriv->lock);
0697 return err;
0698 }