// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/of_address.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>

#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"

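/* MSM driver UABI version, reported to userspace via the DRM version ioctl: */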
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	9
#define MSM_VERSION_PATCHLEVEL	0

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

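/*
 * IRQ handling is delegated entirely to the KMS backend (mdp4/mdp5/dpu):
 */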
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	if (kms->funcs->irq_postinstall)
		return kms->funcs->irq_postinstall(kms);

	return 0;
}

static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	msm_irq_preinstall(dev);

	ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
	if (ret)
		return ret;

	kms->irq_requested = true;

	ret = msm_irq_postinstall(dev);
	if (ret) {
		free_irq(irq, dev);
		return ret;
	}

	return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->irq_uninstall(kms);
	if (kms->irq_requested)
		free_irq(kms->irq, dev);
}

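/*
 * Vblank enable/disable is requested from atomic (non-sleeping) context,
 * but the KMS callbacks may sleep, so punt the actual work to a work item
 * on the driver workqueue:
 */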
struct msm_vblank_work {
	struct work_struct work;
	int crtc_id;
	bool enable;
	struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
	else
		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);

	kfree(vbl_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
				  int crtc_id, bool enable)
{
	struct msm_vblank_work *vbl_work;

	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
	if (!vbl_work)
		return -ENOMEM;

	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

	vbl_work->crtc_id = crtc_id;
	vbl_work->enable = enable;
	vbl_work->priv = priv;

	queue_work(priv->wq, &vbl_work->work);

	return 0;
}

static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	int i;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		drm_atomic_helper_shutdown(ddev);
	}

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before msm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	flush_workqueue(priv->wq);

	/* clean up event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->event_thread[i].worker)
			kthread_destroy_worker(priv->event_thread[i].worker);
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif

	msm_disp_snapshot_destroy(ddev);

	drm_mode_config_cleanup(ddev);

	for (i = 0; i < priv->num_bridges; i++)
		drm_bridge_remove(priv->bridges[i]);

	pm_runtime_get_sync(dev);
	msm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			       priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	destroy_workqueue(priv->wq);

	return 0;
}
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
	struct iommu_domain *domain;
	struct msm_gem_address_space *aspace;
	struct msm_mmu *mmu;
	struct device *mdp_dev = dev->dev;
	struct device *mdss_dev = mdp_dev->parent;
	struct device *iommu_dev;

	/*
	 * IOMMUs can be a part of MDSS device tree binding or the
	 * MDP/DPU device.
	 */
	if (device_iommu_mapped(mdp_dev))
		iommu_dev = mdp_dev;
	else
		iommu_dev = mdss_dev;

	domain = iommu_domain_alloc(iommu_dev->bus);
	if (!domain) {
		drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
		return NULL;
	}

	mmu = msm_iommu_new(iommu_dev, domain);
	if (IS_ERR(mmu)) {
		iommu_domain_free(domain);
		return ERR_CAST(mmu);
	}

	aspace = msm_gem_address_space_create(mmu, "mdp_kms",
		0x1000, 0x100000000 - 0x1000);
	if (IS_ERR(aspace))
		mmu->funcs->destroy(mmu);

	return aspace;
}

bool msm_use_mmu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/*
	 * a2xx comes with its own MMU.
	 * On other platforms the IOMMU can be declared for either the
	 * MDP/DPU device or for its parent, the MDSS device.
	 */
	return priv->is_a2xx ||
		device_iommu_mapped(dev->dev) ||
		device_iommu_mapped(dev->dev->parent);
}

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the simple case just use VRAM, configured via the device tree
	 * (or from the "vram" commandline param if no DT):
	 */
	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;
		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start + 1;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use the carveout
		 * allocator, backed by the chunk reserved at early boot:
		 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				    &priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
			     (uint32_t)priv->vram.paddr,
			     (uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}

static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev;
	struct msm_kms *kms;
	int ret, i;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}
	ddev->dev_private = priv;
	priv->dev = ddev;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	if (!priv->wq)
		return -ENOMEM;
	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;

	INIT_LIST_HEAD(&priv->objects);
	mutex_init(&priv->obj_lock);

	INIT_LIST_HEAD(&priv->inactive_willneed);
	INIT_LIST_HEAD(&priv->inactive_dontneed);
	INIT_LIST_HEAD(&priv->inactive_unpinned);
	mutex_init(&priv->mm_lock);

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->mm_lock);
	fs_reclaim_release(GFP_KERNEL);

	drm_mode_config_init(ddev);

	ret = msm_init_vram(ddev);
	if (ret)
		return ret;

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		return ret;

	dma_set_max_seg_size(dev, UINT_MAX);

	msm_gem_shrinker_init(ddev);

	if (priv->kms_init) {
		ret = priv->kms_init(ddev);
		if (ret) {
			DRM_DEV_ERROR(dev, "failed to load kms\n");
			priv->kms = NULL;
			goto err_msm_uninit;
		}
		kms = priv->kms;
	} else {
		/* valid only for the dummy headless case, where of_node=NULL */
		WARN_ON(dev->of_node);
		kms = NULL;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	if (kms) {
		kms->dev = ddev;
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
			goto err_msm_uninit;
		}
	}

	drm_helper_move_panel_connectors_to_head(ddev);

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	for (i = 0; i < priv->num_crtcs; i++) {
		/* initialize event thread */
		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
		priv->event_thread[i].dev = ddev;
		priv->event_thread[i].worker = kthread_create_worker(0,
			"crtc_event:%d", priv->event_thread[i].crtc_id);
		if (IS_ERR(priv->event_thread[i].worker)) {
			ret = PTR_ERR(priv->event_thread[i].worker);
			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
			goto err_msm_uninit;
		}

		/*
		 * The event thread services frame-done events, so run it at
		 * realtime priority to keep pace with the display hardware:
		 */
		sched_set_fifo(priv->event_thread[i].worker->task);
	}

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = msm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
			goto err_msm_uninit;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	if (kms) {
		ret = msm_disp_snapshot_init(ddev);
		if (ret)
			DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
	}
	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (kms && fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	drm_kms_helper_poll_init(ddev);

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev);
	return ret;
}

/*
 * DRM operations:
 */

static void load_gpu(struct drm_device *dev)
{
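	/* Serialize racing opens so the GPU is initialized only once: */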
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int context_init(struct drm_device *dev, struct drm_file *file)
{
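	/* Hands out a unique ident (ctx->seqno) to each new file context: */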
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->submitqueues);
	rwlock_init(&ctx->queuelock);

	kref_init(&ctx->ref);
	msm_submitqueue_init(dev, ctx);

	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
	file->driver_priv = ctx;

	ctx->seqno = atomic_inc_return(&ident);

	return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	msm_file_private_put(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	/*
	 * It is not possible to set sysprof param to non-zero if gpu
	 * is not initialized:
	 */
	if (priv->gpu)
		msm_file_private_set_sysprof(ctx, priv->gpu, 0);

	context_close(ctx);
}

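/*
 * Vblank on/off requests arrive from the drm_vblank core in atomic context;
 * the real work is deferred via vblank_ctrl_queue_work():
 */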
int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;

	drm_dbg_vbl(dev, "crtc=%u", pipe);

	return vblank_ctrl_queue_work(priv, pipe, true);
}

void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;

	drm_dbg_vbl(dev, "crtc=%u", pipe);

	vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, file->driver_priv,
				     args->param, &args->value, &args->len);
}

static int msm_ioctl_set_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->set_param(gpu, file->driver_priv,
				     args->param, args->value, args->len);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;
	uint32_t flags = args->flags;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Uncached CPU mappings are deprecated; quietly promote them to
	 * writecombine instead:
	 */
	if (flags & MSM_BO_UNCACHED) {
		flags &= ~MSM_BO_CACHED;
		flags |= MSM_BO_WC;
	}

	return msm_gem_new_handle(dev, file, args->size,
			flags, &args->handle, NULL);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	/*
	 * Don't pin the memory here - just get an address so that userspace
	 * can be productive:
	 */
	return msm_gem_get_iova(obj, ctx->aspace, iova);
}

static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	/* Only supported if per-process address space is supported: */
	if (priv->gpu->aspace == ctx->aspace)
		return -EOPNOTSUPP;

	return msm_gem_set_iova(obj, ctx->aspace, iova);
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
	case MSM_INFO_SET_IOVA:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
		break;
	case MSM_INFO_SET_IOVA:
		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -EINVAL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	}

	drm_gem_object_put(obj);

	return ret;
}

static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
		      ktime_t timeout)
{
	struct dma_fence *fence;
	int ret;

	if (fence_after(fence_id, queue->last_fence)) {
		DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
				      fence_id, queue->last_fence);
		return -EINVAL;
	}

	/*
	 * Map submitqueue scoped "seqno" (which is actually an idr key)
	 * back to underlying dma-fence
	 *
	 * The fence is removed from the fence_idr when the submit is
	 * retired, so if the fence is not found it means there is nothing
	 * to wait for
	 */
	ret = mutex_lock_interruptible(&queue->lock);
	if (ret)
		return ret;
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	mutex_unlock(&queue->lock);

	if (!fence)
		return 0;

	ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret != -ERESTARTSYS) {
		ret = 0;
	}

	dma_fence_put(fence);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	struct msm_gpu_submitqueue *queue;
	int ret;

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = wait_fence(queue, args->fence, to_ktime(args->timeout));

	msm_submitqueue_put(queue);

	return ret;
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
				      args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SET_PARAM,    msm_ioctl_set_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};

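/* Expose per-client GPU stats via /proc/<pid>/fdinfo: */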
static void msm_fop_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct drm_file *file = f->private_data;
	struct drm_device *dev = file->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_printer p = drm_seq_file_printer(m);

	if (!priv->gpu)
		return;

	msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, &p);
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = msm_fop_show_fdinfo,
};

static const struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET |
				DRIVER_SYNCOBJ,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.lastclose          = drm_fb_helper_lastclose,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};

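/*
 * Suspend/resume the display pipeline around system-wide power transitions:
 */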
int msm_pm_prepare(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return 0;

	return drm_mode_config_helper_suspend(ddev);
}

void msm_pm_complete(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return;

	drm_mode_config_helper_resume(ddev);
}

static const struct dev_pm_ops msm_pm_ops = {
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};

/*
 * Componentized driver support:
 */

/*
 * Identify the components we need to add by walking the endpoints of our
 * MDP output ports and adding the remote-endpoint devices as component
 * matches:
 */
static int add_components_mdp(struct device *master_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = master_dev->of_node;
	struct device_node *ep_node;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add:
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface:
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   component_compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}

/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add
 * them as components:
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, component_compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

int msm_drv_probe(struct device *master_dev,
		  int (*kms_init)(struct drm_device *dev))
{
	struct msm_drm_private *priv;
	struct component_match *match = NULL;
	int ret;

	priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->kms_init = kms_init;
	dev_set_drvdata(master_dev, priv);

	/* Add mdp components if we have KMS. */
	if (kms_init) {
		ret = add_components_mdp(master_dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(master_dev, &match);
	if (ret)
		return ret;

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(master_dev, ~0);
	if (ret)
		return ret;

	ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
	if (ret)
		return ret;

	return 0;
}

/*
 * Platform driver:
 * Used only for headless GPU instances
 */
static int msm_pdev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, NULL);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

void msm_drv_shutdown(struct platform_device *pdev)
{
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *drm = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return;

	drm_atomic_helper_shutdown(drm);
}

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.shutdown   = msm_drv_shutdown,
	.driver     = {
		.name   = "msm",
		.pm     = &msm_pm_ops,
	},
};

static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_hdmi_register();
	msm_dp_register();
	adreno_register();
	msm_mdp4_register();
	msm_mdss_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_mdss_unregister();
	msm_mdp4_unregister();
	msm_dp_unregister();
	msm_hdmi_unregister();
	adreno_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");