// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"

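/*
 * Module parameter gating ioctls whose UAPI is not considered stable yet
 * (checked through panfrost_unstable_ioctl_check() below, which the perfcnt
 * ioctls use). The "unsafe" variant means enabling it taints the kernel.
 */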
static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);

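/*
 * DRM_IOCTL_PANFROST_GET_PARAM: return one GPU identification or feature
 * value cached in pfdev->features, selected by param->param.
 */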
static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
    struct drm_panfrost_get_param *param = data;
    struct panfrost_device *pfdev = ddev->dev_private;

    if (param->pad != 0)
        return -EINVAL;

#define PANFROST_FEATURE(name, member)          \
    case DRM_PANFROST_PARAM_ ## name:       \
        param->value = pfdev->features.member;  \
        break
#define PANFROST_FEATURE_ARRAY(name, member, max)           \
    case DRM_PANFROST_PARAM_ ## name ## 0 ...           \
        DRM_PANFROST_PARAM_ ## name ## max:         \
        param->value = pfdev->features.member[param->param -    \
            DRM_PANFROST_PARAM_ ## name ## 0];      \
        break

    switch (param->param) {
        PANFROST_FEATURE(GPU_PROD_ID, id);
        PANFROST_FEATURE(GPU_REVISION, revision);
        PANFROST_FEATURE(SHADER_PRESENT, shader_present);
        PANFROST_FEATURE(TILER_PRESENT, tiler_present);
        PANFROST_FEATURE(L2_PRESENT, l2_present);
        PANFROST_FEATURE(STACK_PRESENT, stack_present);
        PANFROST_FEATURE(AS_PRESENT, as_present);
        PANFROST_FEATURE(JS_PRESENT, js_present);
        PANFROST_FEATURE(L2_FEATURES, l2_features);
        PANFROST_FEATURE(CORE_FEATURES, core_features);
        PANFROST_FEATURE(TILER_FEATURES, tiler_features);
        PANFROST_FEATURE(MEM_FEATURES, mem_features);
        PANFROST_FEATURE(MMU_FEATURES, mmu_features);
        PANFROST_FEATURE(THREAD_FEATURES, thread_features);
        PANFROST_FEATURE(MAX_THREADS, max_threads);
        PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
                thread_max_workgroup_sz);
        PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
                thread_max_barrier_sz);
        PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
        PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
        PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
        PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
        PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
        PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
    default:
        return -EINVAL;
    }

    return 0;
}

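/*
 * DRM_IOCTL_PANFROST_CREATE_BO: allocate a GEM buffer object, map it into
 * the calling file's GPU address space, and return both its handle and the
 * GPU VA offset of the mapping. HEAP BOs must also be flagged NOEXEC.
 */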
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
        struct drm_file *file)
{
    struct panfrost_file_priv *priv = file->driver_priv;
    struct panfrost_gem_object *bo;
    struct drm_panfrost_create_bo *args = data;
    struct panfrost_gem_mapping *mapping;

    if (!args->size || args->pad ||
        (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
        return -EINVAL;

    /* Heaps should never be executable */
    if ((args->flags & PANFROST_BO_HEAP) &&
        !(args->flags & PANFROST_BO_NOEXEC))
        return -EINVAL;

    bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
                         &args->handle);
    if (IS_ERR(bo))
        return PTR_ERR(bo);

    mapping = panfrost_gem_mapping_get(bo, priv);
    if (!mapping) {
        drm_gem_object_put(&bo->base.base);
        return -EINVAL;
    }

    args->offset = mapping->mmnode.start << PAGE_SHIFT;
    panfrost_gem_mapping_put(mapping);

    return 0;
}

/**
 * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve handles from userspace to BOs and attach them to job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_lookup_bos(struct drm_device *dev,
          struct drm_file *file_priv,
          struct drm_panfrost_submit *args,
          struct panfrost_job *job)
{
    struct panfrost_file_priv *priv = file_priv->driver_priv;
    struct panfrost_gem_object *bo;
    unsigned int i;
    int ret;

    job->bo_count = args->bo_handle_count;

    if (!job->bo_count)
        return 0;

    ret = drm_gem_objects_lookup(file_priv,
                     (void __user *)(uintptr_t)args->bo_handles,
                     job->bo_count, &job->bos);
    if (ret)
        return ret;

    job->mappings = kvmalloc_array(job->bo_count,
                       sizeof(struct panfrost_gem_mapping *),
                       GFP_KERNEL | __GFP_ZERO);
    if (!job->mappings)
        return -ENOMEM;

    for (i = 0; i < job->bo_count; i++) {
        struct panfrost_gem_mapping *mapping;

        bo = to_panfrost_bo(job->bos[i]);
        mapping = panfrost_gem_mapping_get(bo, priv);
        if (!mapping) {
            ret = -EINVAL;
            break;
        }

        atomic_inc(&bo->gpu_usecount);
        job->mappings[i] = mapping;
    }

    return ret;
}

/**
 * panfrost_copy_in_sync() - Sets up job->deps with the sync objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve syncobjs from userspace to fences and attach them to job.
 *
 * Note that this function doesn't need to unreference the fences on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_copy_in_sync(struct drm_device *dev,
          struct drm_file *file_priv,
          struct drm_panfrost_submit *args,
          struct panfrost_job *job)
{
    u32 *handles;
    int ret = 0;
    int i, in_fence_count;

    in_fence_count = args->in_sync_count;

    if (!in_fence_count)
        return 0;

    handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL);
    if (!handles) {
        ret = -ENOMEM;
        DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
        goto fail;
    }

    if (copy_from_user(handles,
               (void __user *)(uintptr_t)args->in_syncs,
               in_fence_count * sizeof(u32))) {
        ret = -EFAULT;
        DRM_DEBUG("Failed to copy in syncobj handles\n");
        goto fail;
    }

    for (i = 0; i < in_fence_count; i++) {
        struct dma_fence *fence;

        ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
                         &fence);
        if (ret)
            goto fail;

        ret = drm_sched_job_add_dependency(&job->base, fence);

        if (ret)
            goto fail;
    }

fail:
    kvfree(handles);
    return ret;
}

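/*
 * DRM_IOCTL_PANFROST_SUBMIT: build a panfrost_job from the ioctl arguments,
 * attach its in-syncobj fences and BO mappings, and push it to the job
 * scheduler. If an out_sync syncobj is given, it is set to the job's
 * render-done fence.
 */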
static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
        struct drm_file *file)
{
    struct panfrost_device *pfdev = dev->dev_private;
    struct panfrost_file_priv *file_priv = file->driver_priv;
    struct drm_panfrost_submit *args = data;
    struct drm_syncobj *sync_out = NULL;
    struct panfrost_job *job;
    int ret = 0, slot;

    if (!args->jc)
        return -EINVAL;

    if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
        return -EINVAL;

    if (args->out_sync > 0) {
        sync_out = drm_syncobj_find(file, args->out_sync);
        if (!sync_out)
            return -ENODEV;
    }

    job = kzalloc(sizeof(*job), GFP_KERNEL);
    if (!job) {
        ret = -ENOMEM;
        goto out_put_syncout;
    }

    kref_init(&job->refcount);

    job->pfdev = pfdev;
    job->jc = args->jc;
    job->requirements = args->requirements;
    job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
    job->mmu = file_priv->mmu;

    slot = panfrost_job_get_slot(job);

    ret = drm_sched_job_init(&job->base,
                 &file_priv->sched_entity[slot],
                 NULL);
    if (ret)
        goto out_put_job;

    ret = panfrost_copy_in_sync(dev, file, args, job);
    if (ret)
        goto out_cleanup_job;

    ret = panfrost_lookup_bos(dev, file, args, job);
    if (ret)
        goto out_cleanup_job;

    ret = panfrost_job_push(job);
    if (ret)
        goto out_cleanup_job;

    /* Update the return sync object for the job */
    if (sync_out)
        drm_syncobj_replace_fence(sync_out, job->render_done_fence);

out_cleanup_job:
    if (ret)
        drm_sched_job_cleanup(&job->base);
out_put_job:
    panfrost_job_put(job);
out_put_syncout:
    if (sync_out)
        drm_syncobj_put(sync_out);

    return ret;
}

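/*
 * DRM_IOCTL_PANFROST_WAIT_BO: wait, with a timeout, for the fences attached
 * to a BO's reservation object to signal.
 */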
static int
panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
               struct drm_file *file_priv)
{
    long ret;
    struct drm_panfrost_wait_bo *args = data;
    struct drm_gem_object *gem_obj;
    unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

    if (args->pad)
        return -EINVAL;

    gem_obj = drm_gem_object_lookup(file_priv, args->handle);
    if (!gem_obj)
        return -ENOENT;

    ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ,
                    true, timeout);
    if (!ret)
        ret = timeout ? -ETIMEDOUT : -EBUSY;

    drm_gem_object_put(gem_obj);

    return ret;
}

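/*
 * DRM_IOCTL_PANFROST_MMAP_BO: create the fake mmap offset for a BO so
 * userspace can mmap() it through the DRM file descriptor.
 */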
static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
              struct drm_file *file_priv)
{
    struct drm_panfrost_mmap_bo *args = data;
    struct drm_gem_object *gem_obj;
    int ret;

    if (args->flags != 0) {
        DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
        return -EINVAL;
    }

    gem_obj = drm_gem_object_lookup(file_priv, args->handle);
    if (!gem_obj) {
        DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
        return -ENOENT;
    }

    /* Don't allow mmapping of heap objects as pages are not pinned. */
    if (to_panfrost_bo(gem_obj)->is_heap) {
        ret = -EINVAL;
        goto out;
    }

    ret = drm_gem_create_mmap_offset(gem_obj);
    if (ret == 0)
        args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

out:
    drm_gem_object_put(gem_obj);
    return ret;
}

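/*
 * DRM_IOCTL_PANFROST_GET_BO_OFFSET: report the GPU VA at which a BO is
 * mapped in the calling file's address space.
 */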
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
                struct drm_file *file_priv)
{
    struct panfrost_file_priv *priv = file_priv->driver_priv;
    struct drm_panfrost_get_bo_offset *args = data;
    struct panfrost_gem_mapping *mapping;
    struct drm_gem_object *gem_obj;
    struct panfrost_gem_object *bo;

    gem_obj = drm_gem_object_lookup(file_priv, args->handle);
    if (!gem_obj) {
        DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
        return -ENOENT;
    }
    bo = to_panfrost_bo(gem_obj);

    mapping = panfrost_gem_mapping_get(bo, priv);
    drm_gem_object_put(gem_obj);

    if (!mapping)
        return -EINVAL;

    args->offset = mapping->mmnode.start << PAGE_SHIFT;
    panfrost_gem_mapping_put(mapping);
    return 0;
}

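/*
 * DRM_IOCTL_PANFROST_MADVISE: mark a BO as needed or purgeable. Purgeable
 * BOs are moved to the shrinker list so their pages can be reclaimed under
 * memory pressure; only a BO with a single user (the calling fd) may be
 * marked purgeable.
 */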
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
    struct panfrost_file_priv *priv = file_priv->driver_priv;
    struct drm_panfrost_madvise *args = data;
    struct panfrost_device *pfdev = dev->dev_private;
    struct drm_gem_object *gem_obj;
    struct panfrost_gem_object *bo;
    int ret = 0;

    gem_obj = drm_gem_object_lookup(file_priv, args->handle);
    if (!gem_obj) {
        DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
        return -ENOENT;
    }

    bo = to_panfrost_bo(gem_obj);

    mutex_lock(&pfdev->shrinker_lock);
    mutex_lock(&bo->mappings.lock);
    if (args->madv == PANFROST_MADV_DONTNEED) {
        struct panfrost_gem_mapping *first;

        first = list_first_entry(&bo->mappings.list,
                     struct panfrost_gem_mapping,
                     node);

        /*
         * If we want to mark the BO purgeable, there must be only one
         * user: the caller FD.
         * We could do something smarter and mark the BO purgeable only
         * when all its users have marked it purgeable, but globally
         * visible/shared BOs are likely to never be marked purgeable
         * anyway, so let's not bother.
         */
        if (!list_is_singular(&bo->mappings.list) ||
            WARN_ON_ONCE(first->mmu != priv->mmu)) {
            ret = -EINVAL;
            goto out_unlock_mappings;
        }
    }

    args->retained = drm_gem_shmem_madvise(&bo->base, args->madv);

    if (args->retained) {
        if (args->madv == PANFROST_MADV_DONTNEED)
            list_move_tail(&bo->base.madv_list,
                       &pfdev->shrinker_list);
        else if (args->madv == PANFROST_MADV_WILLNEED)
            list_del_init(&bo->base.madv_list);
    }

out_unlock_mappings:
    mutex_unlock(&bo->mappings.lock);
    mutex_unlock(&pfdev->shrinker_lock);

    drm_gem_object_put(gem_obj);
    return ret;
}

int panfrost_unstable_ioctl_check(void)
{
    if (!unstable_ioctls)
        return -ENOSYS;

    return 0;
}

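/*
 * Per-file state: each open() of the device gets its own MMU address space
 * and scheduler entities, released again in panfrost_postclose().
 */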
static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
    int ret;
    struct panfrost_device *pfdev = dev->dev_private;
    struct panfrost_file_priv *panfrost_priv;

    panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
    if (!panfrost_priv)
        return -ENOMEM;

    panfrost_priv->pfdev = pfdev;
    file->driver_priv = panfrost_priv;

    panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev);
    if (IS_ERR(panfrost_priv->mmu)) {
        ret = PTR_ERR(panfrost_priv->mmu);
        goto err_free;
    }

    ret = panfrost_job_open(panfrost_priv);
    if (ret)
        goto err_job;

    return 0;

err_job:
    panfrost_mmu_ctx_put(panfrost_priv->mmu);
err_free:
    kfree(panfrost_priv);
    return ret;
}

static void
panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
    struct panfrost_file_priv *panfrost_priv = file->driver_priv;

    panfrost_perfcnt_close(file);
    panfrost_job_close(panfrost_priv);

    panfrost_mmu_ctx_put(panfrost_priv->mmu);
    kfree(panfrost_priv);
}

static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
    DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

    PANFROST_IOCTL(SUBMIT,      submit,     DRM_RENDER_ALLOW),
    PANFROST_IOCTL(WAIT_BO,     wait_bo,    DRM_RENDER_ALLOW),
    PANFROST_IOCTL(CREATE_BO,   create_bo,  DRM_RENDER_ALLOW),
    PANFROST_IOCTL(MMAP_BO,     mmap_bo,    DRM_RENDER_ALLOW),
    PANFROST_IOCTL(GET_PARAM,   get_param,  DRM_RENDER_ALLOW),
    PANFROST_IOCTL(GET_BO_OFFSET,   get_bo_offset,  DRM_RENDER_ALLOW),
    PANFROST_IOCTL(PERFCNT_ENABLE,  perfcnt_enable, DRM_RENDER_ALLOW),
    PANFROST_IOCTL(PERFCNT_DUMP,    perfcnt_dump,   DRM_RENDER_ALLOW),
    PANFROST_IOCTL(MADVISE,     madvise,    DRM_RENDER_ALLOW),
};
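/*
 * All of the ioctls above are DRM_RENDER_ALLOW, so they are reachable
 * through a render node. A minimal userspace sketch (illustrative only;
 * struct layouts and ioctl numbers come from
 * include/uapi/drm/panfrost_drm.h, and the device path is an assumption):
 *
 *   int fd = open("/dev/dri/renderD128", O_RDWR);
 *   struct drm_panfrost_create_bo create = { .size = 4096 };
 *   ioctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &create);
 *   // create.handle now names the BO, create.offset is its GPU VA
 */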

DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);

/*
 * Panfrost driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 * - 1.2 - adds AFBC_FEATURES query
 */
static const struct drm_driver panfrost_drm_driver = {
    .driver_features    = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
    .open           = panfrost_open,
    .postclose      = panfrost_postclose,
    .ioctls         = panfrost_drm_driver_ioctls,
    .num_ioctls     = ARRAY_SIZE(panfrost_drm_driver_ioctls),
    .fops           = &panfrost_drm_driver_fops,
    .name           = "panfrost",
    .desc           = "panfrost DRM",
    .date           = "20180908",
    .major          = 1,
    .minor          = 2,

    .gem_create_object  = panfrost_gem_create_object,
    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
    .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
    .gem_prime_mmap     = drm_gem_prime_mmap,
};

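/*
 * Platform driver glue: probe allocates the panfrost_device, brings up the
 * hardware via panfrost_device_init(), enables runtime PM with a 50 ms
 * autosuspend delay and registers the DRM device.
 */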
static int panfrost_probe(struct platform_device *pdev)
{
    struct panfrost_device *pfdev;
    struct drm_device *ddev;
    int err;

    pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
    if (!pfdev)
        return -ENOMEM;

    pfdev->pdev = pdev;
    pfdev->dev = &pdev->dev;

    platform_set_drvdata(pdev, pfdev);

    pfdev->comp = of_device_get_match_data(&pdev->dev);
    if (!pfdev->comp)
        return -ENODEV;

    pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;

    /* Allocate and initialize the DRM device. */
    ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
    if (IS_ERR(ddev))
        return PTR_ERR(ddev);

    ddev->dev_private = pfdev;
    pfdev->ddev = ddev;

    mutex_init(&pfdev->shrinker_lock);
    INIT_LIST_HEAD(&pfdev->shrinker_list);

    err = panfrost_device_init(pfdev);
    if (err) {
        if (err != -EPROBE_DEFER)
            dev_err(&pdev->dev, "Fatal error during GPU init\n");
        goto err_out0;
    }

    pm_runtime_set_active(pfdev->dev);
    pm_runtime_mark_last_busy(pfdev->dev);
    pm_runtime_enable(pfdev->dev);
    pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
    pm_runtime_use_autosuspend(pfdev->dev);

    /* Register the DRM device with the core. */
    err = drm_dev_register(ddev, 0);
    if (err < 0)
        goto err_out1;

    panfrost_gem_shrinker_init(ddev);

    return 0;

err_out1:
    pm_runtime_disable(pfdev->dev);
    panfrost_device_fini(pfdev);
    pm_runtime_set_suspended(pfdev->dev);
err_out0:
    drm_dev_put(ddev);
    return err;
}

static int panfrost_remove(struct platform_device *pdev)
{
    struct panfrost_device *pfdev = platform_get_drvdata(pdev);
    struct drm_device *ddev = pfdev->ddev;

    drm_dev_unregister(ddev);
    panfrost_gem_shrinker_cleanup(ddev);

    pm_runtime_get_sync(pfdev->dev);
    pm_runtime_disable(pfdev->dev);
    panfrost_device_fini(pfdev);
    pm_runtime_set_suspended(pfdev->dev);

    drm_dev_put(ddev);
    return 0;
}

/*
 * The OPP core wants the supply names to be NULL terminated, but we need the
 * correct num_supplies value for regulator core. Hence, we NULL terminate here
 * and then initialize num_supplies with ARRAY_SIZE - 1.
 */
static const char * const default_supplies[] = { "mali", NULL };
static const struct panfrost_compatible default_data = {
    .num_supplies = ARRAY_SIZE(default_supplies) - 1,
    .supply_names = default_supplies,
    .num_pm_domains = 1, /* optional */
    .pm_domain_names = NULL,
};

static const struct panfrost_compatible amlogic_data = {
    .num_supplies = ARRAY_SIZE(default_supplies) - 1,
    .supply_names = default_supplies,
    .vendor_quirk = panfrost_gpu_amlogic_quirk,
};

static const char * const mediatek_mt8183_supplies[] = { "mali", "sram", NULL };
static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
static const struct panfrost_compatible mediatek_mt8183_data = {
    .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies) - 1,
    .supply_names = mediatek_mt8183_supplies,
    .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
    .pm_domain_names = mediatek_mt8183_pm_domains,
};

static const struct of_device_id dt_match[] = {
    /* Set first to probe before the generic compatibles */
    { .compatible = "amlogic,meson-gxm-mali",
      .data = &amlogic_data, },
    { .compatible = "amlogic,meson-g12a-mali",
      .data = &amlogic_data, },
    { .compatible = "arm,mali-t604", .data = &default_data, },
    { .compatible = "arm,mali-t624", .data = &default_data, },
    { .compatible = "arm,mali-t628", .data = &default_data, },
    { .compatible = "arm,mali-t720", .data = &default_data, },
    { .compatible = "arm,mali-t760", .data = &default_data, },
    { .compatible = "arm,mali-t820", .data = &default_data, },
    { .compatible = "arm,mali-t830", .data = &default_data, },
    { .compatible = "arm,mali-t860", .data = &default_data, },
    { .compatible = "arm,mali-t880", .data = &default_data, },
    { .compatible = "arm,mali-bifrost", .data = &default_data, },
    { .compatible = "arm,mali-valhall-jm", .data = &default_data, },
    { .compatible = "mediatek,mt8183-mali", .data = &mediatek_mt8183_data },
    {}
};
MODULE_DEVICE_TABLE(of, dt_match);

static const struct dev_pm_ops panfrost_pm_ops = {
    SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
    SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
};

static struct platform_driver panfrost_driver = {
    .probe      = panfrost_probe,
    .remove     = panfrost_remove,
    .driver     = {
        .name   = "panfrost",
        .pm = &panfrost_pm_ops,
        .of_match_table = dt_match,
    },
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");