// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 */

#include <linux/ascii85.h>
#include <linux/interconnect.h>
#include <linux/qcom_scm.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/nvmem-consumer.h>
#include <soc/qcom/ocmem.h>
#include "adreno_gpu.h"
#include "a6xx_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"

static u64 address_space_size;
MODULE_PARM_DESC(address_space_size, "Override for size of a process's private GPU address space");
module_param(address_space_size, ullong, 0600);
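/*
 * Example (hypothetical value; assumes the driver is built as the "msm"
 * module): boot with msm.address_space_size=0x100000000 to force a 4GiB
 * private address space per process.
 */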

static bool zap_available = true;

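/*
 * Load a zap shader MDT image into the reserved memory region described
 * by the zap-shader DT node, then ask the secure world (via SCM) to
 * authenticate it.
 */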
static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
        u32 pasid)
{
    struct device *dev = &gpu->pdev->dev;
    const struct firmware *fw;
    const char *signed_fwname = NULL;
    struct device_node *np, *mem_np;
    struct resource r;
    phys_addr_t mem_phys;
    ssize_t mem_size;
    void *mem_region = NULL;
    int ret;

    if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
        zap_available = false;
        return -EINVAL;
    }

    np = of_get_child_by_name(dev->of_node, "zap-shader");
    if (!np) {
        zap_available = false;
        return -ENODEV;
    }

    /* Keep the zap-shader node referenced until firmware-name is read below */
    mem_np = of_parse_phandle(np, "memory-region", 0);
    if (!mem_np) {
        of_node_put(np);
        zap_available = false;
        return -EINVAL;
    }

    ret = of_address_to_resource(mem_np, 0, &r);
    of_node_put(mem_np);
    if (ret) {
        of_node_put(np);
        return ret;
    }

    mem_phys = r.start;

    /*
     * Check for a firmware-name property.  This is the new scheme
     * to handle firmware that may be signed with device specific
     * keys, allowing us to have a different zap fw path for different
     * devices.
     *
     * If the firmware-name property is found, we bypass the
     * adreno_request_fw() mechanism, because we don't need to handle
     * the /lib/firmware/qcom/... vs /lib/firmware/... case.
     *
     * If the firmware-name property is not found, for backwards
     * compatibility we fall back to the fwname from the gpulist
     * table.
     */
    of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
    of_node_put(np);
    if (signed_fwname) {
        fwname = signed_fwname;
        ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
        if (ret)
            fw = ERR_PTR(ret);
    } else if (fwname) {
        /* Request the MDT file from the default location: */
        fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
    } else {
        /*
         * For new targets, we require the firmware-name property,
         * if a zap-shader is required, rather than falling back
         * to a firmware name specified in gpulist.
         *
         * Because the firmware is signed with a (potentially)
         * device specific key, having the name come from gpulist
         * was a bad idea, and is only provided for backwards
         * compatibility for older targets.
         */
        return -ENODEV;
    }

    if (IS_ERR(fw)) {
        DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
        return PTR_ERR(fw);
    }

    /* Figure out how much memory we need */
    mem_size = qcom_mdt_get_size(fw);
    if (mem_size < 0) {
        ret = mem_size;
        goto out;
    }

    if (mem_size > resource_size(&r)) {
        DRM_DEV_ERROR(dev,
            "memory region is too small to load the MDT\n");
        ret = -E2BIG;
        goto out;
    }

    /* Allocate memory for the firmware image */
    mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
    if (!mem_region) {
        ret = -ENOMEM;
        goto out;
    }

    /*
     * Load the rest of the MDT
     *
     * Note that we could be dealing with two different paths, since
     * with upstream linux-firmware it would be in a qcom/ subdir..
     * adreno_request_fw() handles this, but qcom_mdt_load() does
     * not.  But since we've already gotten through adreno_request_fw()
     * we know which of the two cases it is:
     */
    if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
        ret = qcom_mdt_load(dev, fw, fwname, pasid,
                mem_region, mem_phys, mem_size, NULL);
    } else {
        char *newname;

        newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
        if (!newname) {
            ret = -ENOMEM;
            goto out;
        }

        ret = qcom_mdt_load(dev, fw, newname, pasid,
                mem_region, mem_phys, mem_size, NULL);
        kfree(newname);
    }
    if (ret)
        goto out;

    /* Send the image to the secure world */
    ret = qcom_scm_pas_auth_and_reset(pasid);

    /*
     * If the scm call returns -EOPNOTSUPP we assume that this target
     * doesn't need/support the zap shader so quietly fail
     */
    if (ret == -EOPNOTSUPP)
        zap_available = false;
    else if (ret)
        DRM_DEV_ERROR(dev, "Unable to authorize the image\n");

out:
    if (mem_region)
        memunmap(mem_region);

    release_firmware(fw);

    return ret;
}

int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
{
    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    struct platform_device *pdev = gpu->pdev;

    /* Short cut if we determine the zap shader isn't available/needed */
    if (!zap_available)
        return -ENODEV;

    /* We need SCM to be able to load the firmware */
    if (!qcom_scm_is_available()) {
        DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
        return -EPROBE_DEFER;
    }

    return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}

void adreno_set_llc_attributes(struct iommu_domain *iommu)
{
    iommu_set_pgtable_quirks(iommu, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA);
}

struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
        struct platform_device *pdev)
{
    struct iommu_domain *iommu;
    struct msm_mmu *mmu;
    struct msm_gem_address_space *aspace;
    u64 start, size;

    iommu = iommu_domain_alloc(&platform_bus_type);
    if (!iommu)
        return NULL;

    mmu = msm_iommu_new(&pdev->dev, iommu);
    if (IS_ERR(mmu)) {
        iommu_domain_free(iommu);
        return ERR_CAST(mmu);
    }

    /*
     * Use the aperture start or SZ_16M, whichever is greater. This will
     * ensure that we align with the allocated pagetable range while still
     * allowing room in the lower 32 bits for GMEM and whatnot
     */
    start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
    size = iommu->geometry.aperture_end - start + 1;

    aspace = msm_gem_address_space_create(mmu, "gpu",
        start & GENMASK_ULL(48, 0), size);

    if (IS_ERR(aspace) && !IS_ERR(mmu))
        mmu->funcs->destroy(mmu);

    return aspace;
}

u64 adreno_private_address_space_size(struct msm_gpu *gpu)
{
    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

    if (address_space_size)
        return address_space_size;

    if (adreno_gpu->info->address_space_size)
        return adreno_gpu->info->address_space_size;

    return SZ_4G;
}

int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
             uint32_t param, uint64_t *value, uint32_t *len)
{
    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

    /* No pointer params yet */
    if (*len != 0)
        return -EINVAL;

    switch (param) {
    case MSM_PARAM_GPU_ID:
        *value = adreno_gpu->info->revn;
        return 0;
    case MSM_PARAM_GMEM_SIZE:
        *value = adreno_gpu->gmem;
        return 0;
    case MSM_PARAM_GMEM_BASE:
        *value = !adreno_is_a650_family(adreno_gpu) ? 0x100000 : 0;
        return 0;
    case MSM_PARAM_CHIP_ID:
        /* Pack the chip id: patchid[7:0], minor[15:8], major[23:16], core[31:24] */
        *value = (uint64_t)adreno_gpu->rev.patchid |
             ((uint64_t)adreno_gpu->rev.minor << 8) |
             ((uint64_t)adreno_gpu->rev.major << 16) |
             ((uint64_t)adreno_gpu->rev.core  << 24);
        if (!adreno_gpu->info->revn)
            *value |= ((uint64_t) adreno_gpu->speedbin) << 32;
        return 0;
    case MSM_PARAM_MAX_FREQ:
        *value = adreno_gpu->base.fast_rate;
        return 0;
    case MSM_PARAM_TIMESTAMP:
        if (adreno_gpu->funcs->get_timestamp) {
            int ret;

            pm_runtime_get_sync(&gpu->pdev->dev);
            ret = adreno_gpu->funcs->get_timestamp(gpu, value);
            pm_runtime_put_autosuspend(&gpu->pdev->dev);

            return ret;
        }
        return -EINVAL;
    case MSM_PARAM_PRIORITIES:
        *value = gpu->nr_rings * NR_SCHED_PRIORITIES;
        return 0;
    case MSM_PARAM_PP_PGTABLE:
        *value = 0;
        return 0;
    case MSM_PARAM_FAULTS:
        if (ctx->aspace)
            *value = gpu->global_faults + ctx->aspace->faults;
        else
            *value = gpu->global_faults;
        return 0;
    case MSM_PARAM_SUSPENDS:
        *value = gpu->suspend_count;
        return 0;
    case MSM_PARAM_VA_START:
        if (ctx->aspace == gpu->aspace)
            return -EINVAL;
        *value = ctx->aspace->va_start;
        return 0;
    case MSM_PARAM_VA_SIZE:
        if (ctx->aspace == gpu->aspace)
            return -EINVAL;
        *value = ctx->aspace->va_size;
        return 0;
    default:
        DBG("%s: invalid param: %u", gpu->name, param);
        return -EINVAL;
    }
}

int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
             uint32_t param, uint64_t value, uint32_t len)
{
    switch (param) {
    case MSM_PARAM_COMM:
    case MSM_PARAM_CMDLINE:
        /* kstrdup_quotable_cmdline() limits to PAGE_SIZE, so
         * that should be a reasonable upper bound
         */
        if (len > PAGE_SIZE)
            return -EINVAL;
        break;
    default:
        if (len != 0)
            return -EINVAL;
    }

    switch (param) {
    case MSM_PARAM_COMM:
    case MSM_PARAM_CMDLINE: {
        char *str, **paramp;

        str = kmalloc(len + 1, GFP_KERNEL);
        if (!str)
            return -ENOMEM;

        if (copy_from_user(str, u64_to_user_ptr(value), len)) {
            kfree(str);
            return -EFAULT;
        }

        /* Ensure string is null terminated: */
        str[len] = '\0';

        if (param == MSM_PARAM_COMM) {
            paramp = &ctx->comm;
        } else {
            paramp = &ctx->cmdline;
        }

        kfree(*paramp);
        *paramp = str;

        return 0;
    }
    case MSM_PARAM_SYSPROF:
        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;
        return msm_file_private_set_sysprof(ctx, gpu, value);
    default:
        DBG("%s: invalid param: %u", gpu->name, param);
        return -EINVAL;
    }
}

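/*
 * Request GPU microcode, trying up to three locations in order: a direct
 * load of qcom/<fwname>, a direct load of the legacy unprefixed <fwname>,
 * and finally request_firmware() so the usermode helper can be used.  The
 * first location that works is remembered in fwloc and reused for all
 * subsequent requests.
 */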
const struct firmware *
adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
    struct drm_device *drm = adreno_gpu->base.dev;
    const struct firmware *fw = NULL;
    char *newname;
    int ret;

    newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
    if (!newname)
        return ERR_PTR(-ENOMEM);

    /*
     * Try first to load from qcom/$fwfile using a direct load (to avoid
     * a potential timeout waiting for usermode helper)
     */
    if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
        (adreno_gpu->fwloc == FW_LOCATION_NEW)) {

        ret = request_firmware_direct(&fw, newname, drm->dev);
        if (!ret) {
            DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
                newname);
            adreno_gpu->fwloc = FW_LOCATION_NEW;
            goto out;
        } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
            DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
                newname, ret);
            fw = ERR_PTR(ret);
            goto out;
        }
    }

    /*
     * Then try the legacy location without qcom/ prefix
     */
    if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
        (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {

        ret = request_firmware_direct(&fw, fwname, drm->dev);
        if (!ret) {
            DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
                fwname);
            adreno_gpu->fwloc = FW_LOCATION_LEGACY;
            goto out;
        } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
            DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
                fwname, ret);
            fw = ERR_PTR(ret);
            goto out;
        }
    }

    /*
     * Finally fall back to request_firmware() for cases where the
     * usermode helper is needed (I think mainly android)
     */
    if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
        (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {

        ret = request_firmware(&fw, newname, drm->dev);
        if (!ret) {
            DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
                newname);
            adreno_gpu->fwloc = FW_LOCATION_HELPER;
            goto out;
        } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
            DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
                newname, ret);
            fw = ERR_PTR(ret);
            goto out;
        }
    }

    DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
    fw = ERR_PTR(-ENOENT);
out:
    kfree(newname);
    return fw;
}

int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
        const struct firmware *fw;

        if (!adreno_gpu->info->fw[i])
            continue;

        /* Skip if the firmware has already been loaded */
        if (adreno_gpu->fw[i])
            continue;

        fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
        if (IS_ERR(fw))
            return PTR_ERR(fw);

        adreno_gpu->fw[i] = fw;
    }

    return 0;
}

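/*
 * Upload a firmware image into a GPU-readable buffer object, returning the
 * BO and its GPU iova.  The leading dword of the image is a header word,
 * not payload, and is skipped.
 */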
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
        const struct firmware *fw, u64 *iova)
{
    struct drm_gem_object *bo;
    void *ptr;

    ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
        MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);

    if (IS_ERR(ptr))
        return ERR_CAST(ptr);

    memcpy(ptr, &fw->data[4], fw->size - 4);

    msm_gem_put_vaddr(bo);

    return bo;
}

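/*
 * Common hardware (re)initialization: make sure the microcode is loaded,
 * then reset the ringbuffer pointers and scrub any impossible fence value
 * left behind by a hung GPU.
 */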
int adreno_hw_init(struct msm_gpu *gpu)
{
    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    int ret, i;

    VERB("%s", gpu->name);

    ret = adreno_load_fw(adreno_gpu);
    if (ret)
        return ret;

    for (i = 0; i < gpu->nr_rings; i++) {
        struct msm_ringbuffer *ring = gpu->rb[i];

        if (!ring)
            continue;

        ring->cur = ring->start;
        ring->next = ring->start;
        ring->memptrs->rptr = 0;

        /* Detect and clean up an impossible fence, ie. if GPU managed
         * to scribble something invalid, we don't want that to confuse
         * us into mistakenly believing that submits have completed.
         */
        if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) {
            ring->memptrs->fence = ring->fctx->last_fence;
        }
    }

    return 0;
}

/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
        struct msm_ringbuffer *ring)
{
    struct msm_gpu *gpu = &adreno_gpu->base;

    return gpu->funcs->get_rptr(gpu, ring);
}

struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
    return gpu->rb[0];
}

void adreno_recover(struct msm_gpu *gpu)
{
    struct drm_device *dev = gpu->dev;
    int ret;

    // XXX pm-runtime??  we *need* the device to be off after this
    // so maybe continuing to call ->pm_suspend/resume() is better?

    gpu->funcs->pm_suspend(gpu);
    gpu->funcs->pm_resume(gpu);

    ret = msm_gpu_hw_init(gpu);
    if (ret) {
        DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
        /* hmm, oh well? */
    }
}

void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg)
{
    uint32_t wptr;

    /* Copy the shadow to the actual register */
    ring->cur = ring->next;

    /*
     * Mask wptr value that we calculate to fit in the HW range. This is
     * to account for the possibility that the last command fit exactly into
     * the ringbuffer and rb->next hasn't wrapped to zero yet
     */
    wptr = get_wptr(ring);

    /* ensure writes to ringbuffer have hit system memory: */
    mb();

    gpu_write(gpu, reg, wptr);
}

bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    uint32_t wptr = get_wptr(ring);

    /* wait for CP to drain ringbuffer: */
    if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
        return true;

    /* TODO maybe we need to reset GPU here to recover from hang? */
    DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
        gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);

    return false;
}

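/*
 * Capture a snapshot of GPU state for a crash dump: per-ring fence/pointer
 * state plus the non-zero prefix of each ringbuffer, and (for targets that
 * don't collect their own) the register ranges listed in
 * adreno_gpu->registers.
 */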
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
{
    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    int i, count = 0;

    WARN_ON(!mutex_is_locked(&gpu->lock));

    kref_init(&state->ref);

    ktime_get_real_ts64(&state->time);

    for (i = 0; i < gpu->nr_rings; i++) {
        int size = 0, j;

        state->ring[i].fence = gpu->rb[i]->memptrs->fence;
        state->ring[i].iova = gpu->rb[i]->iova;
        state->ring[i].seqno = gpu->rb[i]->fctx->last_fence;
        state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
        state->ring[i].wptr = get_wptr(gpu->rb[i]);

        /* Copy at least 'wptr' dwords of the data */
        size = state->ring[i].wptr;

        /* After wptr find the last non zero dword to save space */
        for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
            if (gpu->rb[i]->start[j])
                size = j + 1;

        if (size) {
            state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
            if (state->ring[i].data) {
                memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
                state->ring[i].data_size = size << 2;
            }
        }
    }

    /* Some targets prefer to collect their own registers */
    if (!adreno_gpu->registers)
        return 0;

    /* Count the number of registers */
    for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
        count += adreno_gpu->registers[i + 1] -
            adreno_gpu->registers[i] + 1;

    state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
    if (state->registers) {
        int pos = 0;

        for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
            u32 start = adreno_gpu->registers[i];
            u32 end   = adreno_gpu->registers[i + 1];
            u32 addr;

            for (addr = start; addr <= end; addr++) {
                state->registers[pos++] = addr;
                state->registers[pos++] = gpu_read(gpu, addr);
            }
        }

        state->nr_registers = count;
    }

    return 0;
}

void adreno_gpu_state_destroy(struct msm_gpu_state *state)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(state->ring); i++)
        kvfree(state->ring[i].data);

    for (i = 0; state->bos && i < state->nr_bos; i++)
        kvfree(state->bos[i].data);

    kfree(state->bos);
    kfree(state->comm);
    kfree(state->cmd);
    kfree(state->registers);
}

static void adreno_gpu_state_kref_destroy(struct kref *kref)
{
    struct msm_gpu_state *state = container_of(kref,
        struct msm_gpu_state, ref);

    adreno_gpu_state_destroy(state);
    kfree(state);
}

int adreno_gpu_state_put(struct msm_gpu_state *state)
{
    if (IS_ERR_OR_NULL(state))
        return 1;

    return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
}

#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)

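/*
 * Encode a buffer as ascii85 so binary ring/BO contents stay printable in
 * the YAML-style crash dump emitted by adreno_show().
 */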
static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
{
    void *buf;
    size_t buf_itr = 0, buffer_size;
    char out[ASCII85_BUFSZ];
    long l;
    int i;

    if (!src || !len)
        return NULL;

    l = ascii85_encode_len(len);

    /*
     * Ascii85 outputs either a 5 byte string or a 1 byte string. So we
     * account for the worst case of 5 bytes per dword plus the 1 for '\0'
     */
    buffer_size = (l * 5) + 1;

    buf = kvmalloc(buffer_size, GFP_KERNEL);
    if (!buf)
        return NULL;

    for (i = 0; i < l; i++)
        buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
                ascii85_encode(src[i], out));

    return buf;
}

/* len is expected to be in bytes */
void adreno_show_object(struct drm_printer *p, void **ptr, int len,
        bool *encoded)
{
    if (!*ptr || !len)
        return;

    if (!*encoded) {
        long datalen, i;
        u32 *buf = *ptr;

        /*
         * Only dump the non-zero part of the buffer - rarely will
         * any data completely fill the entire allocated size of
         * the buffer.
         */
        for (datalen = 0, i = 0; i < len >> 2; i++)
            if (buf[i])
                datalen = ((i + 1) << 2);

        /*
         * If we reach here, then the originally captured binary buffer
         * will be replaced with the ascii85 encoded string
         */
        *ptr = adreno_gpu_ascii85_encode(buf, datalen);

        kvfree(buf);

        *encoded = true;
    }

    if (!*ptr)
        return;

    drm_puts(p, "    data: !!ascii85 |\n");
    drm_puts(p, "     ");

    drm_puts(p, *ptr);

    drm_puts(p, "\n");
}

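/*
 * Print a captured GPU state in the YAML-ish format consumed by the
 * devcoredump/crashdump tooling.
 */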
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
        struct drm_printer *p)
{
    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    int i;

    if (IS_ERR_OR_NULL(state))
        return;

    drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
            adreno_gpu->info->revn, adreno_gpu->rev.core,
            adreno_gpu->rev.major, adreno_gpu->rev.minor,
            adreno_gpu->rev.patchid);
    /*
     * State collected due to an iova fault will have a non-zero TTBR0,
     * so use that to decide whether to print the fault-related info.
     */
    if (state->fault_info.ttbr0) {
        const struct msm_gpu_fault_info *info = &state->fault_info;

        drm_puts(p, "fault-info:\n");
        drm_printf(p, "  - ttbr0=%.16llx\n", info->ttbr0);
        drm_printf(p, "  - iova=%.16lx\n", info->iova);
        drm_printf(p, "  - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
        drm_printf(p, "  - type=%s\n", info->type);
        drm_printf(p, "  - source=%s\n", info->block);
    }

    drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);

    drm_puts(p, "ringbuffer:\n");

    for (i = 0; i < gpu->nr_rings; i++) {
        drm_printf(p, "  - id: %d\n", i);
        drm_printf(p, "    iova: 0x%016llx\n", state->ring[i].iova);
        drm_printf(p, "    last-fence: %u\n", state->ring[i].seqno);
        drm_printf(p, "    retired-fence: %u\n", state->ring[i].fence);
        drm_printf(p, "    rptr: %u\n", state->ring[i].rptr);
        drm_printf(p, "    wptr: %u\n", state->ring[i].wptr);
        drm_printf(p, "    size: %u\n", MSM_GPU_RINGBUFFER_SZ);

        adreno_show_object(p, &state->ring[i].data,
            state->ring[i].data_size, &state->ring[i].encoded);
    }

    if (state->bos) {
        drm_puts(p, "bos:\n");

        for (i = 0; i < state->nr_bos; i++) {
            drm_printf(p, "  - iova: 0x%016llx\n",
                state->bos[i].iova);
            drm_printf(p, "    size: %zd\n", state->bos[i].size);
            drm_printf(p, "    name: %-32s\n", state->bos[i].name);

            adreno_show_object(p, &state->bos[i].data,
                state->bos[i].size, &state->bos[i].encoded);
        }
    }

    if (state->nr_registers) {
        drm_puts(p, "registers:\n");

        for (i = 0; i < state->nr_registers; i++) {
            drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
                state->registers[i * 2] << 2,
                state->registers[(i * 2) + 1]);
        }
    }
}
#endif

/* Dump common gpu status and scratch registers on any hang, to make
 * the hangcheck logs more useful.  The scratch registers seem always
 * safe to read when GPU has hung (unlike some other regs, depending
 * on how the GPU hung), and they are useful to match up to cmdstream
 * dumps when debugging hangs:
 */
void adreno_dump_info(struct msm_gpu *gpu)
{
    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    int i;

    printk("revision: %d (%d.%d.%d.%d)\n",
            adreno_gpu->info->revn, adreno_gpu->rev.core,
            adreno_gpu->rev.major, adreno_gpu->rev.minor,
            adreno_gpu->rev.patchid);

    for (i = 0; i < gpu->nr_rings; i++) {
        struct msm_ringbuffer *ring = gpu->rb[i];

        printk("rb %d: fence:    %d/%d\n", i,
            ring->memptrs->fence,
            ring->fctx->last_fence);

        printk("rptr:     %d\n", get_rptr(adreno_gpu, ring));
        printk("rb wptr:  %d\n", get_wptr(ring));
    }
}

/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    int i;

    if (!adreno_gpu->registers)
        return;

    /* dump these out in a form that can be parsed by demsm: */
    printk("IO:region %s 00000000 00020000\n", gpu->name);
    for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
        uint32_t start = adreno_gpu->registers[i];
        uint32_t end   = adreno_gpu->registers[i+1];
        uint32_t addr;

        for (addr = start; addr <= end; addr++) {
            uint32_t val = gpu_read(gpu, addr);
            printk("IO:R %08x %08x\n", addr<<2, val);
        }
    }
}

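/*
 * Free dwords in the ring, computed so that one slot always stays unused:
 * this keeps wptr == rptr unambiguous (it always means "empty", never
 * "full").
 */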
static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
    struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
    uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
    /* Use ring->next to calculate free size */
    uint32_t wptr = ring->next - ring->start;
    uint32_t rptr = get_rptr(adreno_gpu, ring);
    return (rptr + (size - 1) - wptr) % size;
}

void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
    if (spin_until(ring_freewords(ring) >= ndwords))
        DRM_DEV_ERROR(ring->gpu->dev->dev,
            "timeout waiting for space in ringbuffer %d\n",
            ring->id);
}

/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
static int adreno_get_legacy_pwrlevels(struct device *dev)
{
    struct device_node *child, *node;
    int ret;

    node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
    if (!node) {
        DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n");
        return -ENXIO;
    }

    for_each_child_of_node(node, child) {
        unsigned int val;

        ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
        if (ret)
            continue;

        /*
         * Skip the intentionally bogus clock value found at the bottom
         * of most legacy frequency tables
         */
        if (val != 27000000)
            dev_pm_opp_add(dev, val, 0);
    }

    of_node_put(node);

    return 0;
}

static void adreno_get_pwrlevels(struct device *dev,
        struct msm_gpu *gpu)
{
    unsigned long freq = ULONG_MAX;
    struct dev_pm_opp *opp;
    int ret;

    gpu->fast_rate = 0;

    /* You down with OPP? */
    if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
        ret = adreno_get_legacy_pwrlevels(dev);
    else {
        ret = devm_pm_opp_of_add_table(dev);
        if (ret)
            DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
    }

    if (!ret) {
        /* Find the fastest defined rate */
        opp = dev_pm_opp_find_freq_floor(dev, &freq);
        if (!IS_ERR(opp)) {
            gpu->fast_rate = freq;
            dev_pm_opp_put(opp);
        }
    }

    if (!gpu->fast_rate) {
        dev_warn(dev,
            "Could not find a clock rate. Using a reasonable default\n");
        /* Pick a suitably safe clock speed for any target */
        gpu->fast_rate = 200000000;
    }

    DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
}

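/*
 * On targets where GMEM is carved out of the on-chip OCMEM pool, claim an
 * OCMEM allocation and adjust the advertised GMEM size to what was
 * actually granted.
 */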
int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
              struct adreno_ocmem *adreno_ocmem)
{
    struct ocmem_buf *ocmem_hdl;
    struct ocmem *ocmem;

    ocmem = of_get_ocmem(dev);
    if (IS_ERR(ocmem)) {
        if (PTR_ERR(ocmem) == -ENODEV) {
            /*
             * Return success since either the ocmem property was
             * not specified in device tree, or ocmem support is
             * not compiled into the kernel.
             */
            return 0;
        }

        return PTR_ERR(ocmem);
    }

    ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem);
    if (IS_ERR(ocmem_hdl))
        return PTR_ERR(ocmem_hdl);

    adreno_ocmem->ocmem = ocmem;
    adreno_ocmem->base = ocmem_hdl->addr;
    adreno_ocmem->hdl = ocmem_hdl;
    adreno_gpu->gmem = ocmem_hdl->len;

    return 0;
}

void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem)
{
    if (adreno_ocmem && adreno_ocmem->base)
        ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
               adreno_ocmem->hdl);
}

int adreno_read_speedbin(struct device *dev, u32 *speedbin)
{
    return nvmem_cell_read_variable_le_u32(dev, "speed_bin", speedbin);
}

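/*
 * Common probe-time setup shared by all adreno generations: match the
 * revision against the gpulist info table, read the speed bin, populate
 * the OPP table, and hand off to msm_gpu_init() with runtime-PM
 * autosuspend configured.
 */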
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        struct adreno_gpu *adreno_gpu,
        const struct adreno_gpu_funcs *funcs, int nr_rings)
{
    struct device *dev = &pdev->dev;
    struct adreno_platform_config *config = dev->platform_data;
    struct msm_gpu_config adreno_gpu_config = { 0 };
    struct msm_gpu *gpu = &adreno_gpu->base;
    struct adreno_rev *rev = &config->rev;
    const char *gpu_name;
    u32 speedbin;

    adreno_gpu->funcs = funcs;
    adreno_gpu->info = adreno_info(config->rev);
    adreno_gpu->gmem = adreno_gpu->info->gmem;
    adreno_gpu->revn = adreno_gpu->info->revn;
    adreno_gpu->rev = *rev;

    if (adreno_read_speedbin(dev, &speedbin) || !speedbin)
        speedbin = 0xffff;
    adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin);

    gpu_name = adreno_gpu->info->name;
    if (!gpu_name) {
        gpu_name = devm_kasprintf(dev, GFP_KERNEL, "%d.%d.%d.%d",
                rev->core, rev->major, rev->minor,
                rev->patchid);
        if (!gpu_name)
            return -ENOMEM;
    }

    adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";

    adreno_gpu_config.nr_rings = nr_rings;

    adreno_get_pwrlevels(dev, gpu);

    pm_runtime_set_autosuspend_delay(dev,
        adreno_gpu->info->inactive_period);
    pm_runtime_use_autosuspend(dev);

    return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
            gpu_name, &adreno_gpu_config);
}

void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
    struct msm_gpu *gpu = &adreno_gpu->base;
    struct msm_drm_private *priv = gpu->dev->dev_private;
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
        release_firmware(adreno_gpu->fw[i]);

    if (pm_runtime_enabled(&priv->gpu_pdev->dev))
        pm_runtime_disable(&priv->gpu_pdev->dev);

    msm_gpu_cleanup(&adreno_gpu->base);
}