#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include <drm/drm_gem.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
#include "msm_mmu.h"

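/* Fault handler: mark the GMU as hung, stop hangcheck and queue GPU recovery */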
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	gmu->hung = true;

	/* Turn off the hangcheck timer while we are resetting */
	del_timer(&gpu->hangcheck_timer);

	/* Queue the GPU handler because we need to treat this as a recovery */
	kthread_queue_work(gpu->worker, &gpu->recover_work);
}

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		a6xx_gmu_fault(gmu);
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}

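/* HFI irq: the GMU raises CM3_FAULT here when its firmware has crashed */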
static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		a6xx_gmu_fault(gmu);
	}

	return IRQ_HANDLED;
}

bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}

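/* Check to see if the GX rail is still powered */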
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}

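/*
 * Request a new GPU frequency: over HFI on current firmware, or via the
 * legacy DCVS out-of-band handshake on older firmware.
 */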
void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
		       bool suspended)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index;
	unsigned long gpu_freq;
	int ret = 0;

	gpu_freq = dev_pm_opp_get_freq(opp);

	if (gpu_freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (gpu_freq == gmu->gpu_freqs[perf_index])
			break;

	gmu->current_perf_index = perf_index;
	gmu->freq = gmu->gpu_freqs[perf_index];

	trace_msm_gmu_freq_change(gmu->freq, perf_index);

	/*
	 * This can get called from devfreq while the hardware is idle. Don't
	 * bring up the power if it isn't already active. All we're doing here
	 * is updating the frequency so that when we come back online we're at
	 * the right rate.
	 */
	if (suspended)
		return;

	if (!gmu->legacy) {
		a6xx_hfi_set_freq(gmu, perf_index);
		dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
		return;
	}

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
			((3 & 0xf) << 28) | perf_index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
}

unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

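/* Wait for the GMU to get to its most idle state */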
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
	return spin_until(a6xx_gmu_check_idle_level(gmu));
}

static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;
	u32 mask, reset_val;

	/*
	 * The boot-complete signature depends on the firmware version (read
	 * from the end of DTCM): older firmware writes 0xbabeface to
	 * FW_INIT_RESULT, newer firmware only sets the low bits.
	 */
	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
	if (val <= 0x20010004) {
		mask = 0xffffffff;
		reset_val = 0xbabeface;
	} else {
		mask = 0x1ff;
		reset_val = 0x100;
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);

	/* Set the log wptr index */
	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		(val & mask) == reset_val, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}

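/* Tell the GMU firmware to initialize the HFI queues and wait for its ack */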
static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}

struct a6xx_gmu_oob_bits {
	int set, ack, set_new, ack_new, clear, clear_new;
	const char *name;
};

/*
 * These are the interrupt / ack bits for each OOB request that are set in
 * a6xx_gmu_set_oob and a6xx_gmu_clear_oob
 */
static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
	[GMU_OOB_GPU_SET] = {
		.name = "GPU_SET",
		.set = 16,
		.ack = 24,
		.set_new = 30,
		.ack_new = 31,
		.clear = 24,
		.clear_new = 31,
	},

	[GMU_OOB_PERFCOUNTER_SET] = {
		.name = "PERFCOUNTER",
		.set = 17,
		.ack = 25,
		.set_new = 28,
		.ack_new = 30,
		.clear = 25,
		.clear_new = 29,
	},

	[GMU_OOB_BOOT_SLUMBER] = {
		.name = "BOOT_SLUMBER",
		.set = 22,
		.ack = 30,
		.clear = 30,
	},

	[GMU_OOB_DCVS_SET] = {
		.name = "GPU_DCVS",
		.set = 23,
		.ack = 31,
		.clear = 31,
	},
};

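/* Trigger a OOB (out of band) request to the GMU */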
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;

	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));

	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
		return -EINVAL;

	if (gmu->legacy) {
		request = a6xx_gmu_oob_bits[state].set;
		ack = a6xx_gmu_oob_bits[state].ack;
	} else {
		request = a6xx_gmu_oob_bits[state].set_new;
		ack = a6xx_gmu_oob_bits[state].ack_new;
		if (!request || !ack) {
			DRM_DEV_ERROR(gmu->dev,
				      "Invalid non-legacy GMU request %s\n",
				      a6xx_gmu_oob_bits[state].name);
			return -EINVAL;
		}
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
			a6xx_gmu_oob_bits[state].name,
			gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}

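/* Clear a pending OOB state in the GMU */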
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int bit;

	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));

	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
		return;

	if (gmu->legacy)
		bit = a6xx_gmu_oob_bits[state].clear;
	else
		bit = a6xx_gmu_oob_bits[state].clear_new;

	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}

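/* Enable CPU control of SPTP power collapse */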
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	if (!gmu->legacy)
		return 0;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	/* Propagate the timeout rather than silently returning success */
	return ret;
}

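/* Disable CPU control of SPTP power collapse */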
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	if (!gmu->legacy)
		return;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}

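/* Let the GMU know we are starting a boot sequence */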
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

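/* Let the GMU know that we are about to go into slumber */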
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GPU isn't held high */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Turn off SPTPRAC if we were using it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	if (!gmu->legacy) {
		ret = a6xx_hfi_send_prep_slumber(gmu);
		goto out;
	}

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

out:
	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}

static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Set up CX GMU counter 0 to count busy ticks */
	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

	/* Enable the power counter */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
	return 0;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}

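/* PDC registers are word-indexed, so scale the offset to a byte address */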
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);

static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = NULL;
	uint32_t pdc_address_offset;
	bool pdc_in_aop = false;

	if (IS_ERR(pdcptr))
		goto err;

	if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
		pdc_in_aop = true;
	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
		pdc_address_offset = 0x30090;
	else if (adreno_is_a619(adreno_gpu))
		pdc_address_offset = 0x300a0;
	else
		pdc_address_offset = 0x30080;

	if (!pdc_in_aop) {
		seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
		if (IS_ERR(seqptr))
			goto err;
	}

	/* Disable SDE clock gating */
	gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	if (adreno_is_a650_family(adreno_gpu)) {
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
	} else {
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
	}

	if (pdc_in_aop)
		goto setup_pdc;

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) ||
			adreno_is_a650_family(adreno_gpu))
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
	else
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
setup_pdc:
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* Ensure no writes happen before the uCode is fully written */
	wmb();

err:
	if (!IS_ERR_OR_NULL(pdcptr))
		iounmap(pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		iounmap(seqptr);
}

/*
 * The lowest 16 bits of this value are the number of XO clock cycles for the
 * main hysteresis: 0x1680 cycles, i.e. 300 us at the 19.2 MHz XO. The upper
 * 16 bits are for the shorter hysteresis that happens after main - 0xa cycles.
 */
#define GMU_PWR_COL_HYST 0x000a1680

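/* Configure power collapse and RPMh voting for the chosen GMU idle level */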
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer and caches at boot */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
	gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
	gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		fallthrough;
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}

/* A block of memory in the GMU firmware image, prefixed by this header */
struct block_header {
	u32 addr;
	u32 size;
	u32 type;
	u32 value;
	u32 data[];
};

static bool in_range(u32 addr, u32 start, u32 size)
{
	return addr >= start && addr < start + size;
}

/* Copy a firmware block into the GMU BO if its address falls inside it */
static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
{
	if (!in_range(blk->addr, bo->iova, bo->size))
		return false;

	memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
	return true;
}

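/*
 * Load the GMU firmware: legacy firmware is a flat image written straight
 * into ITCM, while newer firmware is a list of blocks targeting ITCM, DTCM
 * or one of the preallocated GMU buffer objects.
 */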
static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
	const struct block_header *blk;
	u32 reg_offset;

	u32 itcm_base = 0x00000000;
	u32 dtcm_base = 0x00040000;

	if (adreno_is_a650_family(adreno_gpu))
		dtcm_base = 0x10004000;

	if (gmu->legacy) {
		/* Sanity check the size of the firmware that was loaded */
		if (fw_image->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
			(u32*) fw_image->data, fw_image->size);
		return 0;
	}

	for (blk = (const struct block_header *) fw_image->data;
	     (const u8*) blk < fw_image->data + fw_image->size;
	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
		if (blk->size == 0)
			continue;

		if (in_range(blk->addr, itcm_base, SZ_16K)) {
			reg_offset = (blk->addr - itcm_base) >> 2;
			gmu_write_bulk(gmu,
				REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
				blk->data, blk->size);
		} else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
			reg_offset = (blk->addr - dtcm_base) >> 2;
			gmu_write_bulk(gmu,
				REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
				blk->data, blk->size);
		} else if (!fw_block_mem(&gmu->icache, blk) &&
			   !fw_block_mem(&gmu->dcache, blk) &&
			   !fw_block_mem(&gmu->dummy, blk)) {
			DRM_DEV_ERROR(gmu->dev,
				"failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
				blk->addr, blk->size, blk->data[0]);
		}
	}

	return 0;
}

static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int ret;
	u32 chipid;

	if (adreno_is_a650_family(adreno_gpu)) {
		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
	}

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		ret = a6xx_gmu_fw_load(gmu);
		if (ret)
			return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	/* Pass the chip id to the GMU firmware */
	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
		  gmu->log.iova | (gmu->log.size / SZ_4K - 1));

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	if (gmu->legacy) {
		ret = a6xx_gmu_gfx_rail_on(gmu);
		if (ret)
			return ret;
	}

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* Make sure all of the above posts before anything else runs */
	wmb();

	return 0;
}

#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}

static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
	u32 val;

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 10000);
}

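/* Force the GMU off in case it isn't responsive */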
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	a6xx_gmu_rpmh_off(gmu);
}

static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
	struct dev_pm_opp *gpu_opp;
	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
	if (IS_ERR(gpu_opp))
		return;

	gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
	a6xx_gmu_set_freq(gpu, gpu_opp, false);
	dev_pm_opp_put(gpu_opp);
}

static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
	struct dev_pm_opp *gpu_opp;
	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
	if (IS_ERR(gpu_opp))
		return;

	dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
	dev_pm_opp_put(gpu_opp);
}

int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
		return 0;

	gmu->hung = false;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/*
	 * "enable" the GX power domain which won't actually do anything but it
	 * will make sure that the refcounting is correct in case we need to
	 * bring down the GX after a GMU failure
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_get_sync(gmu->gxpd);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	clk_set_rate(gmu->hub_clk, 150000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret) {
		pm_runtime_put(gmu->gxpd);
		pm_runtime_put(gmu->dev);
		return ret;
	}

	/* Set the bus quota to a reasonable value for boot */
	a6xx_gmu_set_initial_bw(gpu, gmu);

	/* Enable the GMU interrupt */
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
	enable_irq(gmu->gmu_irq);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	/*
	 * Warm boot path does not work on newer GPUs
	 * Presumably this is because icache/dcache regions must be restored
	 */
	if (!gmu->legacy)
		status = GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);
	if (ret)
		goto out;

	/*
	 * Turn on the GMU firmware fault interrupt after we know the boot
	 * sequence is successful
	 */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
	enable_irq(gmu->hfi_irq);

	/* Set the GPU to the current freq */
	a6xx_gmu_set_initial_freq(gpu, gmu);

out:
	/* On failure, shut down the GMU to leave it in a good state */
	if (ret) {
		disable_irq(gmu->gmu_irq);
		a6xx_rpmh_stop(gmu);
		pm_runtime_put(gmu->gxpd);
		pm_runtime_put(gmu->dev);
	}

	return ret;
}

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->initialized)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}

#define GBIF_CLIENT_HALT_MASK	BIT(0)
#define GBIF_ARB_HALT_MASK	BIT(1)

static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
{
	struct msm_gpu *gpu = &adreno_gpu->base;

	if (!a6xx_has_gbif(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
			0xf) == 0xf);
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);

		return;
	}

	/* Halt new client requests on GBIF */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
		(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);

	/* Halt all AXI requests on GBIF */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
		(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);

	/* The GBIF halt needs to be explicitly cleared */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
}

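/* Gracefully try to shut down the GMU and by extension the GPU */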
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	u32 val;

	/*
	 * The GMU may still be in slumber unless the GPU started so check and
	 * skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(gmu);

		/* If the GMU isn't responding assume it is hung */
		if (ret) {
			a6xx_gmu_force_off(gmu);
			return;
		}

		a6xx_bus_clear_pending_transactions(adreno_gpu);

		/* Tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */
		if (ret)
			DRM_DEV_ERROR(gmu->dev,
				"Unable to slumber GMU: status = 0%x/0%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);
}

int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct msm_gpu *gpu = &a6xx_gpu->base.base;

	if (!pm_runtime_active(gmu->dev))
		return 0;

	/*
	 * Force the GMU off if we detected a hang, otherwise try to shut it
	 * down gracefully
	 */
	if (gmu->hung)
		a6xx_gmu_force_off(gmu);
	else
		a6xx_gmu_shutdown(gmu);

	/* Remove the bus vote */
	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);

	/*
	 * Make sure the GX domain is off before turning off the GMU (CX)
	 * domain. Usually the GMU does this but only if the shutdown sequence
	 * was successful
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_put_sync(gmu->gxpd);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}

static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->log.obj, gmu->aspace);

	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
	msm_gem_address_space_put(gmu->aspace);
}

static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
		size_t size, u64 iova, const char *name)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct drm_device *dev = a6xx_gpu->base.base.dev;
	uint32_t flags = MSM_BO_WC;
	u64 range_start, range_end;
	int ret;

	size = PAGE_ALIGN(size);
	if (!iova) {
		/* no fixed address - use GMU's uncached range */
		range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
		range_end = 0x80000000;
	} else {
		/* range for fixed address */
		range_start = iova;
		range_end = iova + size;
		/* use IOMMU_PRIV for icache/dcache */
		flags |= MSM_BO_MAP_PRIV;
	}

	bo->obj = msm_gem_new(dev, size, flags);
	if (IS_ERR(bo->obj))
		return PTR_ERR(bo->obj);

	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
					     range_start, range_end);
	if (ret) {
		drm_gem_object_put(bo->obj);
		return ret;
	}

	bo->virt = msm_gem_get_vaddr(bo->obj);
	bo->size = size;

	msm_gem_object_set_name(bo->obj, name);

	return 0;
}

static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	struct iommu_domain *domain;
	struct msm_mmu *mmu;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENODEV;

	mmu = msm_iommu_new(gmu->dev, domain);
	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
	if (IS_ERR(gmu->aspace)) {
		iommu_domain_free(domain);
		return PTR_ERR(gmu->aspace);
	}

	return 0;
}

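/* Return the 'arc-level' for the given frequency */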
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
					   unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned int val;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	val = dev_pm_opp_get_level(opp);

	dev_pm_opp_put(opp);

	return val;
}

static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count, const char *id)
{
	int i, j;
	const u16 *pri, *sec;
	size_t pri_count, sec_count;

	pri = cmd_db_read_aux_data(id, &pri_count);
	if (IS_ERR(pri))
		return PTR_ERR(pri);

	/*
	 * The data comes as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	pri_count >>= 1;
	if (!pri_count)
		return -EINVAL;

	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
	if (IS_ERR(sec))
		return PTR_ERR(sec);

	sec_count >>= 1;
	if (!sec_count)
		return -EINVAL;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			DRM_DEV_ERROR(dev,
				      "Level %u not found in the RPMh list\n",
				      level);
			DRM_DEV_ERROR(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}

/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

	return ret;
}

static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */
	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}

static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = devm_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	gmu->current_perf_index = gmu->nr_gpu_freqs - 1;

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}

static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "hub");

	return 0;
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = ioremap(res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}

static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
			      name, ret);
		return ret;
	}

	disable_irq(irq);

	return irq;
}

void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = to_platform_device(gmu->dev);

	if (!gmu->initialized)
		return;

	pm_runtime_force_suspend(gmu->dev);

	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
		pm_runtime_disable(gmu->gxpd);
		dev_pm_domain_detach(gmu->gxpd, false);
	}

	iounmap(gmu->mmio);
	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
		iounmap(gmu->rscc);
	gmu->mmio = NULL;
	gmu->rscc = NULL;

	a6xx_gmu_memory_free(gmu);

	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	gmu->initialized = false;
}

int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	mutex_init(&gmu->lock);

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* Don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		goto err_put_device;

	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		goto err_put_device;

	/*
	 * A660 requires handling "prealloc requests" in GMU firmware. For now
	 * just hardcode allocations based on the known firmware so that the
	 * otherwise-unused "dummy" and "debug" BOs can be reused for them.
	 */
	gmu->dummy.size = SZ_4K;
	if (adreno_is_a660_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
					    0x60400000, "debug");
		if (ret)
			goto err_memory;

		gmu->dummy.size = SZ_8K;
	}

	/* Allocate memory for the GMU dummy page */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
				    0x60000000, "dummy");
	if (ret)
		goto err_memory;

	/* Note that the a650 family also includes the a660 family */
	if (adreno_is_a650_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
			SZ_16M - SZ_16K, 0x04000, "icache");
		if (ret)
			goto err_memory;
	} else if (adreno_is_a640_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
			SZ_256K - SZ_16K, 0x04000, "icache");
		if (ret)
			goto err_memory;

		ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
			SZ_256K - SZ_16K, 0x44000, "dcache");
		if (ret)
			goto err_memory;
	} else if (adreno_is_a630(adreno_gpu) || adreno_is_a615_family(adreno_gpu)) {
		/* HFI v1, has sptprac */
		gmu->legacy = true;

		/* Allocate memory for the GMU debug region */
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
		if (ret)
			goto err_memory;
	}

	/* Allocate memory for the HFI queues */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
	if (ret)
		goto err_memory;

	/* Allocate memory for the GMU log region */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0, "log");
	if (ret)
		goto err_memory;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio)) {
		ret = PTR_ERR(gmu->mmio);
		goto err_memory;
	}

	if (adreno_is_a650_family(adreno_gpu)) {
		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
		if (IS_ERR(gmu->rscc))
			goto err_mmio;
	} else {
		gmu->rscc = gmu->mmio + 0x23000;
	}

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err_mmio;

	/*
	 * Get a link to the GX power domain to reset the GPU in case of GMU
	 * crash
	 */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	gmu->initialized = true;

	return 0;

err_mmio:
	iounmap(gmu->mmio);
	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
		iounmap(gmu->rscc);
	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	ret = -ENODEV;

err_memory:
	a6xx_gmu_memory_free(gmu);
err_put_device:
	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;
}