/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"

#include "oss/osssys_6_0_0_offset.h"
#include "oss/osssys_6_0_0_sh_mask.h"

#include "soc15_common.h"
#include "ih_v6_0.h"

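/* Number of doorbell re-writes ih_v6_0_irq_rearm() attempts before giving up */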
#define MAX_REARM_RETRY 10

static void ih_v6_0_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * ih_v6_0_init_register_offset - Initialize register offset for ih rings
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize register offsets for the ih rings (IH_V6_0).
 */
static void ih_v6_0_init_register_offset(struct amdgpu_device *adev)
{
	struct amdgpu_ih_regs *ih_regs;

	/* set up register offsets for ih ring 0 and, if used, ring 1 */
	if (adev->irq.ih.ring_size) {
		ih_regs = &adev->irq.ih.ih_regs;
		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE);
		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI);
		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL);
		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR);
		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR);
		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR);
		ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_LO);
		ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_HI);
		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
	}

	if (adev->irq.ih1.ring_size) {
		ih_regs = &adev->irq.ih1.ih_regs;
		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_RING1);
		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI_RING1);
		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL_RING1);
		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_RING1);
		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR_RING1);
		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR_RING1);
		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
	}
}

/**
 * force_update_wptr_for_self_int - Force update the wptr for self interrupt
 *
 * @adev: amdgpu_device pointer
 * @threshold: threshold to trigger the wptr reporting
 * @timeout: timeout to trigger the wptr reporting
 * @enabled: enable/disable the timeout flush mechanism
 *
 * Program the ring 1 used-entry threshold (IH_RB_CNTL_RING1) and the
 * self-interrupt force-wptr-update timeout/enable bits (IH_CNTL2).
 */
static void
force_update_wptr_for_self_int(struct amdgpu_device *adev,
			       u32 threshold, u32 timeout, bool enabled)
{
	u32 ih_cntl, ih_rb_cntl;

	ih_cntl = RREG32_SOC15(OSSSYS, 0, regIH_CNTL2);
	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1);

	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
				SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
				SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
				   RB_USED_INT_THRESHOLD, threshold);

	WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
	WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl);
}

/**
 * ih_v6_0_toggle_ring_interrupts - toggle the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 * @enable: true - enable the interrupts, false - disable the interrupts
 *
 * Toggle the interrupt ring buffer (IH_V6_0)
 */
static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev,
					   struct amdgpu_ih_ring *ih,
					   bool enable)
{
	struct amdgpu_ih_regs *ih_regs;
	uint32_t tmp;

	ih_regs = &ih->ih_regs;

	tmp = RREG32(ih_regs->ih_rb_cntl);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
	/* ENABLE_INTR is only programmed for ih ring 0 */
	if (ih == &adev->irq.ih)
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
	WREG32(ih_regs->ih_rb_cntl, tmp);

	if (enable) {
		ih->enabled = true;
	} else {
		/* set rptr, wptr to 0 */
		WREG32(ih_regs->ih_rb_rptr, 0);
		WREG32(ih_regs->ih_rb_wptr, 0);
		ih->enabled = false;
		ih->rptr = 0;
	}

	return 0;
}

/**
 * ih_v6_0_toggle_interrupts - Toggle all the available interrupt ring buffers
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable interrupt ring buffers
 *
 * Toggle all the available interrupt ring buffers (IH_V6_0).
 */
static int ih_v6_0_toggle_interrupts(struct amdgpu_device *adev, bool enable)
{
	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
	int i;
	int r;

	for (i = 0; i < ARRAY_SIZE(ih); i++) {
		if (ih[i]->ring_size) {
			r = ih_v6_0_toggle_ring_interrupts(adev, ih[i], enable);
			if (r)
				return r;
		}
	}

	return 0;
}

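/*
 * Build the IH_RB_CNTL value for a ring: ring size, MC address space
 * (bus vs. frame buffer), wptr overflow handling and wptr writeback.
 */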
static uint32_t ih_v6_0_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
	int rb_bufsz = order_base_2(ih->ring_size / 4);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   MC_SPACE, ih->use_bus_addr ? 2 : 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
	 * value is written to memory
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	return ih_rb_cntl;
}

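/*
 * Build the IH_DOORBELL_RPTR value: program the doorbell offset and set the
 * enable bit when the ring uses a doorbell, otherwise leave it disabled.
 */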
static uint32_t ih_v6_0_doorbell_rptr(struct amdgpu_ih_ring *ih)
{
	u32 ih_doorbell_rtpr = 0;

	if (ih->use_doorbell) {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR, OFFSET,
						 ih->doorbell_index);
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 1);
	} else {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 0);
	}
	return ih_doorbell_rtpr;
}

/**
 * ih_v6_0_enable_ring - enable an ih ring buffer
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 *
 * Enable an ih ring buffer (IH_V6_0)
 */
static int ih_v6_0_enable_ring(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	struct amdgpu_ih_regs *ih_regs;
	uint32_t tmp;

	ih_regs = &ih->ih_regs;

	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
	WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);

	tmp = RREG32(ih_regs->ih_rb_cntl);
	tmp = ih_v6_0_rb_cntl(ih, tmp);
	if (ih == &adev->irq.ih)
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
	if (ih == &adev->irq.ih1) {
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
	}
	WREG32(ih_regs->ih_rb_cntl, tmp);

	if (ih == &adev->irq.ih) {
		/* set the ih ring 0 writeback address whether it's enabled or not */
		WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
		WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
	}

	/* set rptr, wptr to 0 */
	WREG32(ih_regs->ih_rb_wptr, 0);
	WREG32(ih_regs->ih_rb_rptr, 0);

	WREG32(ih_regs->ih_doorbell_rptr, ih_v6_0_doorbell_rptr(ih));

	return 0;
}

/**
 * ih_v6_0_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts, program the IH ring buffers, the doorbell range and
 * the interrupt storm/flood controls, then re-enable interrupts.
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int ih_v6_0_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
	u32 ih_chicken;
	u32 tmp;
	int ret;
	int i;

	/* disable irqs */
	ret = ih_v6_0_toggle_interrupts(adev, false);
	if (ret)
		return ret;

	adev->nbio.funcs->ih_control(adev);

	if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
		     (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) {
		if (ih[0]->use_bus_addr) {
			ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN);
			ih_chicken = REG_SET_FIELD(ih_chicken,
					IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
			WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken);
		}
	}

	for (i = 0; i < ARRAY_SIZE(ih); i++) {
		if (ih[i]->ring_size) {
			ret = ih_v6_0_enable_ring(adev, ih[i]);
			if (ret)
				return ret;
		}
	}

	/* update doorbell range for ih ring 0 */
	adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
					    ih[0]->doorbell_index);

	tmp = RREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
			    CLIENT18_IS_STORM_CLIENT, 1);
	WREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL, tmp);

	tmp = RREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
	WREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL, tmp);

	/* GC/MMHUB UTCL2 page fault interrupts are configured as
	 * MSI storm capable interrupts by default. The delay is
	 * used to avoid the ISR being called too frequently
	 * when page faults happen on several continuous pages.
	 */
	tmp = RREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL);
	tmp = REG_SET_FIELD(tmp, IH_MSI_STORM_CTRL,
			    DELAY, 3);
	WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);

	pci_set_master(adev->pdev);

	/* enable interrupts */
	ret = ih_v6_0_toggle_interrupts(adev, true);
	if (ret)
		return ret;
	/* enable wptr force update for self int */
	force_update_wptr_for_self_int(adev, 0, 8, true);

	if (adev->irq.ih_soft.ring_size)
		adev->irq.ih_soft.enabled = true;

	return 0;
}

/**
 * ih_v6_0_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw.
 */
static void ih_v6_0_irq_disable(struct amdgpu_device *adev)
{
	force_update_wptr_for_self_int(adev, 0, 8, false);
	ih_v6_0_toggle_interrupts(adev, false);

	/* Wait and acknowledge irq */
	mdelay(1);
}

/**
 * ih_v6_0_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer.  Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
			    struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;
	struct amdgpu_ih_regs *ih_regs;

	wptr = le32_to_cpu(*ih->wptr_cpu);
	ih_regs = &ih->ih_regs;

	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	/* Double check that the overflow wasn't already cleared. */
	wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;
	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

	/* When a ring buffer overflow happens, start parsing interrupts
	 * from the last not overwritten vector (wptr + 32). Hopefully
	 * this should allow us to catch up.
	 */
	tmp = (wptr + 32) & ih->ptr_mask;
	dev_warn(adev->dev, "IH ring buffer overflow "
		 "(0x%08X, 0x%08X, 0x%08X)\n",
		 wptr, ih->rptr, tmp);
	ih->rptr = tmp;

	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
	return (wptr & ih->ptr_mask);
}

/**
 * ih_v6_0_irq_rearm - rearm IRQ if lost
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 */
static void ih_v6_0_irq_rearm(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih)
{
	uint32_t v = 0;
	uint32_t i = 0;
	struct amdgpu_ih_regs *ih_regs;

	ih_regs = &ih->ih_regs;

	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
	for (i = 0; i < MAX_REARM_RETRY; i++) {
		v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
		if ((v < ih->ring_size) && (v != ih->rptr))
			WDOORBELL32(ih->doorbell_index, ih->rptr);
		else
			break;
	}
}

/**
 * ih_v6_0_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 *
 * Set the IH ring buffer rptr.
 */
static void ih_v6_0_set_rptr(struct amdgpu_device *adev,
			     struct amdgpu_ih_ring *ih)
{
	struct amdgpu_ih_regs *ih_regs;

	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;
		WDOORBELL32(ih->doorbell_index, ih->rptr);

		if (amdgpu_sriov_vf(adev))
			ih_v6_0_irq_rearm(adev, ih);
	} else {
		ih_regs = &ih->ih_regs;
		WREG32(ih_regs->ih_rb_rptr, ih->rptr);
	}
}

/**
 * ih_v6_0_self_irq - dispatch work for ring 1
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: IV with WPTR update
 *
 * Update the WPTR from the IV and schedule work to handle the entries.
 */
static int ih_v6_0_self_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t wptr = cpu_to_le32(entry->src_data[0]);

	switch (entry->ring_id) {
	case 1:
		*adev->irq.ih1.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih1_work);
		break;
	default:
		break;
	}
	return 0;
}

static const struct amdgpu_irq_src_funcs ih_v6_0_self_irq_funcs = {
	.process = ih_v6_0_self_irq,
};

static void ih_v6_0_set_self_irq_funcs(struct amdgpu_device *adev)
{
	adev->irq.self_irq.num_types = 0;
	adev->irq.self_irq.funcs = &ih_v6_0_self_irq_funcs;
}

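/* early_init: install the IH callbacks and the self-interrupt source funcs */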
static int ih_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ih_v6_0_set_interrupt_funcs(adev);
	ih_v6_0_set_self_irq_funcs(adev);
	return 0;
}

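/*
 * sw_init: register the self-interrupt source, allocate the 256KB IH ring 0
 * and the software IH ring, then initialize the IRQ core.
 */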
static int ih_v6_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool use_bus_addr;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
			      &adev->irq.self_irq);

	if (r)
		return r;

	/* use gpu virtual address for ih ring
	 * until ih_chicken is programmed to allow
	 * use bus address for ih ring by psp bl */
	use_bus_addr =
		(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
	if (r)
		return r;

	adev->irq.ih.use_doorbell = true;
	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;

	adev->irq.ih1.ring_size = 0;
	adev->irq.ih2.ring_size = 0;

	/* initialize ih control register offsets */
	ih_v6_0_init_register_offset(adev);

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
	if (r)
		return r;

	r = amdgpu_irq_init(adev);

	return r;
}

static int ih_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini_sw(adev);

	return 0;
}

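/* hw_init: program and enable the IH rings on the hardware */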
static int ih_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = ih_v6_0_irq_init(adev);
	if (r)
		return r;

	return 0;
}

static int ih_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ih_v6_0_irq_disable(adev);

	return 0;
}

static int ih_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return ih_v6_0_hw_fini(adev);
}

static int ih_v6_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return ih_v6_0_hw_init(adev);
}

static bool ih_v6_0_is_idle(void *handle)
{
	/* todo */
	return true;
}

static int ih_v6_0_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}

static int ih_v6_0_soft_reset(void *handle)
{
	/* todo */
	return 0;
}

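/*
 * Toggle IH clock gating: clearing the soft-override bits in IH_CLK_CTRL
 * lets the clocks be gated, setting them forces the clocks on.
 */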
static void ih_v6_0_update_clockgating_state(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t data, def, field_val;

	if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
		def = data = RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL);
		field_val = enable ? 0 : 1;
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DYN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     REG_CLK_SOFT_OVERRIDE, field_val);
		if (def != data)
			WREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL, data);
	}

	return;
}

static int ih_v6_0_set_clockgating_state(void *handle,
					 enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ih_v6_0_update_clockgating_state(adev,
		state == AMD_CG_STATE_GATE);
	return 0;
}

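/*
 * Toggle IH SRAM power gating through IH_MEM_POWER_CTRL: select deep-sleep
 * (DS) mode for the IH buffer and retry-int CAM memories when gating,
 * clear all power-down modes when ungating.
 */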
static void ih_v6_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
					       bool enable)
{
	uint32_t ih_mem_pwr_cntl;

	/* Disable ih sram power cntl before switching powergating mode */
	ih_mem_pwr_cntl = RREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL);
	ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
					IH_BUFFER_MEM_POWER_CTRL_EN, 0);
	WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);

	/* It is recommended to set mem powergating mode to DS mode */
	if (enable) {
		/* mem power mode */
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_LS_EN, 0);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_DS_EN, 1);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_SD_EN, 0);
		/* cam mem power mode */
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 1);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
		/* re-enable power cntl */
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_CTRL_EN, 1);
	} else {
		/* mem power mode */
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_LS_EN, 0);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_DS_EN, 0);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_SD_EN, 0);
		/* cam mem power mode */
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 0);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
		/* re-enable power cntl */
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_CTRL_EN, 1);
	}

	WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
}

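/* Power gate the IH SRAM only when the IP advertises AMD_PG_SUPPORT_IH_SRAM_PG */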
static int ih_v6_0_set_powergating_state(void *handle,
					 enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
		ih_v6_0_update_ih_mem_power_gating(adev, enable);

	return 0;
}

static void ih_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
		*flags |= AMD_CG_SUPPORT_IH_CG;

	return;
}

static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
	.name = "ih_v6_0",
	.early_init = ih_v6_0_early_init,
	.late_init = NULL,
	.sw_init = ih_v6_0_sw_init,
	.sw_fini = ih_v6_0_sw_fini,
	.hw_init = ih_v6_0_hw_init,
	.hw_fini = ih_v6_0_hw_fini,
	.suspend = ih_v6_0_suspend,
	.resume = ih_v6_0_resume,
	.is_idle = ih_v6_0_is_idle,
	.wait_for_idle = ih_v6_0_wait_for_idle,
	.soft_reset = ih_v6_0_soft_reset,
	.set_clockgating_state = ih_v6_0_set_clockgating_state,
	.set_powergating_state = ih_v6_0_set_powergating_state,
	.get_clockgating_state = ih_v6_0_get_clockgating_state,
};

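/* Ring callbacks used by the amdgpu IRQ core to drive the IH ring */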
static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
	.get_wptr = ih_v6_0_get_wptr,
	.decode_iv = amdgpu_ih_decode_iv_helper,
	.decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
	.set_rptr = ih_v6_0_set_rptr
};

static void ih_v6_0_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &ih_v6_0_funcs;
}

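/* IP block registration entry for IH 6.0 */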
const struct amdgpu_ip_block_version ih_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &ih_v6_0_ip_funcs,
};