/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"

#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"

#include "soc15_common.h"
#include "navi10_ih.h"

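/* Maximum number of doorbell re-write attempts in navi10_ih_irq_rearm() */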
#define MAX_REARM_RETRY 10

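/*
 * Register offset of IH_CHICKEN on Sienna Cichlid (OSSSYS 5.0.3) and newer
 * parts, which is not provided by the osssys 5.0.0 headers included above.
 */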
#define mmIH_CHICKEN_Sienna_Cichlid                 0x018d
#define mmIH_CHICKEN_Sienna_Cichlid_BASE_IDX        0

static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * navi10_ih_init_register_offset - Initialize register offset for ih rings
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the register offsets for the ih rings (NAVI10).
 */
static void navi10_ih_init_register_offset(struct amdgpu_device *adev)
{
    struct amdgpu_ih_regs *ih_regs;

    if (adev->irq.ih.ring_size) {
        ih_regs = &adev->irq.ih.ih_regs;
        ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
        ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
        ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
        ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
        ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
        ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
        ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
        ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
        ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
    }

    if (adev->irq.ih1.ring_size) {
        ih_regs = &adev->irq.ih1.ih_regs;
        ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
        ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
        ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
        ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
        ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
        ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
        ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
    }

    if (adev->irq.ih2.ring_size) {
        ih_regs = &adev->irq.ih2.ih_regs;
        ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
        ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
        ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
        ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
        ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
        ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
        ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
    }
}

/**
 * force_update_wptr_for_self_int - Force update the wptr for self interrupt
 *
 * @adev: amdgpu_device pointer
 * @threshold: threshold to trigger the wptr reporting
 * @timeout: timeout to trigger the wptr reporting
 * @enabled: Enable/disable timeout flush mechanism
 *
 * threshold input range: 0 ~ 15, default 0,
 * real_threshold = 2^threshold
 * timeout input range: 0 ~ 20, default 8,
 * real_timeout = (2^timeout) * 1024 / (socclk_freq)
 *
 * Force update wptr for self interrupt (>= SIENNA_CICHLID).
 */
static void
force_update_wptr_for_self_int(struct amdgpu_device *adev,
                   u32 threshold, u32 timeout, bool enabled)
{
    u32 ih_cntl, ih_rb_cntl;

    if (adev->ip_versions[OSSSYS_HWIP][0] < IP_VERSION(5, 0, 3))
        return;

    ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2);
    ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);

    ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
                SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
    ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
                SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
    ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
                   RB_USED_INT_THRESHOLD, threshold);

    if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
        if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
            return;
    } else {
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
    }

    ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
    ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
                   RB_USED_INT_THRESHOLD, threshold);
    if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
        if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, ih_rb_cntl))
            return;
    } else {
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
    }

    WREG32_SOC15(OSSSYS, 0, mmIH_CNTL2, ih_cntl);
}

/**
 * navi10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 * @enable: true - enable the interrupts, false - disable the interrupts
 *
 * Toggle the interrupt ring buffer (NAVI10).
 */
static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
                        struct amdgpu_ih_ring *ih,
                        bool enable)
{
    struct amdgpu_ih_regs *ih_regs;
    uint32_t tmp;

    ih_regs = &ih->ih_regs;

    tmp = RREG32(ih_regs->ih_rb_cntl);
    tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
    tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
    /* enable_intr field is only valid in ring0 */
    if (ih == &adev->irq.ih)
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));

    if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
        if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
            return -ETIMEDOUT;
    } else {
        WREG32(ih_regs->ih_rb_cntl, tmp);
    }

    if (enable) {
        ih->enabled = true;
    } else {
        /* set rptr, wptr to 0 */
        WREG32(ih_regs->ih_rb_rptr, 0);
        WREG32(ih_regs->ih_rb_wptr, 0);
        ih->enabled = false;
        ih->rptr = 0;
    }

    return 0;
}

/**
 * navi10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable interrupt ring buffers
 *
 * Toggle all the available interrupt ring buffers (NAVI10).
 */
static int navi10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
{
    struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
    int i;
    int r;

    for (i = 0; i < ARRAY_SIZE(ih); i++) {
        if (ih[i]->ring_size) {
            r = navi10_ih_toggle_ring_interrupts(adev, ih[i], enable);
            if (r)
                return r;
        }
    }

    return 0;
}

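/*
 * navi10_ih_rb_cntl - build the IH_RB_CNTL value for an ih ring: MC address
 * space, ring size, wptr overflow handling and wptr writeback. The caller
 * writes the returned value back to the ring's RB_CNTL register.
 */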
static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
    int rb_bufsz = order_base_2(ih->ring_size / 4);

    ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
                   MC_SPACE, ih->use_bus_addr ? 1 : 4);
    ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
                   WPTR_OVERFLOW_CLEAR, 1);
    ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
                   WPTR_OVERFLOW_ENABLE, 1);
    ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
    /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
     * value is written to memory
     */
    ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
                   WPTR_WRITEBACK_ENABLE, 1);
    ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
    ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
    ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

    return ih_rb_cntl;
}

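/*
 * navi10_ih_doorbell_rptr - build the IH_DOORBELL_RPTR value for an ih ring;
 * programs the ring's doorbell offset and enables or disables doorbell mode
 * depending on ih->use_doorbell.
 */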
static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
{
    u32 ih_doorbell_rtpr = 0;

    if (ih->use_doorbell) {
        ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
                         IH_DOORBELL_RPTR, OFFSET,
                         ih->doorbell_index);
        ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
                         IH_DOORBELL_RPTR,
                         ENABLE, 1);
    } else {
        ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
                         IH_DOORBELL_RPTR,
                         ENABLE, 0);
    }
    return ih_doorbell_rtpr;
}

/**
 * navi10_ih_enable_ring - enable an ih ring buffer
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 *
 * Enable an ih ring buffer (NAVI10).
 */
static int navi10_ih_enable_ring(struct amdgpu_device *adev,
                 struct amdgpu_ih_ring *ih)
{
    struct amdgpu_ih_regs *ih_regs;
    uint32_t tmp;

    ih_regs = &ih->ih_regs;

    /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
    WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
    WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);

    tmp = RREG32(ih_regs->ih_rb_cntl);
    tmp = navi10_ih_rb_cntl(ih, tmp);
    if (ih == &adev->irq.ih)
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
    if (ih == &adev->irq.ih1)
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);

    if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
        if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
            DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
            return -ETIMEDOUT;
        }
    } else {
        WREG32(ih_regs->ih_rb_cntl, tmp);
    }

    if (ih == &adev->irq.ih) {
        /* set the ih ring 0 writeback address whether it's enabled or not */
        WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
        WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
    }

    /* set rptr, wptr to 0 */
    WREG32(ih_regs->ih_rb_wptr, 0);
    WREG32(ih_regs->ih_rb_rptr, 0);

    WREG32(ih_regs->ih_doorbell_rptr, navi10_ih_doorbell_rptr(ih));

    return 0;
}

/**
 * navi10_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a ring buffer for the interrupt controller,
 * enable the RLC, disable interrupts, enable the IH
 * ring buffer and enable it (NAVI).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
    struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
    u32 ih_chicken;
    int ret;
    int i;

    /* disable irqs */
    ret = navi10_ih_toggle_interrupts(adev, false);
    if (ret)
        return ret;

    adev->nbio.funcs->ih_control(adev);

    if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
        if (ih[0]->use_bus_addr) {
            switch (adev->ip_versions[OSSSYS_HWIP][0]) {
            case IP_VERSION(5, 0, 3):
            case IP_VERSION(5, 2, 0):
            case IP_VERSION(5, 2, 1):
                ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid);
                ih_chicken = REG_SET_FIELD(ih_chicken,
                        IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
                WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid, ih_chicken);
                break;
            default:
                ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
                ih_chicken = REG_SET_FIELD(ih_chicken,
                        IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
                WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
                break;
            }
        }
    }

    for (i = 0; i < ARRAY_SIZE(ih); i++) {
        if (ih[i]->ring_size) {
            ret = navi10_ih_enable_ring(adev, ih[i]);
            if (ret)
                return ret;
        }
    }

    /* update doorbell range for ih ring 0 */
    adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
                        ih[0]->doorbell_index);

    pci_set_master(adev->pdev);

    /* enable interrupts */
    ret = navi10_ih_toggle_interrupts(adev, true);
    if (ret)
        return ret;
    /* enable wptr force update for self int */
    force_update_wptr_for_self_int(adev, 0, 8, true);

    if (adev->irq.ih_soft.ring_size)
        adev->irq.ih_soft.enabled = true;

    return 0;
}

/**
 * navi10_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (NAVI10).
 */
static void navi10_ih_irq_disable(struct amdgpu_device *adev)
{
    force_update_wptr_for_self_int(adev, 0, 8, false);
    navi10_ih_toggle_interrupts(adev, false);

    /* Wait and acknowledge irq */
    mdelay(1);
}

/**
 * navi10_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch wptr
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (NAVI10).  Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
                  struct amdgpu_ih_ring *ih)
{
    u32 wptr, tmp;
    struct amdgpu_ih_regs *ih_regs;

    if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
        /* Only ring0 supports writeback. On other rings fall back
         * to register-based code with overflow checking below.
         * ih_soft ring doesn't have any backing hardware registers,
         * update wptr and return.
         */
        wptr = le32_to_cpu(*ih->wptr_cpu);

        if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
            goto out;
    }

    ih_regs = &ih->ih_regs;

    /* Double check that the overflow wasn't already cleared. */
    wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
    if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
        goto out;
    wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

    /* When a ring buffer overflow happens, start parsing interrupts
     * from the last not-overwritten vector (wptr + 32). Hopefully
     * this should allow us to catch up.
     */
    tmp = (wptr + 32) & ih->ptr_mask;
    dev_warn(adev->dev, "IH ring buffer overflow "
         "(0x%08X, 0x%08X, 0x%08X)\n",
         wptr, ih->rptr, tmp);
    ih->rptr = tmp;

    tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
    tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
    WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
    return (wptr & ih->ptr_mask);
}

/**
 * navi10_ih_irq_rearm - rearm IRQ if lost
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring to match
 *
 */
static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
                   struct amdgpu_ih_ring *ih)
{
    uint32_t v = 0;
    uint32_t i = 0;
    struct amdgpu_ih_regs *ih_regs;

    ih_regs = &ih->ih_regs;

    /* Rearm IRQ / re-write doorbell if doorbell write is lost */
    for (i = 0; i < MAX_REARM_RETRY; i++) {
        v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
        if ((v < ih->ring_size) && (v != ih->rptr))
            WDOORBELL32(ih->doorbell_index, ih->rptr);
        else
            break;
    }
}

/**
 * navi10_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to set rptr
 *
 * Set the IH ring buffer rptr.
 */
static void navi10_ih_set_rptr(struct amdgpu_device *adev,
                   struct amdgpu_ih_ring *ih)
{
    struct amdgpu_ih_regs *ih_regs;

    if (ih == &adev->irq.ih_soft)
        return;

    if (ih->use_doorbell) {
        /* XXX check if swapping is necessary on BE */
        *ih->rptr_cpu = ih->rptr;
        WDOORBELL32(ih->doorbell_index, ih->rptr);

        if (amdgpu_sriov_vf(adev))
            navi10_ih_irq_rearm(adev, ih);
    } else {
        ih_regs = &ih->ih_regs;
        WREG32(ih_regs->ih_rb_rptr, ih->rptr);
    }
}

/**
 * navi10_ih_self_irq - dispatch work for ring 1 and 2
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: IV with WPTR update
 *
 * Update the WPTR from the IV and schedule work to handle the entries.
 */
static int navi10_ih_self_irq(struct amdgpu_device *adev,
                  struct amdgpu_irq_src *source,
                  struct amdgpu_iv_entry *entry)
{
    switch (entry->ring_id) {
    case 1:
        schedule_work(&adev->irq.ih1_work);
        break;
    case 2:
        schedule_work(&adev->irq.ih2_work);
        break;
    default:
        break;
    }
    return 0;
}

static const struct amdgpu_irq_src_funcs navi10_ih_self_irq_funcs = {
    .process = navi10_ih_self_irq,
};

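/*
 * navi10_ih_set_self_irq_funcs - register navi10_ih_self_irq() as the handler
 * for the IH self-interrupt source that signals ring 1/2 wptr updates.
 */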
static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
{
    adev->irq.self_irq.num_types = 0;
    adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs;
}

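/* early_init IP callback: hook up the IH and self-interrupt function tables */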
static int navi10_ih_early_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    navi10_ih_set_interrupt_funcs(adev);
    navi10_ih_set_self_irq_funcs(adev);
    return 0;
}

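/*
 * sw_init: register the IH self-interrupt source, allocate the 256KB IH
 * ring 0 plus the software ring, and set up the doorbell and register
 * offsets before initializing the IRQ framework.
 */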
static int navi10_ih_sw_init(void *handle)
{
    int r;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    bool use_bus_addr;

    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
                &adev->irq.self_irq);

    if (r)
        return r;

    /* use gpu virtual address for ih ring
     * until ih_chicken is programmed to allow
     * use of the bus address for the ih ring by the psp bl */
    if ((adev->flags & AMD_IS_APU) ||
        (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
        use_bus_addr = false;
    else
        use_bus_addr = true;
    r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
    if (r)
        return r;

    adev->irq.ih.use_doorbell = true;
    adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;

    adev->irq.ih1.ring_size = 0;
    adev->irq.ih2.ring_size = 0;

    /* initialize ih control registers offset */
    navi10_ih_init_register_offset(adev);

    r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
    if (r)
        return r;

    r = amdgpu_irq_init(adev);

    return r;
}

static int navi10_ih_sw_fini(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    amdgpu_irq_fini_sw(adev);

    return 0;
}

static int navi10_ih_hw_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    return navi10_ih_irq_init(adev);
}

static int navi10_ih_hw_fini(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    navi10_ih_irq_disable(adev);

    return 0;
}

static int navi10_ih_suspend(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    return navi10_ih_hw_fini(adev);
}

static int navi10_ih_resume(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    return navi10_ih_hw_init(adev);
}

static bool navi10_ih_is_idle(void *handle)
{
    /* todo */
    return true;
}

static int navi10_ih_wait_for_idle(void *handle)
{
    /* todo */
    return -ETIMEDOUT;
}

static int navi10_ih_soft_reset(void *handle)
{
    /* todo */
    return 0;
}

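/*
 * navi10_ih_update_clockgating_state - toggle IH clock gating via the clock
 * soft-override bits in IH_CLK_CTRL; clearing them (field_val = 0) allows the
 * clocks to be gated, setting them keeps the clocks running.
 */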
static void navi10_ih_update_clockgating_state(struct amdgpu_device *adev,
                           bool enable)
{
    uint32_t data, def, field_val;

    if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
        def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
        field_val = enable ? 0 : 1;
        data = REG_SET_FIELD(data, IH_CLK_CTRL,
                     DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
        data = REG_SET_FIELD(data, IH_CLK_CTRL,
                     OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
        data = REG_SET_FIELD(data, IH_CLK_CTRL,
                     LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
        data = REG_SET_FIELD(data, IH_CLK_CTRL,
                     DYN_CLK_SOFT_OVERRIDE, field_val);
        data = REG_SET_FIELD(data, IH_CLK_CTRL,
                     REG_CLK_SOFT_OVERRIDE, field_val);
        if (def != data)
            WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
    }

    return;
}

static int navi10_ih_set_clockgating_state(void *handle,
                       enum amd_clockgating_state state)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    navi10_ih_update_clockgating_state(adev,
                state == AMD_CG_STATE_GATE);
    return 0;
}

static int navi10_ih_set_powergating_state(void *handle,
                       enum amd_powergating_state state)
{
    return 0;
}

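/* IH clock gating is reported as active when IH_CLK_CTRL reads zero, i.e. no soft-override bits are set */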
static void navi10_ih_get_clockgating_state(void *handle, u64 *flags)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    if (!RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL))
        *flags |= AMD_CG_SUPPORT_IH_CG;

    return;
}

static const struct amd_ip_funcs navi10_ih_ip_funcs = {
    .name = "navi10_ih",
    .early_init = navi10_ih_early_init,
    .late_init = NULL,
    .sw_init = navi10_ih_sw_init,
    .sw_fini = navi10_ih_sw_fini,
    .hw_init = navi10_ih_hw_init,
    .hw_fini = navi10_ih_hw_fini,
    .suspend = navi10_ih_suspend,
    .resume = navi10_ih_resume,
    .is_idle = navi10_ih_is_idle,
    .wait_for_idle = navi10_ih_wait_for_idle,
    .soft_reset = navi10_ih_soft_reset,
    .set_clockgating_state = navi10_ih_set_clockgating_state,
    .set_powergating_state = navi10_ih_set_powergating_state,
    .get_clockgating_state = navi10_ih_get_clockgating_state,
};

static const struct amdgpu_ih_funcs navi10_ih_funcs = {
    .get_wptr = navi10_ih_get_wptr,
    .decode_iv = amdgpu_ih_decode_iv_helper,
    .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
    .set_rptr = navi10_ih_set_rptr
};

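/* Install the navi10 IH callbacks unless another IH implementation has already registered its own */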
static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
    if (adev->irq.ih_funcs == NULL)
        adev->irq.ih_funcs = &navi10_ih_funcs;
}

const struct amdgpu_ip_block_version navi10_ih_ip_block =
{
    .type = AMD_IP_BLOCK_TYPE_IH,
    .major = 5,
    .minor = 0,
    .rev = 0,
    .funcs = &navi10_ih_ip_funcs,
};