/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "hdp_v5_2.h"

#include "hdp/hdp_5_2_1_offset.h"
#include "hdp/hdp_5_2_1_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>

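/*
 * Flush the HDP (Host Data Path) write cache by writing 0 to the remapped
 * HDP_MEM_FLUSH_CNTL register: directly via MMIO when no ring (or no
 * emit_wreg callback) is available, otherwise as a write packet on the ring.
 */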
static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
                struct amdgpu_ring *ring)
{
    if (!ring || !ring->funcs->emit_wreg)
        WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
            0);
    else
        amdgpu_ring_emit_wreg(ring,
            (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
            0);
}

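/*
 * Enable or disable HDP SRAM power gating. Only one of light sleep (LS),
 * deep sleep (DS) or shutdown (SD) is selected, based on adev->cg_flags.
 */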
static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
                         bool enable)
{
    uint32_t hdp_clk_cntl;
    uint32_t hdp_mem_pwr_cntl;

    if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
                AMD_CG_SUPPORT_HDP_DS |
                AMD_CG_SUPPORT_HDP_SD)))
        return;

    hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
    hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);

    /* Before switching the clock/power mode, force the MEM clock on */
    hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                     ATOMIC_MEM_CLK_SOFT_OVERRIDE, 1);
    hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                     RC_MEM_CLK_SOFT_OVERRIDE, 1);
    WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);

    /* disable clock and power gating before making any changes */
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                     ATOMIC_MEM_POWER_CTRL_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                     ATOMIC_MEM_POWER_LS_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                     ATOMIC_MEM_POWER_DS_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                     ATOMIC_MEM_POWER_SD_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                     RC_MEM_POWER_CTRL_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                     RC_MEM_POWER_LS_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                     RC_MEM_POWER_DS_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                     RC_MEM_POWER_SD_EN, 0);
    WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

    /* Everything is disabled above; the settings below apply only when enabling */
    if (enable) {
        /* only one clock gating mode (LS/DS/SD) can be enabled */
        if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                             HDP_MEM_POWER_CTRL,
                             ATOMIC_MEM_POWER_SD_EN, 1);
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                             HDP_MEM_POWER_CTRL,
                             RC_MEM_POWER_SD_EN, 1);
        } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                             HDP_MEM_POWER_CTRL,
                             ATOMIC_MEM_POWER_LS_EN, 1);
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                             HDP_MEM_POWER_CTRL,
                             RC_MEM_POWER_LS_EN, 1);
        } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                             HDP_MEM_POWER_CTRL,
                             ATOMIC_MEM_POWER_DS_EN, 1);
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                             HDP_MEM_POWER_CTRL,
                             RC_MEM_POWER_DS_EN, 1);
        }

        /* confirmed that ATOMIC/RC_MEM_POWER_CTRL_EN have to be set for SRAM LS/DS/SD */
        if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
                      AMD_CG_SUPPORT_HDP_SD)) {
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                             ATOMIC_MEM_POWER_CTRL_EN, 1);
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                             RC_MEM_POWER_CTRL_EN, 1);
            WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
        }
    }

    /* disable the MEM clock override after the clock/power mode change */
    hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                     ATOMIC_MEM_CLK_SOFT_OVERRIDE, 0);
    hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                     RC_MEM_CLK_SOFT_OVERRIDE, 0);
    WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
}

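/*
 * Enable or disable medium grain clock gating (MGCG) for HDP by clearing or
 * setting the soft clock override bits in HDP_CLK_CNTL.
 */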
static void hdp_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                              bool enable)
{
    uint32_t hdp_clk_cntl;

    if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
        return;

    hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);

    if (enable) {
        hdp_clk_cntl &=
            ~(uint32_t)
            (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
    } else {
        hdp_clk_cntl |= HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
            HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
            HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
            HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
            HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
            HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
    }

    WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
}

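/*
 * Report the currently active HDP clock/power gating features by reading the
 * registers back and setting the matching AMD_CG_SUPPORT_HDP_* bits in *flags.
 */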
static void hdp_v5_2_get_clockgating_state(struct amdgpu_device *adev,
                       u64 *flags)
{
    uint32_t tmp;

    /* AMD_CG_SUPPORT_HDP_MGCG */
    tmp = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
    if (!(tmp & (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
        *flags |= AMD_CG_SUPPORT_HDP_MGCG;

    /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
    tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
    if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
        *flags |= AMD_CG_SUPPORT_HDP_LS;
    else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
        *flags |= AMD_CG_SUPPORT_HDP_DS;
    else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
        *flags |= AMD_CG_SUPPORT_HDP_SD;
}

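/* Update both HDP SRAM power gating and medium grain clock gating */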
static void hdp_v5_2_update_clock_gating(struct amdgpu_device *adev,
                          bool enable)
{
    hdp_v5_2_update_mem_power_gating(adev, enable);
    hdp_v5_2_update_medium_grain_clock_gating(adev, enable);
}

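/* HDP 5.2 callbacks plugged into the common amdgpu HDP abstraction */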
const struct amdgpu_hdp_funcs hdp_v5_2_funcs = {
    .flush_hdp = hdp_v5_2_flush_hdp,
    .update_clock_gating = hdp_v5_2_update_clock_gating,
    .get_clock_gating_state = hdp_v5_2_get_clockgating_state,
};