/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "hdp_v5_0.h"

#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>

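/* Flush the HDP write cache so CPU writes routed through HDP become visible
 * to the GPU: write 0 to the remapped HDP_MEM_FLUSH_CNTL register, either
 * directly via MMIO or by emitting the register write on @ring when the
 * ring supports emit_wreg.
 */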
static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
	else
		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

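/* Invalidate the HDP read cache so stale data is not returned on later GPU
 * reads: write 1 to HDP_READ_CACHE_INVALIDATE, either directly via MMIO or
 * by emitting the register write on @ring.
 */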
static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

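/* Enable or disable SRAM power gating (LS = light sleep, DS = deep sleep,
 * SD = shutdown) for the HDP IPH and RC memories, as selected by
 * adev->cg_flags.
 */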
static void hdp_v5_0_update_mem_power_gating(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before switching the clock/power mode, force the IPH & RC
	 * clocks on */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switching;
	 * disable clock and power gating before making any changes */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* Already disabled above; the actions below apply only when enabling */
	if (enable) {
		/* only one clock gating mode (LS/DS/SD) can be enabled */
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 IPH_MEM_POWER_LS_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_LS_EN, 1);
		} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 IPH_MEM_POWER_DS_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_DS_EN, 1);
		} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 IPH_MEM_POWER_SD_EN, 1);
			/* RC should not use shutdown mode; fall back to DS or LS if allowed */
			if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS)
				hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
								 HDP_MEM_POWER_CTRL,
								 RC_MEM_POWER_DS_EN, 1);
			else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)
				hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
								 HDP_MEM_POWER_CTRL,
								 RC_MEM_POWER_LS_EN, 1);
		}

		/* IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
		 * be set for SRAM LS/DS/SD to take effect */
		if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
				      AMD_CG_SUPPORT_HDP_SD)) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
							 IPH_MEM_POWER_CTRL_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_CTRL_EN, 1);
			WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
		}
	}

	/* disable the IPH & RC clock overrides after the clock/power mode change */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 0);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 0);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

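/* Enable or disable HDP medium grain clock gating by clearing or setting
 * the soft clock overrides in HDP_CLK_CNTL.
 */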
static void hdp_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

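/* Combined entry point: update both HDP SRAM power gating and medium grain
 * clock gating.
 */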
static void hdp_v5_0_update_clock_gating(struct amdgpu_device *adev,
					 bool enable)
{
	hdp_v5_0_update_mem_power_gating(adev, enable);
	hdp_v5_0_update_medium_grain_clock_gating(adev, enable);
}

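/* Report which HDP clock/power gating features are currently active by
 * reading back HDP_CLK_CNTL and HDP_MEM_POWER_CTRL, and OR the matching
 * AMD_CG_SUPPORT_HDP_* bits into @flags.
 */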
static void hdp_v5_0_get_clockgating_state(struct amdgpu_device *adev,
					   u64 *flags)
{
	uint32_t tmp;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}

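/* One-time register setup: set HDP_MISC_CNTL.FLUSH_INVALIDATE_CACHE so that
 * an HDP flush also invalidates the read cache.
 */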
static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
}

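/* HDP 5.0 callback table plugged into the generic amdgpu HDP interface */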
const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
	.flush_hdp = hdp_v5_0_flush_hdp,
	.invalidate_hdp = hdp_v5_0_invalidate_hdp,
	.update_clock_gating = hdp_v5_0_update_clock_gating,
	.get_clock_gating_state = hdp_v5_0_get_clockgating_state,
	.init_registers = hdp_v5_0_init_registers,
};