/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "hdp_v4_0.h"
#include "amdgpu_ras.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>

/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL    0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK  0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK    0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK   0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK     0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX   0

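/* Flush the HDP write cache by writing 0 to the remapped
 * HDP_MEM_FLUSH_CNTL register, either directly over MMIO or, when a
 * ring that supports emit_wreg is provided, as a ring packet.
 */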
static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
                struct amdgpu_ring *ring)
{
    if (!ring || !ring->funcs->emit_wreg)
        WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
    else
        amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

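/* Invalidate the HDP read cache by writing 1 to
 * HDP_READ_CACHE_INVALIDATE; the write is skipped on HDP 4.4.0.
 */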
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
                    struct amdgpu_ring *ring)
{
    if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0))
        return;

    if (!ring || !ring->funcs->emit_wreg)
        WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
    else
        amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
            HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}

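/* Fill in the RAS error counts for the HDP block.  HDP_EDC_CNT holds
 * the SRAM error count, which is reported entirely as uncorrectable
 * errors.
 */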
static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev,
                       void *ras_error_status)
{
    struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

    err_data->ue_count = 0;
    err_data->ce_count = 0;

    if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
        return;

    /* HDP SRAM errors are uncorrectable ones (i.e. fatal errors) */
    err_data->ue_count += RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
}

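/* Clear the HDP RAS error counter.  HDP 4.4.0 and later clear the
 * counter by writing 0; older versions are read-to-clear.
 */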
static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
{
    if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
        return;

    if (adev->ip_versions[HDP_HWIP][0] >= IP_VERSION(4, 4, 0))
        WREG32_SOC15(HDP, 0, mmHDP_EDC_CNT, 0);
    else
        /* read back hdp ras counter to reset it to 0 */
        RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
}

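/* Enable or disable HDP memory light sleep.  HDP 4.0.x/4.1.x use the
 * LS_ENABLE bit in HDP_MEM_POWER_LS; later versions (e.g. Vega20) use
 * the per-memory control/LS enable bits in HDP_MEM_POWER_CTRL.
 */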
static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev,
                     bool enable)
{
    uint32_t def, data;

    if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 0) ||
        adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 1) ||
        adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 1) ||
        adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 0)) {
        def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
            data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
        else
            data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

        if (def != data)
            WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
    } else {
        def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
            data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
                HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
                HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
                HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
        else
            data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
                  HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
                  HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
                  HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

        if (def != data)
            WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
    }
}

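/* Report the current HDP clock gating state: set AMD_CG_SUPPORT_HDP_LS
 * in *flags when memory light sleep is enabled in HDP_MEM_POWER_LS.
 */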
static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
                        u64 *flags)
{
    int data;

    /* AMD_CG_SUPPORT_HDP_LS */
    data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
    if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
        *flags |= AMD_CG_SUPPORT_HDP_LS;
}

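/* One-time HDP register setup: apply per-version tweaks (MMHUB GCC on
 * 4.2.1, read buffer watermark on 4.4.0), enable flush-invalidates-cache
 * behaviour, and program the non-surface base to the start of VRAM.
 */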
static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
{
    switch (adev->ip_versions[HDP_HWIP][0]) {
    case IP_VERSION(4, 2, 1):
        WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
        break;
    default:
        break;
    }

    WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

    if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0))
        WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, READ_BUFFER_WATERMARK, 2);

    WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
    WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
}

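/* RAS hooks and block description for HDP, used by the amdgpu RAS code
 * to query and reset the HDP error counters.
 */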
struct amdgpu_ras_block_hw_ops hdp_v4_0_ras_hw_ops = {
    .query_ras_error_count = hdp_v4_0_query_ras_error_count,
    .reset_ras_error_count = hdp_v4_0_reset_ras_error_count,
};

struct amdgpu_hdp_ras hdp_v4_0_ras = {
    .ras_block = {
        .ras_comm = {
            .name = "hdp",
            .block = AMDGPU_RAS_BLOCK__HDP,
            .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
        },
        .hw_ops = &hdp_v4_0_ras_hw_ops,
    },
};

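/* HDP v4.x entry points exposed to the rest of the driver through
 * adev->hdp.funcs.
 */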
const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
    .flush_hdp = hdp_v4_0_flush_hdp,
    .invalidate_hdp = hdp_v4_0_invalidate_hdp,
    .update_clock_gating = hdp_v4_0_update_clock_gating,
    .get_clock_gating_state = hdp_v4_0_get_clockgating_state,
    .init_registers = hdp_v4_0_init_registers,
};