/*
 * NOTE: this source was captured from a web code browser; the page
 * navigation text that preceded the file has been removed.
 */
0001 /*
0002  * Copyright 2014 Advanced Micro Devices, Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  */
0023 
0024 #include <linux/pci.h>
0025 #include <linux/slab.h>
0026 
0027 #include <drm/amdgpu_drm.h>
0028 
0029 #include "amdgpu.h"
0030 #include "amdgpu_atombios.h"
0031 #include "amdgpu_ih.h"
0032 #include "amdgpu_uvd.h"
0033 #include "amdgpu_vce.h"
0034 #include "amdgpu_ucode.h"
0035 #include "atom.h"
0036 #include "amd_pcie.h"
0037 
0038 #include "gmc/gmc_8_1_d.h"
0039 #include "gmc/gmc_8_1_sh_mask.h"
0040 
0041 #include "oss/oss_3_0_d.h"
0042 #include "oss/oss_3_0_sh_mask.h"
0043 
0044 #include "bif/bif_5_0_d.h"
0045 #include "bif/bif_5_0_sh_mask.h"
0046 
0047 #include "gca/gfx_8_0_d.h"
0048 #include "gca/gfx_8_0_sh_mask.h"
0049 
0050 #include "smu/smu_7_1_1_d.h"
0051 #include "smu/smu_7_1_1_sh_mask.h"
0052 
0053 #include "uvd/uvd_5_0_d.h"
0054 #include "uvd/uvd_5_0_sh_mask.h"
0055 
0056 #include "vce/vce_3_0_d.h"
0057 #include "vce/vce_3_0_sh_mask.h"
0058 
0059 #include "dce/dce_10_0_d.h"
0060 #include "dce/dce_10_0_sh_mask.h"
0061 
0062 #include "vid.h"
0063 #include "vi.h"
0064 #include "gmc_v8_0.h"
0065 #include "gmc_v7_0.h"
0066 #include "gfx_v8_0.h"
0067 #include "sdma_v2_4.h"
0068 #include "sdma_v3_0.h"
0069 #include "dce_v10_0.h"
0070 #include "dce_v11_0.h"
0071 #include "iceland_ih.h"
0072 #include "tonga_ih.h"
0073 #include "cz_ih.h"
0074 #include "uvd_v5_0.h"
0075 #include "uvd_v6_0.h"
0076 #include "vce_v3_0.h"
0077 #if defined(CONFIG_DRM_AMD_ACP)
0078 #include "amdgpu_acp.h"
0079 #endif
0080 #include "amdgpu_vkms.h"
0081 #include "mxgpu_vi.h"
0082 #include "amdgpu_dm.h"
0083 
0084 #if IS_ENABLED(CONFIG_X86)
0085 #include <asm/intel-family.h>
0086 #endif
0087 
/*
 * PCIe / SMU indirect register offsets and field masks used by the VI
 * ASPM code.  These are not provided by the generated register headers,
 * so they are defined locally here.
 */
#define ixPCIE_LC_L1_PM_SUBSTATE    0x100100C6
#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK    0x00000001L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK    0x00000002L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK    0x00000004L
#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK      0x00000008L
#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK      0x00000010L
#define ixPCIE_L1_PM_SUB_CNTL   0x378
#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK  0x00000004L
#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK  0x00000008L
#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK    0x00000001L
#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK    0x00000002L
#define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK     0x00200000L
#define LINK_CAP    0x64
#define PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK  0x00040000L
#define ixCPM_CONTROL   0x1400118
#define ixPCIE_LC_CNTL7 0x100100BC
#define PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK   0x00000400L
#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT 0x00000007
#define PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT  0x00000009
#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK   0x01000000L
/* Same offset as ixPCIE_L1_PM_SUB_CNTL above; kept for existing users. */
#define PCIE_L1_PM_SUB_CNTL 0x378
/*
 * True for Polaris10/11/12 parts with external revision id >= 0x6E (P22).
 * Arguments are parenthesized so the macro stays safe when callers pass
 * compound expressions.
 */
#define ASIC_IS_P22(asic_type, rid) (((asic_type) >= CHIP_POLARIS10) && \
                                    ((asic_type) <= CHIP_POLARIS12) && \
                                    ((rid) >= 0x6E))
/* Topaz */
/* Topaz exposes no encode codecs: empty list. */
static const struct amdgpu_video_codecs topaz_video_codecs_encode =
{
    .codec_count = 0,
    .codec_array = NULL,
};
0118 
/* Tonga, CZ, ST, Fiji */
/* Single H.264/AVC encode codec, up to 4096x2304. */
static const struct amdgpu_video_codec_info tonga_video_codecs_encode_array[] =
{
    {
        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
        .max_width = 4096,
        .max_height = 2304,
        .max_pixels_per_frame = 4096 * 2304,
        .max_level = 0,
    },
};

static const struct amdgpu_video_codecs tonga_video_codecs_encode =
{
    .codec_count = ARRAY_SIZE(tonga_video_codecs_encode_array),
    .codec_array = tonga_video_codecs_encode_array,
};
0136 
/* Polaris */
/* H.264/AVC and HEVC encode codecs, both up to 4096x2304. */
static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[] =
{
    {
        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
        .max_width = 4096,
        .max_height = 2304,
        .max_pixels_per_frame = 4096 * 2304,
        .max_level = 0,
    },
    {
        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
        .max_width = 4096,
        .max_height = 2304,
        .max_pixels_per_frame = 4096 * 2304,
        .max_level = 0,
    },
};

static const struct amdgpu_video_codecs polaris_video_codecs_encode =
{
    .codec_count = ARRAY_SIZE(polaris_video_codecs_encode_array),
    .codec_array = polaris_video_codecs_encode_array,
};
0161 
/* Topaz */
/* Topaz exposes no decode codecs either: empty list. */
static const struct amdgpu_video_codecs topaz_video_codecs_decode =
{
    .codec_count = 0,
    .codec_array = NULL,
};
0168 
/* Tonga */
/* Decode: MPEG2, MPEG4, H.264/AVC and VC1, all up to 4096x4096. */
static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
{
    {
        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
        .max_width = 4096,
        .max_height = 4096,
        .max_pixels_per_frame = 4096 * 4096,
        .max_level = 3,
    },
    {
        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
        .max_width = 4096,
        .max_height = 4096,
        .max_pixels_per_frame = 4096 * 4096,
        .max_level = 5,
    },
    {
        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
        .max_width = 4096,
        .max_height = 4096,
        .max_pixels_per_frame = 4096 * 4096,
        .max_level = 52,
    },
    {
        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
        .max_width = 4096,
        .max_height = 4096,
        .max_pixels_per_frame = 4096 * 4096,
        .max_level = 4,
    },
};

static const struct amdgpu_video_codecs tonga_video_codecs_decode =
{
    .codec_count = ARRAY_SIZE(tonga_video_codecs_decode_array),
    .codec_array = tonga_video_codecs_decode_array,
};
0207 
/* CZ, ST, Fiji, Polaris */
/* Decode: Tonga's set plus HEVC and JPEG, all up to 4096x4096. */
static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
{
    {
        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
        .max_width = 4096,
        .max_height = 4096,
        .max_pixels_per_frame = 4096 * 4096,
        .max_level = 3,
    },
    {
        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
        .max_width = 4096,
        .max_height = 4096,
        .max_pixels_per_frame = 4096 * 4096,
        .max_level = 5,
    },
    {
        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
        .max_width = 4096,
        .max_height = 4096,
        .max_pixels_per_frame = 4096 * 4096,
        .max_level = 52,
    },
    {
        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
        .max_width = 4096,
        .max_height = 4096,
        .max_pixels_per_frame = 4096 * 4096,
        .max_level = 4,
    },
    {
        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
        .max_width = 4096,
        .max_height = 4096,
        .max_pixels_per_frame = 4096 * 4096,
        .max_level = 186,
    },
    {
        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
        .max_width = 4096,
        .max_height = 4096,
        .max_pixels_per_frame = 4096 * 4096,
        .max_level = 0,
    },
};

static const struct amdgpu_video_codecs cz_video_codecs_decode =
{
    .codec_count = ARRAY_SIZE(cz_video_codecs_decode_array),
    .codec_array = cz_video_codecs_decode_array,
};
0260 
0261 static int vi_query_video_codecs(struct amdgpu_device *adev, bool encode,
0262                  const struct amdgpu_video_codecs **codecs)
0263 {
0264     switch (adev->asic_type) {
0265     case CHIP_TOPAZ:
0266         if (encode)
0267             *codecs = &topaz_video_codecs_encode;
0268         else
0269             *codecs = &topaz_video_codecs_decode;
0270         return 0;
0271     case CHIP_TONGA:
0272         if (encode)
0273             *codecs = &tonga_video_codecs_encode;
0274         else
0275             *codecs = &tonga_video_codecs_decode;
0276         return 0;
0277     case CHIP_POLARIS10:
0278     case CHIP_POLARIS11:
0279     case CHIP_POLARIS12:
0280     case CHIP_VEGAM:
0281         if (encode)
0282             *codecs = &polaris_video_codecs_encode;
0283         else
0284             *codecs = &cz_video_codecs_decode;
0285         return 0;
0286     case CHIP_FIJI:
0287     case CHIP_CARRIZO:
0288     case CHIP_STONEY:
0289         if (encode)
0290             *codecs = &tonga_video_codecs_encode;
0291         else
0292             *codecs = &cz_video_codecs_decode;
0293         return 0;
0294     default:
0295         return -EINVAL;
0296     }
0297 }
0298 
/*
 * Indirect registers accessor
 *
 * The PCIE register space is reached through the mmPCIE_INDEX /
 * mmPCIE_DATA pair.  pcie_idx_lock keeps the index and data accesses of
 * concurrent callers from interleaving; the dummy read-backs after each
 * write appear to post the write before the next access — confirm
 * against the hardware docs if modifying this sequence.
 */

/* Read a PCIE indirect register. */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
    unsigned long flags;
    u32 r;

    spin_lock_irqsave(&adev->pcie_idx_lock, flags);
    WREG32_NO_KIQ(mmPCIE_INDEX, reg);
    (void)RREG32_NO_KIQ(mmPCIE_INDEX);  /* dummy read-back of the index */
    r = RREG32_NO_KIQ(mmPCIE_DATA);
    spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
    return r;
}

/* Write a PCIE indirect register. */
static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
    unsigned long flags;

    spin_lock_irqsave(&adev->pcie_idx_lock, flags);
    WREG32_NO_KIQ(mmPCIE_INDEX, reg);
    (void)RREG32_NO_KIQ(mmPCIE_INDEX);  /* dummy read-back of the index */
    WREG32_NO_KIQ(mmPCIE_DATA, v);
    (void)RREG32_NO_KIQ(mmPCIE_DATA);   /* dummy read-back of the data */
    spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
0326 
/* Read an SMC indirect register via the SMC_IND_INDEX_11/DATA_11 pair. */
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
    unsigned long flags;
    u32 r;

    spin_lock_irqsave(&adev->smc_idx_lock, flags);
    WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
    r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
    spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
    return r;
}

/* Write an SMC indirect register via the SMC_IND_INDEX_11/DATA_11 pair. */
static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
    unsigned long flags;

    spin_lock_irqsave(&adev->smc_idx_lock, flags);
    WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
    WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
    spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
0348 
/* smu_8_0_d.h */
/* MP0PUB index/data pair used for SMC access on Carrizo-class parts. */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

/* Read an SMC indirect register on CZ via the MP0PUB index/data pair. */
static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
    unsigned long flags;
    u32 r;

    spin_lock_irqsave(&adev->smc_idx_lock, flags);
    WREG32(mmMP0PUB_IND_INDEX, (reg));
    r = RREG32(mmMP0PUB_IND_DATA);
    spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
    return r;
}

/* Write an SMC indirect register on CZ via the MP0PUB index/data pair. */
static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
    unsigned long flags;

    spin_lock_irqsave(&adev->smc_idx_lock, flags);
    WREG32(mmMP0PUB_IND_INDEX, (reg));
    WREG32(mmMP0PUB_IND_DATA, (v));
    spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
0374 
/* Read a UVD context register; the index is masked to 9 bits. */
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
    unsigned long flags;
    u32 r;

    spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
    WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
    r = RREG32(mmUVD_CTX_DATA);
    spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
    return r;
}

/* Write a UVD context register; the index is masked to 9 bits. */
static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
    unsigned long flags;

    spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
    WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
    WREG32(mmUVD_CTX_DATA, (v));
    spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
0396 
/* Read a DIDT indirect register. */
static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
    unsigned long flags;
    u32 r;

    spin_lock_irqsave(&adev->didt_idx_lock, flags);
    WREG32(mmDIDT_IND_INDEX, (reg));
    r = RREG32(mmDIDT_IND_DATA);
    spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
    return r;
}

/* Write a DIDT indirect register. */
static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
    unsigned long flags;

    spin_lock_irqsave(&adev->didt_idx_lock, flags);
    WREG32(mmDIDT_IND_INDEX, (reg));
    WREG32(mmDIDT_IND_DATA, (v));
    spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
0418 
/* Read a GC_CAC indirect register. */
static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
    unsigned long flags;
    u32 r;

    spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
    WREG32(mmGC_CAC_IND_INDEX, (reg));
    r = RREG32(mmGC_CAC_IND_DATA);
    spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
    return r;
}

/* Write a GC_CAC indirect register. */
static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
    unsigned long flags;

    spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
    WREG32(mmGC_CAC_IND_INDEX, (reg));
    WREG32(mmGC_CAC_IND_DATA, (v));
    spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
0440 
0441 
/*
 * Per-ASIC clock/power-gating "golden" register tables.  Entries come in
 * groups of three u32s (register, mask, value — consumed by
 * amdgpu_device_program_register_sequence() in vi_init_golden_registers()).
 */
static const u32 tonga_mgcg_cgcg_init[] =
{
    mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
    mmPCIE_INDEX, 0xffffffff, 0x0140001c,
    mmPCIE_DATA, 0x000f0000, 0x00000000,
    mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
    mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
    mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
    mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
    mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
    mmPCIE_INDEX, 0xffffffff, 0x0140001c,
    mmPCIE_DATA, 0x000f0000, 0x00000000,
    mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
    mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
    mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
    mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
    mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
    mmPCIE_DATA, 0x000f0000, 0x00000000,
    mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
    mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
    mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
    mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
    mmPCIE_INDEX, 0xffffffff, 0x0140001c,
    mmPCIE_DATA, 0x000f0000, 0x00000000,
    mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
    mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
    mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
    mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
    mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
0488 
0489 static void vi_init_golden_registers(struct amdgpu_device *adev)
0490 {
0491     /* Some of the registers might be dependent on GRBM_GFX_INDEX */
0492     mutex_lock(&adev->grbm_idx_mutex);
0493 
0494     if (amdgpu_sriov_vf(adev)) {
0495         xgpu_vi_init_golden_registers(adev);
0496         mutex_unlock(&adev->grbm_idx_mutex);
0497         return;
0498     }
0499 
0500     switch (adev->asic_type) {
0501     case CHIP_TOPAZ:
0502         amdgpu_device_program_register_sequence(adev,
0503                             iceland_mgcg_cgcg_init,
0504                             ARRAY_SIZE(iceland_mgcg_cgcg_init));
0505         break;
0506     case CHIP_FIJI:
0507         amdgpu_device_program_register_sequence(adev,
0508                             fiji_mgcg_cgcg_init,
0509                             ARRAY_SIZE(fiji_mgcg_cgcg_init));
0510         break;
0511     case CHIP_TONGA:
0512         amdgpu_device_program_register_sequence(adev,
0513                             tonga_mgcg_cgcg_init,
0514                             ARRAY_SIZE(tonga_mgcg_cgcg_init));
0515         break;
0516     case CHIP_CARRIZO:
0517         amdgpu_device_program_register_sequence(adev,
0518                             cz_mgcg_cgcg_init,
0519                             ARRAY_SIZE(cz_mgcg_cgcg_init));
0520         break;
0521     case CHIP_STONEY:
0522         amdgpu_device_program_register_sequence(adev,
0523                             stoney_mgcg_cgcg_init,
0524                             ARRAY_SIZE(stoney_mgcg_cgcg_init));
0525         break;
0526     case CHIP_POLARIS10:
0527     case CHIP_POLARIS11:
0528     case CHIP_POLARIS12:
0529     case CHIP_VEGAM:
0530     default:
0531         break;
0532     }
0533     mutex_unlock(&adev->grbm_idx_mutex);
0534 }
0535 
/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
    u32 reference_clock = adev->clock.spll.reference_freq;
    u32 tmp;

    /* APUs report the SPLL reference clock directly. */
    if (adev->flags & AMD_IS_APU)
        return reference_clock;

    /* If TCLK is muxed to XCLK the reference is a fixed 1000 (units per
     * adev->clock — presumably 10 kHz steps; confirm against callers). */
    tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
    if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
        return 1000;

    /* Crystal input may be divided by 4 before reaching the engine. */
    tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
    if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
        return reference_clock / 4;

    return reference_clock;
}
0562 
0563 /**
0564  * vi_srbm_select - select specific register instances
0565  *
0566  * @adev: amdgpu_device pointer
0567  * @me: selected ME (micro engine)
0568  * @pipe: pipe
0569  * @queue: queue
0570  * @vmid: VMID
0571  *
0572  * Switches the currently active registers instances.  Some
0573  * registers are instanced per VMID, others are instanced per
0574  * me/pipe/queue combination.
0575  */
0576 void vi_srbm_select(struct amdgpu_device *adev,
0577              u32 me, u32 pipe, u32 queue, u32 vmid)
0578 {
0579     u32 srbm_gfx_cntl = 0;
0580     srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
0581     srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
0582     srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
0583     srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
0584     WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
0585 }
0586 
/* vi_vga_set_state - VGA access toggle callback; not implemented on VI. */
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
    /* todo */
}
0591 
/*
 * vi_read_disabled_bios - read the VBIOS while the ROM is disabled
 *
 * @adev: amdgpu_device pointer
 *
 * Temporarily enables the BIOS ROM (and, on ASICs with display, turns
 * off VGA mode) so amdgpu_read_bios() can see the image, then restores
 * every register that was modified.  Returns true if a BIOS was read.
 */
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
    u32 bus_cntl;
    u32 d1vga_control = 0;
    u32 d2vga_control = 0;
    u32 vga_render_control = 0;
    u32 rom_cntl;
    bool r;

    /* save the state of everything we are about to modify */
    bus_cntl = RREG32(mmBUS_CNTL);
    if (adev->mode_info.num_crtc) {
        d1vga_control = RREG32(mmD1VGA_CONTROL);
        d2vga_control = RREG32(mmD2VGA_CONTROL);
        vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
    }
    rom_cntl = RREG32_SMC(ixROM_CNTL);

    /* enable the rom */
    WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
    if (adev->mode_info.num_crtc) {
        /* Disable VGA mode */
        WREG32(mmD1VGA_CONTROL,
               (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
                      D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
        WREG32(mmD2VGA_CONTROL,
               (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
                      D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
        WREG32(mmVGA_RENDER_CONTROL,
               (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
    }
    WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

    r = amdgpu_read_bios(adev);

    /* restore regs */
    WREG32(mmBUS_CNTL, bus_cntl);
    if (adev->mode_info.num_crtc) {
        WREG32(mmD1VGA_CONTROL, d1vga_control);
        WREG32(mmD2VGA_CONTROL, d2vga_control);
        WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
    }
    WREG32_SMC(ixROM_CNTL, rom_cntl);
    return r;
}
0636 
/*
 * vi_read_bios_from_rom - read the VBIOS image through the SMC ROM port
 *
 * @adev: amdgpu_device pointer
 * @bios: destination buffer; must hold ALIGN(length_bytes, 4) bytes,
 *        since whole dwords are copied
 * @length_bytes: number of bytes to read
 *
 * Returns true on success, false for bad arguments or on APUs (whose
 * vbios lives inside the system bios image).
 */
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
                  u8 *bios, u32 length_bytes)
{
    u32 *dw_ptr;
    unsigned long flags;
    u32 i, length_dw;

    if (bios == NULL)
        return false;
    if (length_bytes == 0)
        return false;
    /* APU vbios image is part of sbios image */
    if (adev->flags & AMD_IS_APU)
        return false;

    dw_ptr = (u32 *)bios;
    length_dw = ALIGN(length_bytes, 4) / 4;
    /* take the smc lock since we are using the smc index */
    spin_lock_irqsave(&adev->smc_idx_lock, flags);
    /* set rom index to 0 */
    WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
    WREG32(mmSMC_IND_DATA_11, 0);
    /* set index to data for continuous read */
    WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
    for (i = 0; i < length_dw; i++)
        dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
    spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

    return true;
}
0667 
/*
 * Registers userspace is allowed to read through vi_read_register().
 * The optional second initializer is grbm_indexed: true means the value
 * is banked by GRBM_GFX_INDEX (se/sh); entries without it default to
 * false.
 */
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
    {mmGRBM_STATUS},
    {mmGRBM_STATUS2},
    {mmGRBM_STATUS_SE0},
    {mmGRBM_STATUS_SE1},
    {mmGRBM_STATUS_SE2},
    {mmGRBM_STATUS_SE3},
    {mmSRBM_STATUS},
    {mmSRBM_STATUS2},
    {mmSRBM_STATUS3},
    {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
    {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
    {mmCP_STAT},
    {mmCP_STALLED_STAT1},
    {mmCP_STALLED_STAT2},
    {mmCP_STALLED_STAT3},
    {mmCP_CPF_BUSY_STAT},
    {mmCP_CPF_STALLED_STAT1},
    {mmCP_CPF_STATUS},
    {mmCP_CPC_BUSY_STAT},
    {mmCP_CPC_STALLED_STAT1},
    {mmCP_CPC_STATUS},
    {mmGB_ADDR_CONFIG},
    {mmMC_ARB_RAMCFG},
    {mmGB_TILE_MODE0},
    {mmGB_TILE_MODE1},
    {mmGB_TILE_MODE2},
    {mmGB_TILE_MODE3},
    {mmGB_TILE_MODE4},
    {mmGB_TILE_MODE5},
    {mmGB_TILE_MODE6},
    {mmGB_TILE_MODE7},
    {mmGB_TILE_MODE8},
    {mmGB_TILE_MODE9},
    {mmGB_TILE_MODE10},
    {mmGB_TILE_MODE11},
    {mmGB_TILE_MODE12},
    {mmGB_TILE_MODE13},
    {mmGB_TILE_MODE14},
    {mmGB_TILE_MODE15},
    {mmGB_TILE_MODE16},
    {mmGB_TILE_MODE17},
    {mmGB_TILE_MODE18},
    {mmGB_TILE_MODE19},
    {mmGB_TILE_MODE20},
    {mmGB_TILE_MODE21},
    {mmGB_TILE_MODE22},
    {mmGB_TILE_MODE23},
    {mmGB_TILE_MODE24},
    {mmGB_TILE_MODE25},
    {mmGB_TILE_MODE26},
    {mmGB_TILE_MODE27},
    {mmGB_TILE_MODE28},
    {mmGB_TILE_MODE29},
    {mmGB_TILE_MODE30},
    {mmGB_TILE_MODE31},
    {mmGB_MACROTILE_MODE0},
    {mmGB_MACROTILE_MODE1},
    {mmGB_MACROTILE_MODE2},
    {mmGB_MACROTILE_MODE3},
    {mmGB_MACROTILE_MODE4},
    {mmGB_MACROTILE_MODE5},
    {mmGB_MACROTILE_MODE6},
    {mmGB_MACROTILE_MODE7},
    {mmGB_MACROTILE_MODE8},
    {mmGB_MACROTILE_MODE9},
    {mmGB_MACROTILE_MODE10},
    {mmGB_MACROTILE_MODE11},
    {mmGB_MACROTILE_MODE12},
    {mmGB_MACROTILE_MODE13},
    {mmGB_MACROTILE_MODE14},
    {mmGB_MACROTILE_MODE15},
    {mmCC_RB_BACKEND_DISABLE, true},
    {mmGC_USER_RB_BACKEND_DISABLE, true},
    {mmGB_BACKEND_MAP, false},
    {mmPA_SC_RASTER_CONFIG, true},
    {mmPA_SC_RASTER_CONFIG_1, true},
};
0746 
/*
 * vi_get_register_value - fetch a register value, preferring cached copies
 *
 * @adev: amdgpu_device pointer
 * @indexed: true if the register is banked by GRBM_GFX_INDEX (se/sh)
 * @se_num: shader engine selector (0xffffffff means broadcast/current)
 * @sh_num: shader array selector (0xffffffff means broadcast/current)
 * @reg_offset: dword register offset
 *
 * Banked rb_config registers are served from values cached at gfx init;
 * other banked registers are read under grbm_idx_mutex with the
 * requested se/sh selected (and the selector restored afterwards).
 * Non-banked config registers (gb_addr_config, tile mode tables, ...)
 * come from the cached config; anything else is read from hardware.
 */
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
                      bool indexed, u32 se_num,
                      u32 sh_num, u32 reg_offset)
{
    if (indexed) {
        uint32_t val;
        unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
        unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

        /* These four are cached per se/sh — no MMIO access needed. */
        switch (reg_offset) {
        case mmCC_RB_BACKEND_DISABLE:
            return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
        case mmGC_USER_RB_BACKEND_DISABLE:
            return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
        case mmPA_SC_RASTER_CONFIG:
            return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
        case mmPA_SC_RASTER_CONFIG_1:
            return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
        }

        /* Select the requested bank, read, then restore broadcast. */
        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
            amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

        val = RREG32(reg_offset);

        if (se_num != 0xffffffff || sh_num != 0xffffffff)
            amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
    } else {
        unsigned idx;

        switch (reg_offset) {
        case mmGB_ADDR_CONFIG:
            return adev->gfx.config.gb_addr_config;
        case mmMC_ARB_RAMCFG:
            return adev->gfx.config.mc_arb_ramcfg;
        case mmGB_TILE_MODE0:
        case mmGB_TILE_MODE1:
        case mmGB_TILE_MODE2:
        case mmGB_TILE_MODE3:
        case mmGB_TILE_MODE4:
        case mmGB_TILE_MODE5:
        case mmGB_TILE_MODE6:
        case mmGB_TILE_MODE7:
        case mmGB_TILE_MODE8:
        case mmGB_TILE_MODE9:
        case mmGB_TILE_MODE10:
        case mmGB_TILE_MODE11:
        case mmGB_TILE_MODE12:
        case mmGB_TILE_MODE13:
        case mmGB_TILE_MODE14:
        case mmGB_TILE_MODE15:
        case mmGB_TILE_MODE16:
        case mmGB_TILE_MODE17:
        case mmGB_TILE_MODE18:
        case mmGB_TILE_MODE19:
        case mmGB_TILE_MODE20:
        case mmGB_TILE_MODE21:
        case mmGB_TILE_MODE22:
        case mmGB_TILE_MODE23:
        case mmGB_TILE_MODE24:
        case mmGB_TILE_MODE25:
        case mmGB_TILE_MODE26:
        case mmGB_TILE_MODE27:
        case mmGB_TILE_MODE28:
        case mmGB_TILE_MODE29:
        case mmGB_TILE_MODE30:
        case mmGB_TILE_MODE31:
            /* tile mode registers are contiguous; index from MODE0 */
            idx = (reg_offset - mmGB_TILE_MODE0);
            return adev->gfx.config.tile_mode_array[idx];
        case mmGB_MACROTILE_MODE0:
        case mmGB_MACROTILE_MODE1:
        case mmGB_MACROTILE_MODE2:
        case mmGB_MACROTILE_MODE3:
        case mmGB_MACROTILE_MODE4:
        case mmGB_MACROTILE_MODE5:
        case mmGB_MACROTILE_MODE6:
        case mmGB_MACROTILE_MODE7:
        case mmGB_MACROTILE_MODE8:
        case mmGB_MACROTILE_MODE9:
        case mmGB_MACROTILE_MODE10:
        case mmGB_MACROTILE_MODE11:
        case mmGB_MACROTILE_MODE12:
        case mmGB_MACROTILE_MODE13:
        case mmGB_MACROTILE_MODE14:
        case mmGB_MACROTILE_MODE15:
            idx = (reg_offset - mmGB_MACROTILE_MODE0);
            return adev->gfx.config.macrotile_mode_array[idx];
        default:
            return RREG32(reg_offset);
        }
    }
}
0842 
0843 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
0844                 u32 sh_num, u32 reg_offset, u32 *value)
0845 {
0846     uint32_t i;
0847 
0848     *value = 0;
0849     for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
0850         bool indexed = vi_allowed_read_registers[i].grbm_indexed;
0851 
0852         if (reg_offset != vi_allowed_read_registers[i].reg_offset)
0853             continue;
0854 
0855         *value = vi_get_register_value(adev, indexed, se_num, sh_num,
0856                            reg_offset);
0857         return 0;
0858     }
0859     return -EINVAL;
0860 }
0861 
/**
 * vi_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
    u32 i;
    int r = -EINVAL;

    /* flag the engine as hung in the atombios scratch registers */
    amdgpu_atombios_scratch_regs_engine_hung(adev, true);

    /* disable BM */
    pci_clear_master(adev->pdev);
    /* reset */
    amdgpu_device_pci_config_reset(adev);

    udelay(100);

    /* wait for asic to come out of reset */
    for (i = 0; i < adev->usec_timeout; i++) {
        /* CONFIG_MEMSIZE reads as all-ones until the ASIC responds */
        if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
            /* enable BM */
            pci_set_master(adev->pdev);
            adev->has_hw_reset = true;
            r = 0;
            break;
        }
        udelay(1);
    }

    amdgpu_atombios_scratch_regs_engine_hung(adev, false);

    return r;
}
0901 
0902 static bool vi_asic_supports_baco(struct amdgpu_device *adev)
0903 {
0904     switch (adev->asic_type) {
0905     case CHIP_FIJI:
0906     case CHIP_TONGA:
0907     case CHIP_POLARIS10:
0908     case CHIP_POLARIS11:
0909     case CHIP_POLARIS12:
0910     case CHIP_TOPAZ:
0911         return amdgpu_dpm_is_baco_supported(adev);
0912     default:
0913         return false;
0914     }
0915 }
0916 
0917 static enum amd_reset_method
0918 vi_asic_reset_method(struct amdgpu_device *adev)
0919 {
0920     bool baco_reset;
0921 
0922     if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
0923         amdgpu_reset_method == AMD_RESET_METHOD_BACO)
0924         return amdgpu_reset_method;
0925 
0926     if (amdgpu_reset_method != -1)
0927         dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
0928                   amdgpu_reset_method);
0929 
0930     switch (adev->asic_type) {
0931     case CHIP_FIJI:
0932     case CHIP_TONGA:
0933     case CHIP_POLARIS10:
0934     case CHIP_POLARIS11:
0935     case CHIP_POLARIS12:
0936     case CHIP_TOPAZ:
0937         baco_reset = amdgpu_dpm_is_baco_supported(adev);
0938         break;
0939     default:
0940         baco_reset = false;
0941         break;
0942     }
0943 
0944     if (baco_reset)
0945         return AMD_RESET_METHOD_BACO;
0946     else
0947         return AMD_RESET_METHOD_LEGACY;
0948 }
0949 
0950 /**
0951  * vi_asic_reset - soft reset GPU
0952  *
0953  * @adev: amdgpu_device pointer
0954  *
0955  * Look up which blocks are hung and attempt
0956  * to reset them.
0957  * Returns 0 for success.
0958  */
0959 static int vi_asic_reset(struct amdgpu_device *adev)
0960 {
0961     int r;
0962 
0963     /* APUs don't have full asic reset */
0964     if (adev->flags & AMD_IS_APU)
0965         return 0;
0966 
0967     if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
0968         dev_info(adev->dev, "BACO reset\n");
0969         r = amdgpu_dpm_baco_reset(adev);
0970     } else {
0971         dev_info(adev->dev, "PCI CONFIG reset\n");
0972         r = vi_asic_pci_config_reset(adev);
0973     }
0974 
0975     return r;
0976 }
0977 
/* Read back the memory size the VBIOS latched into CONFIG_MEMSIZE. */
static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
    return RREG32(mmCONFIG_MEMSIZE);
}
0982 
/* Program one UVD clock: look up the dividers through atombios, write
 * the post divider into @cntl_reg, then poll @status_reg until the
 * clock reports ready.
 * Returns 0 on success, -ETIMEDOUT if the status never asserts, or the
 * atombios error code.
 */
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
            u32 cntl_reg, u32 status_reg)
{
    int r, i;
    struct atom_clock_dividers dividers;
    uint32_t tmp;

    r = amdgpu_atombios_get_clock_dividers(adev,
                           COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
                           clock, false, &dividers);
    if (r)
        return r;

    tmp = RREG32_SMC(cntl_reg);

    /* APUs only clear the divider field; dGPUs also clear the
     * direct-control enable bit before programming the new divider
     */
    if (adev->flags & AMD_IS_APU)
        tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
    else
        tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
                CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
    tmp |= dividers.post_divider;
    WREG32_SMC(cntl_reg, tmp);

    /* poll up to 100 * 10ms for the clock status bit */
    for (i = 0; i < 100; i++) {
        tmp = RREG32_SMC(status_reg);
        if (adev->flags & AMD_IS_APU) {
            /* no symbolic mask for the APU status register here;
             * bit 16 is presumably the ready flag — confirm
             */
            if (tmp & 0x10000)
                break;
        } else {
            if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
                break;
        }
        mdelay(10);
    }
    if (i == 100)
        return -ETIMEDOUT;
    return 0;
}
1021 
1022 #define ixGNB_CLK1_DFS_CNTL 0xD82200F0
1023 #define ixGNB_CLK1_STATUS   0xD822010C
1024 #define ixGNB_CLK2_DFS_CNTL 0xD8220110
1025 #define ixGNB_CLK2_STATUS   0xD822012C
1026 #define ixGNB_CLK3_DFS_CNTL 0xD8220130
1027 #define ixGNB_CLK3_STATUS   0xD822014C
1028 
1029 static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
1030 {
1031     int r;
1032 
1033     if (adev->flags & AMD_IS_APU) {
1034         r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
1035         if (r)
1036             return r;
1037 
1038         r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
1039         if (r)
1040             return r;
1041     } else {
1042         r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
1043         if (r)
1044             return r;
1045 
1046         r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
1047         if (r)
1048             return r;
1049     }
1050 
1051     return 0;
1052 }
1053 
1054 static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
1055 {
1056     int r, i;
1057     struct atom_clock_dividers dividers;
1058     u32 tmp;
1059     u32 reg_ctrl;
1060     u32 reg_status;
1061     u32 status_mask;
1062     u32 reg_mask;
1063 
1064     if (adev->flags & AMD_IS_APU) {
1065         reg_ctrl = ixGNB_CLK3_DFS_CNTL;
1066         reg_status = ixGNB_CLK3_STATUS;
1067         status_mask = 0x00010000;
1068         reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
1069     } else {
1070         reg_ctrl = ixCG_ECLK_CNTL;
1071         reg_status = ixCG_ECLK_STATUS;
1072         status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
1073         reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
1074     }
1075 
1076     r = amdgpu_atombios_get_clock_dividers(adev,
1077                            COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1078                            ecclk, false, &dividers);
1079     if (r)
1080         return r;
1081 
1082     for (i = 0; i < 100; i++) {
1083         if (RREG32_SMC(reg_status) & status_mask)
1084             break;
1085         mdelay(10);
1086     }
1087 
1088     if (i == 100)
1089         return -ETIMEDOUT;
1090 
1091     tmp = RREG32_SMC(reg_ctrl);
1092     tmp &= ~reg_mask;
1093     tmp |= dividers.post_divider;
1094     WREG32_SMC(reg_ctrl, tmp);
1095 
1096     for (i = 0; i < 100; i++) {
1097         if (RREG32_SMC(reg_status) & status_mask)
1098             break;
1099         mdelay(10);
1100     }
1101 
1102     if (i == 100)
1103         return -ETIMEDOUT;
1104 
1105     return 0;
1106 }
1107 
1108 static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
1109 {
1110     if (pci_is_root_bus(adev->pdev->bus))
1111         return;
1112 
1113     if (amdgpu_pcie_gen2 == 0)
1114         return;
1115 
1116     if (adev->flags & AMD_IS_APU)
1117         return;
1118 
1119     if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
1120                     CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
1121         return;
1122 
1123     /* todo */
1124 }
1125 
1126 static void vi_enable_aspm(struct amdgpu_device *adev)
1127 {
1128     u32 data, orig;
1129 
1130     orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
1131     data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT <<
1132             PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
1133     data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT <<
1134             PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
1135     data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
1136     data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK;
1137     if (orig != data)
1138         WREG32_PCIE(ixPCIE_LC_CNTL, data);
1139 }
1140 
1141 static bool aspm_support_quirk_check(void)
1142 {
1143 #if IS_ENABLED(CONFIG_X86)
1144     struct cpuinfo_x86 *c = &cpu_data(0);
1145 
1146     return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
1147 #else
1148     return true;
1149 #endif
1150 }
1151 
1152 static void vi_program_aspm(struct amdgpu_device *adev)
1153 {
1154     u32 data, data1, orig;
1155     bool bL1SS = false;
1156     bool bClkReqSupport = true;
1157 
1158     if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
1159         return;
1160 
1161     if (adev->flags & AMD_IS_APU ||
1162         adev->asic_type < CHIP_POLARIS10)
1163         return;
1164 
1165     orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
1166     data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
1167     data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
1168     data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
1169     if (orig != data)
1170         WREG32_PCIE(ixPCIE_LC_CNTL, data);
1171 
1172     orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
1173     data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
1174     data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT;
1175     data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
1176     if (orig != data)
1177         WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);
1178 
1179     orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
1180     data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
1181     if (orig != data)
1182         WREG32_PCIE(ixPCIE_LC_CNTL3, data);
1183 
1184     orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
1185     data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
1186     if (orig != data)
1187         WREG32_PCIE(ixPCIE_P_CNTL, data);
1188 
1189     data = RREG32_PCIE(ixPCIE_LC_L1_PM_SUBSTATE);
1190     pci_read_config_dword(adev->pdev, PCIE_L1_PM_SUB_CNTL, &data1);
1191     if (data & PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK &&
1192         (data & (PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK |
1193             PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK |
1194             PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK |
1195             PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK))) {
1196         bL1SS = true;
1197     } else if (data1 & (PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK |
1198         PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK |
1199         PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK |
1200         PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK)) {
1201         bL1SS = true;
1202     }
1203 
1204     orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6);
1205     data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK;
1206     if (orig != data)
1207         WREG32_PCIE(ixPCIE_LC_CNTL6, data);
1208 
1209     orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
1210     data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
1211     if (orig != data)
1212         WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);
1213 
1214     pci_read_config_dword(adev->pdev, LINK_CAP, &data);
1215     if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK))
1216         bClkReqSupport = false;
1217 
1218     if (bClkReqSupport) {
1219         orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
1220         data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
1221         data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
1222                 (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
1223         if (orig != data)
1224             WREG32_SMC(ixTHM_CLK_CNTL, data);
1225 
1226         orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
1227         data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
1228             MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK);
1229         data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
1230                 (1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
1231         data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT);
1232         if (orig != data)
1233             WREG32_SMC(ixMISC_CLK_CTRL, data);
1234 
1235         orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
1236         data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK;
1237         if (orig != data)
1238             WREG32_SMC(ixCG_CLKPIN_CNTL, data);
1239 
1240         orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
1241         data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK;
1242         if (orig != data)
1243             WREG32_SMC(ixCG_CLKPIN_CNTL, data);
1244 
1245         orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
1246         data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
1247         data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
1248         if (orig != data)
1249             WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);
1250 
1251         orig = data = RREG32_PCIE(ixCPM_CONTROL);
1252         data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK |
1253                 CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK);
1254         if (orig != data)
1255             WREG32_PCIE(ixCPM_CONTROL, data);
1256 
1257         orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL);
1258         data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK;
1259         data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT);
1260         if (orig != data)
1261             WREG32_PCIE(ixPCIE_CONFIG_CNTL, data);
1262 
1263         orig = data = RREG32(mmBIF_CLK_CTRL);
1264         data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK;
1265         if (orig != data)
1266             WREG32(mmBIF_CLK_CTRL, data);
1267 
1268         orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7);
1269         data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK;
1270         if (orig != data)
1271             WREG32_PCIE(ixPCIE_LC_CNTL7, data);
1272 
1273         orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG);
1274         data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK;
1275         if (orig != data)
1276             WREG32_PCIE(ixPCIE_HW_DEBUG, data);
1277 
1278         orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
1279         data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
1280         data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
1281         if (bL1SS)
1282             data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
1283         if (orig != data)
1284             WREG32_PCIE(ixPCIE_LC_CNTL2, data);
1285 
1286     }
1287 
1288     vi_enable_aspm(adev);
1289 
1290     data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
1291     data1 = RREG32_PCIE(ixPCIE_LC_STATUS1);
1292     if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) &&
1293         data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK &&
1294         data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) {
1295         orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
1296         data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
1297         if (orig != data)
1298             WREG32_PCIE(ixPCIE_LC_CNTL, data);
1299     }
1300 
1301     if ((adev->asic_type == CHIP_POLARIS12 &&
1302         !(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) ||
1303         ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) {
1304         orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL);
1305         data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK;
1306         if (orig != data)
1307             WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data);
1308     }
1309 }
1310 
1311 static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
1312                     bool enable)
1313 {
1314     u32 tmp;
1315 
1316     /* not necessary on CZ */
1317     if (adev->flags & AMD_IS_APU)
1318         return;
1319 
1320     tmp = RREG32(mmBIF_DOORBELL_APER_EN);
1321     if (enable)
1322         tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
1323     else
1324         tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
1325 
1326     WREG32(mmBIF_DOORBELL_APER_EN, tmp);
1327 }
1328 
1329 #define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
1330 #define ATI_REV_ID_FUSE_MACRO__SHIFT        9
1331 #define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00
1332 
1333 static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
1334 {
1335     if (adev->flags & AMD_IS_APU)
1336         return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
1337             >> ATI_REV_ID_FUSE_MACRO__SHIFT;
1338     else
1339         return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
1340             >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
1341 }
1342 
1343 static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
1344 {
1345     if (!ring || !ring->funcs->emit_wreg) {
1346         WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1347         RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
1348     } else {
1349         amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1350     }
1351 }
1352 
1353 static void vi_invalidate_hdp(struct amdgpu_device *adev,
1354                   struct amdgpu_ring *ring)
1355 {
1356     if (!ring || !ring->funcs->emit_wreg) {
1357         WREG32(mmHDP_DEBUG0, 1);
1358         RREG32(mmHDP_DEBUG0);
1359     } else {
1360         amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
1361     }
1362 }
1363 
1364 static bool vi_need_full_reset(struct amdgpu_device *adev)
1365 {
1366     switch (adev->asic_type) {
1367     case CHIP_CARRIZO:
1368     case CHIP_STONEY:
1369         /* CZ has hang issues with full reset at the moment */
1370         return false;
1371     case CHIP_FIJI:
1372     case CHIP_TONGA:
1373         /* XXX: soft reset should work on fiji and tonga */
1374         return true;
1375     case CHIP_POLARIS10:
1376     case CHIP_POLARIS11:
1377     case CHIP_POLARIS12:
1378     case CHIP_TOPAZ:
1379     default:
1380         /* change this when we support soft reset */
1381         return true;
1382     }
1383 }
1384 
1385 static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
1386                   uint64_t *count1)
1387 {
1388     uint32_t perfctr = 0;
1389     uint64_t cnt0_of, cnt1_of;
1390     int tmp;
1391 
1392     /* This reports 0 on APUs, so return to avoid writing/reading registers
1393      * that may or may not be different from their GPU counterparts
1394      */
1395     if (adev->flags & AMD_IS_APU)
1396         return;
1397 
1398     /* Set the 2 events that we wish to watch, defined above */
1399     /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
1400     perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
1401     perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
1402 
1403     /* Write to enable desired perf counters */
1404     WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
1405     /* Zero out and enable the perf counters
1406      * Write 0x5:
1407      * Bit 0 = Start all counters(1)
1408      * Bit 2 = Global counter reset enable(1)
1409      */
1410     WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
1411 
1412     msleep(1000);
1413 
1414     /* Load the shadow and disable the perf counters
1415      * Write 0x2:
1416      * Bit 0 = Stop counters(0)
1417      * Bit 1 = Load the shadow counters(1)
1418      */
1419     WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
1420 
1421     /* Read register values to get any >32bit overflow */
1422     tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
1423     cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
1424     cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
1425 
1426     /* Get the values and add the overflow */
1427     *count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
1428     *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
1429 }
1430 
1431 static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
1432 {
1433     uint64_t nak_r, nak_g;
1434 
1435     /* Get the number of NAKs received and generated */
1436     nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
1437     nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
1438 
1439     /* Add the total number of NAKs, i.e the number of replays */
1440     return (nak_r + nak_g);
1441 }
1442 
1443 static bool vi_need_reset_on_init(struct amdgpu_device *adev)
1444 {
1445     u32 clock_cntl, pc;
1446 
1447     if (adev->flags & AMD_IS_APU)
1448         return false;
1449 
1450     /* check if the SMC is already running */
1451     clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
1452     pc = RREG32_SMC(ixSMC_PC_C);
1453     if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
1454         (0x20100 <= pc))
1455         return true;
1456 
1457     return false;
1458 }
1459 
/* No pre-asic-init work is needed on VI parts; hook intentionally empty. */
static void vi_pre_asic_init(struct amdgpu_device *adev)
{
}
1463 
/* ASIC-level callbacks wired into the common amdgpu core for VI parts */
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
    .read_disabled_bios = &vi_read_disabled_bios,
    .read_bios_from_rom = &vi_read_bios_from_rom,
    .read_register = &vi_read_register,
    .reset = &vi_asic_reset,
    .reset_method = &vi_asic_reset_method,
    .set_vga_state = &vi_vga_set_state,
    .get_xclk = &vi_get_xclk,
    .set_uvd_clocks = &vi_set_uvd_clocks,
    .set_vce_clocks = &vi_set_vce_clocks,
    .get_config_memsize = &vi_get_config_memsize,
    .flush_hdp = &vi_flush_hdp,
    .invalidate_hdp = &vi_invalidate_hdp,
    .need_full_reset = &vi_need_full_reset,
    .init_doorbell_index = &legacy_doorbell_index_init,
    .get_pcie_usage = &vi_get_pcie_usage,
    .need_reset_on_init = &vi_need_reset_on_init,
    .get_pcie_replay_count = &vi_get_pcie_replay_count,
    .supports_baco = &vi_asic_supports_baco,
    .pre_asic_init = &vi_pre_asic_init,
    .query_video_codecs = &vi_query_video_codecs,
};
1487 
/* Carrizo PCI revision ids that correspond to Bristol Ridge parts.
 * The argument is parenthesized so expressions like CZ_REV_BRISTOL(x + y)
 * expand correctly.
 */
#define CZ_REV_BRISTOL(rev)  \
    (((rev) >= 0xC8 && (rev) <= 0xCE) || ((rev) >= 0xE1 && (rev) <= 0xE6))
1490 
/* Early init for the common VI IP block: install the indirect register
 * accessors, the asic callback table, read the hardware rev id, and set
 * the per-chip clock-gating (cg_flags) and power-gating (pg_flags)
 * capability masks plus the external (marketing) revision id.
 * Returns 0 on success, -EINVAL for unsupported asic types.
 */
static int vi_common_early_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    /* APUs use the CZ SMC register accessors, dGPUs the VI ones */
    if (adev->flags & AMD_IS_APU) {
        adev->smc_rreg = &cz_smc_rreg;
        adev->smc_wreg = &cz_smc_wreg;
    } else {
        adev->smc_rreg = &vi_smc_rreg;
        adev->smc_wreg = &vi_smc_wreg;
    }
    adev->pcie_rreg = &vi_pcie_rreg;
    adev->pcie_wreg = &vi_pcie_wreg;
    adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
    adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
    adev->didt_rreg = &vi_didt_rreg;
    adev->didt_wreg = &vi_didt_wreg;
    adev->gc_cac_rreg = &vi_gc_cac_rreg;
    adev->gc_cac_wreg = &vi_gc_cac_wreg;

    adev->asic_funcs = &vi_asic_funcs;

    adev->rev_id = vi_get_rev_id(adev);
    /* 0xFF marks "unknown" until the switch below fills it in */
    adev->external_rev_id = 0xFF;
    switch (adev->asic_type) {
    case CHIP_TOPAZ:
        adev->cg_flags = 0;
        adev->pg_flags = 0;
        adev->external_rev_id = 0x1;
        break;
    case CHIP_FIJI:
        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_MGLS |
            AMD_CG_SUPPORT_GFX_RLC_LS |
            AMD_CG_SUPPORT_GFX_CP_LS |
            AMD_CG_SUPPORT_GFX_CGTS |
            AMD_CG_SUPPORT_GFX_CGTS_LS |
            AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_SDMA_MGCG |
            AMD_CG_SUPPORT_SDMA_LS |
            AMD_CG_SUPPORT_BIF_LS |
            AMD_CG_SUPPORT_HDP_MGCG |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_ROM_MGCG |
            AMD_CG_SUPPORT_MC_MGCG |
            AMD_CG_SUPPORT_MC_LS |
            AMD_CG_SUPPORT_UVD_MGCG;
        adev->pg_flags = 0;
        adev->external_rev_id = adev->rev_id + 0x3c;
        break;
    case CHIP_TONGA:
        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_SDMA_MGCG |
            AMD_CG_SUPPORT_SDMA_LS |
            AMD_CG_SUPPORT_BIF_LS |
            AMD_CG_SUPPORT_HDP_MGCG |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_ROM_MGCG |
            AMD_CG_SUPPORT_MC_MGCG |
            AMD_CG_SUPPORT_MC_LS |
            AMD_CG_SUPPORT_DRM_LS |
            AMD_CG_SUPPORT_UVD_MGCG;
        adev->pg_flags = 0;
        adev->external_rev_id = adev->rev_id + 0x14;
        break;
    case CHIP_POLARIS11:
        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_RLC_LS |
            AMD_CG_SUPPORT_GFX_CP_LS |
            AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_GFX_3D_CGCG |
            AMD_CG_SUPPORT_GFX_3D_CGLS |
            AMD_CG_SUPPORT_SDMA_MGCG |
            AMD_CG_SUPPORT_SDMA_LS |
            AMD_CG_SUPPORT_BIF_MGCG |
            AMD_CG_SUPPORT_BIF_LS |
            AMD_CG_SUPPORT_HDP_MGCG |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_ROM_MGCG |
            AMD_CG_SUPPORT_MC_MGCG |
            AMD_CG_SUPPORT_MC_LS |
            AMD_CG_SUPPORT_DRM_LS |
            AMD_CG_SUPPORT_UVD_MGCG |
            AMD_CG_SUPPORT_VCE_MGCG;
        adev->pg_flags = 0;
        adev->external_rev_id = adev->rev_id + 0x5A;
        break;
    case CHIP_POLARIS10:
        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_RLC_LS |
            AMD_CG_SUPPORT_GFX_CP_LS |
            AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_GFX_3D_CGCG |
            AMD_CG_SUPPORT_GFX_3D_CGLS |
            AMD_CG_SUPPORT_SDMA_MGCG |
            AMD_CG_SUPPORT_SDMA_LS |
            AMD_CG_SUPPORT_BIF_MGCG |
            AMD_CG_SUPPORT_BIF_LS |
            AMD_CG_SUPPORT_HDP_MGCG |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_ROM_MGCG |
            AMD_CG_SUPPORT_MC_MGCG |
            AMD_CG_SUPPORT_MC_LS |
            AMD_CG_SUPPORT_DRM_LS |
            AMD_CG_SUPPORT_UVD_MGCG |
            AMD_CG_SUPPORT_VCE_MGCG;
        adev->pg_flags = 0;
        adev->external_rev_id = adev->rev_id + 0x50;
        break;
    case CHIP_POLARIS12:
        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_RLC_LS |
            AMD_CG_SUPPORT_GFX_CP_LS |
            AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_GFX_3D_CGCG |
            AMD_CG_SUPPORT_GFX_3D_CGLS |
            AMD_CG_SUPPORT_SDMA_MGCG |
            AMD_CG_SUPPORT_SDMA_LS |
            AMD_CG_SUPPORT_BIF_MGCG |
            AMD_CG_SUPPORT_BIF_LS |
            AMD_CG_SUPPORT_HDP_MGCG |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_ROM_MGCG |
            AMD_CG_SUPPORT_MC_MGCG |
            AMD_CG_SUPPORT_MC_LS |
            AMD_CG_SUPPORT_DRM_LS |
            AMD_CG_SUPPORT_UVD_MGCG |
            AMD_CG_SUPPORT_VCE_MGCG;
        adev->pg_flags = 0;
        adev->external_rev_id = adev->rev_id + 0x64;
        break;
    case CHIP_VEGAM:
        /* clock gating intentionally disabled on VEGAM for now;
         * the commented-out flags below are the candidate set
         */
        adev->cg_flags = 0;
            /*AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_RLC_LS |
            AMD_CG_SUPPORT_GFX_CP_LS |
            AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_GFX_3D_CGCG |
            AMD_CG_SUPPORT_GFX_3D_CGLS |
            AMD_CG_SUPPORT_SDMA_MGCG |
            AMD_CG_SUPPORT_SDMA_LS |
            AMD_CG_SUPPORT_BIF_MGCG |
            AMD_CG_SUPPORT_BIF_LS |
            AMD_CG_SUPPORT_HDP_MGCG |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_ROM_MGCG |
            AMD_CG_SUPPORT_MC_MGCG |
            AMD_CG_SUPPORT_MC_LS |
            AMD_CG_SUPPORT_DRM_LS |
            AMD_CG_SUPPORT_UVD_MGCG |
            AMD_CG_SUPPORT_VCE_MGCG;*/
        adev->pg_flags = 0;
        adev->external_rev_id = adev->rev_id + 0x6E;
        break;
    case CHIP_CARRIZO:
        adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
            AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_MGLS |
            AMD_CG_SUPPORT_GFX_RLC_LS |
            AMD_CG_SUPPORT_GFX_CP_LS |
            AMD_CG_SUPPORT_GFX_CGTS |
            AMD_CG_SUPPORT_GFX_CGTS_LS |
            AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_BIF_LS |
            AMD_CG_SUPPORT_HDP_MGCG |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_SDMA_MGCG |
            AMD_CG_SUPPORT_SDMA_LS |
            AMD_CG_SUPPORT_VCE_MGCG;
        /* rev0 hardware requires workarounds to support PG */
        adev->pg_flags = 0;
        if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
            adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
                AMD_PG_SUPPORT_GFX_PIPELINE |
                AMD_PG_SUPPORT_CP |
                AMD_PG_SUPPORT_UVD |
                AMD_PG_SUPPORT_VCE;
        }
        adev->external_rev_id = adev->rev_id + 0x1;
        break;
    case CHIP_STONEY:
        adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
            AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_MGLS |
            AMD_CG_SUPPORT_GFX_RLC_LS |
            AMD_CG_SUPPORT_GFX_CP_LS |
            AMD_CG_SUPPORT_GFX_CGTS |
            AMD_CG_SUPPORT_GFX_CGTS_LS |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_BIF_LS |
            AMD_CG_SUPPORT_HDP_MGCG |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_SDMA_MGCG |
            AMD_CG_SUPPORT_SDMA_LS |
            AMD_CG_SUPPORT_VCE_MGCG;
        adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
            AMD_PG_SUPPORT_GFX_SMG |
            AMD_PG_SUPPORT_GFX_PIPELINE |
            AMD_PG_SUPPORT_CP |
            AMD_PG_SUPPORT_UVD |
            AMD_PG_SUPPORT_VCE;
        adev->external_rev_id = adev->rev_id + 0x61;
        break;
    default:
        /* FIXME: not supported yet */
        return -EINVAL;
    }

    /* under SR-IOV, apply virtualization settings and wire up the
     * host<->guest mailbox interrupt handlers
     */
    if (amdgpu_sriov_vf(adev)) {
        amdgpu_virt_init_setting(adev);
        xgpu_vi_mailbox_set_irq_funcs(adev);
    }

    return 0;
}
1714 
/* Late init: under SR-IOV, acquire the host mailbox interrupt. */
static int vi_common_late_init(void *handle)
{
    struct amdgpu_device *adev = handle;

    if (amdgpu_sriov_vf(adev))
        xgpu_vi_mailbox_get_irq(adev);

    return 0;
}
1724 
/* SW init: under SR-IOV, register the mailbox irq source id. */
static int vi_common_sw_init(void *handle)
{
    struct amdgpu_device *adev = handle;

    if (amdgpu_sriov_vf(adev))
        xgpu_vi_mailbox_add_irq_id(adev);

    return 0;
}
1734 
/* Nothing to tear down for the common VI block at sw_fini time. */
static int vi_common_sw_fini(void *handle)
{
    return 0;
}
1739 
1740 static int vi_common_hw_init(void *handle)
1741 {
1742     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1743 
1744     /* move the golden regs per IP block */
1745     vi_init_golden_registers(adev);
1746     /* enable pcie gen2/3 link */
1747     vi_pcie_gen3_enable(adev);
1748     /* enable aspm */
1749     vi_program_aspm(adev);
1750     /* enable the doorbell aperture */
1751     vi_enable_doorbell_aperture(adev, true);
1752 
1753     return 0;
1754 }
1755 
1756 static int vi_common_hw_fini(void *handle)
1757 {
1758     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1759 
1760     /* enable the doorbell aperture */
1761     vi_enable_doorbell_aperture(adev, false);
1762 
1763     if (amdgpu_sriov_vf(adev))
1764         xgpu_vi_mailbox_put_irq(adev);
1765 
1766     return 0;
1767 }
1768 
/* Suspend is simply a hw_fini for the common VI block. */
static int vi_common_suspend(void *handle)
{
    return vi_common_hw_fini(handle);
}
1775 
/* Resume is simply a hw_init for the common VI block. */
static int vi_common_resume(void *handle)
{
    return vi_common_hw_init(handle);
}
1782 
/* The common block tracks no busy state; always report idle. */
static bool vi_common_is_idle(void *handle)
{
    return true;
}
1787 
/* Nothing to wait on: the common block is always idle. */
static int vi_common_wait_for_idle(void *handle)
{
    return 0;
}
1792 
/* The common block has no soft-reset procedure; report success. */
static int vi_common_soft_reset(void *handle)
{
    return 0;
}
1797 
1798 static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
1799                            bool enable)
1800 {
1801     uint32_t temp, data;
1802 
1803     temp = data = RREG32_PCIE(ixPCIE_CNTL2);
1804 
1805     if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
1806         data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1807                 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1808                 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
1809     else
1810         data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1811                 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1812                 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
1813 
1814     if (temp != data)
1815         WREG32_PCIE(ixPCIE_CNTL2, data);
1816 }
1817 
1818 static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
1819                             bool enable)
1820 {
1821     uint32_t temp, data;
1822 
1823     temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
1824 
1825     if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
1826         data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1827     else
1828         data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1829 
1830     if (temp != data)
1831         WREG32(mmHDP_HOST_PATH_CNTL, data);
1832 }
1833 
1834 static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
1835                       bool enable)
1836 {
1837     uint32_t temp, data;
1838 
1839     temp = data = RREG32(mmHDP_MEM_POWER_LS);
1840 
1841     if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1842         data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1843     else
1844         data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1845 
1846     if (temp != data)
1847         WREG32(mmHDP_MEM_POWER_LS, data);
1848 }
1849 
1850 static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
1851                       bool enable)
1852 {
1853     uint32_t temp, data;
1854 
1855     temp = data = RREG32(0x157a);
1856 
1857     if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1858         data |= 1;
1859     else
1860         data &= ~1;
1861 
1862     if (temp != data)
1863         WREG32(0x157a, data);
1864 }
1865 
1866 
1867 static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1868                             bool enable)
1869 {
1870     uint32_t temp, data;
1871 
1872     temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1873 
1874     if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1875         data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1876                 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1877     else
1878         data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1879                 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1880 
1881     if (temp != data)
1882         WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
1883 }
1884 
1885 static int vi_common_set_clockgating_state_by_smu(void *handle,
1886                        enum amd_clockgating_state state)
1887 {
1888     uint32_t msg_id, pp_state = 0;
1889     uint32_t pp_support_state = 0;
1890     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1891 
1892     if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
1893         if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
1894             pp_support_state = PP_STATE_SUPPORT_LS;
1895             pp_state = PP_STATE_LS;
1896         }
1897         if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
1898             pp_support_state |= PP_STATE_SUPPORT_CG;
1899             pp_state |= PP_STATE_CG;
1900         }
1901         if (state == AMD_CG_STATE_UNGATE)
1902             pp_state = 0;
1903         msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1904                    PP_BLOCK_SYS_MC,
1905                    pp_support_state,
1906                    pp_state);
1907         amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1908     }
1909 
1910     if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
1911         if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
1912             pp_support_state = PP_STATE_SUPPORT_LS;
1913             pp_state = PP_STATE_LS;
1914         }
1915         if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
1916             pp_support_state |= PP_STATE_SUPPORT_CG;
1917             pp_state |= PP_STATE_CG;
1918         }
1919         if (state == AMD_CG_STATE_UNGATE)
1920             pp_state = 0;
1921         msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1922                    PP_BLOCK_SYS_SDMA,
1923                    pp_support_state,
1924                    pp_state);
1925         amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1926     }
1927 
1928     if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
1929         if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
1930             pp_support_state = PP_STATE_SUPPORT_LS;
1931             pp_state = PP_STATE_LS;
1932         }
1933         if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
1934             pp_support_state |= PP_STATE_SUPPORT_CG;
1935             pp_state |= PP_STATE_CG;
1936         }
1937         if (state == AMD_CG_STATE_UNGATE)
1938             pp_state = 0;
1939         msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1940                    PP_BLOCK_SYS_HDP,
1941                    pp_support_state,
1942                    pp_state);
1943         amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1944     }
1945 
1946 
1947     if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
1948         if (state == AMD_CG_STATE_UNGATE)
1949             pp_state = 0;
1950         else
1951             pp_state = PP_STATE_LS;
1952 
1953         msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1954                    PP_BLOCK_SYS_BIF,
1955                    PP_STATE_SUPPORT_LS,
1956                     pp_state);
1957         amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1958     }
1959     if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
1960         if (state == AMD_CG_STATE_UNGATE)
1961             pp_state = 0;
1962         else
1963             pp_state = PP_STATE_CG;
1964 
1965         msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1966                    PP_BLOCK_SYS_BIF,
1967                    PP_STATE_SUPPORT_CG,
1968                    pp_state);
1969         amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1970     }
1971 
1972     if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
1973 
1974         if (state == AMD_CG_STATE_UNGATE)
1975             pp_state = 0;
1976         else
1977             pp_state = PP_STATE_LS;
1978 
1979         msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1980                    PP_BLOCK_SYS_DRM,
1981                    PP_STATE_SUPPORT_LS,
1982                    pp_state);
1983         amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1984     }
1985 
1986     if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
1987 
1988         if (state == AMD_CG_STATE_UNGATE)
1989             pp_state = 0;
1990         else
1991             pp_state = PP_STATE_CG;
1992 
1993         msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1994                    PP_BLOCK_SYS_ROM,
1995                    PP_STATE_SUPPORT_CG,
1996                    pp_state);
1997         amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1998     }
1999     return 0;
2000 }
2001 
2002 static int vi_common_set_clockgating_state(void *handle,
2003                        enum amd_clockgating_state state)
2004 {
2005     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2006 
2007     if (amdgpu_sriov_vf(adev))
2008         return 0;
2009 
2010     switch (adev->asic_type) {
2011     case CHIP_FIJI:
2012         vi_update_bif_medium_grain_light_sleep(adev,
2013                 state == AMD_CG_STATE_GATE);
2014         vi_update_hdp_medium_grain_clock_gating(adev,
2015                 state == AMD_CG_STATE_GATE);
2016         vi_update_hdp_light_sleep(adev,
2017                 state == AMD_CG_STATE_GATE);
2018         vi_update_rom_medium_grain_clock_gating(adev,
2019                 state == AMD_CG_STATE_GATE);
2020         break;
2021     case CHIP_CARRIZO:
2022     case CHIP_STONEY:
2023         vi_update_bif_medium_grain_light_sleep(adev,
2024                 state == AMD_CG_STATE_GATE);
2025         vi_update_hdp_medium_grain_clock_gating(adev,
2026                 state == AMD_CG_STATE_GATE);
2027         vi_update_hdp_light_sleep(adev,
2028                 state == AMD_CG_STATE_GATE);
2029         vi_update_drm_light_sleep(adev,
2030                 state == AMD_CG_STATE_GATE);
2031         break;
2032     case CHIP_TONGA:
2033     case CHIP_POLARIS10:
2034     case CHIP_POLARIS11:
2035     case CHIP_POLARIS12:
2036     case CHIP_VEGAM:
2037         vi_common_set_clockgating_state_by_smu(adev, state);
2038         break;
2039     default:
2040         break;
2041     }
2042     return 0;
2043 }
2044 
/* The common block exposes no power gating of its own; succeed trivially. */
static int vi_common_set_powergating_state(void *handle,
                        enum amd_powergating_state state)
{
    return 0;
}
2050 
2051 static void vi_common_get_clockgating_state(void *handle, u64 *flags)
2052 {
2053     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2054     int data;
2055 
2056     if (amdgpu_sriov_vf(adev))
2057         *flags = 0;
2058 
2059     /* AMD_CG_SUPPORT_BIF_LS */
2060     data = RREG32_PCIE(ixPCIE_CNTL2);
2061     if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
2062         *flags |= AMD_CG_SUPPORT_BIF_LS;
2063 
2064     /* AMD_CG_SUPPORT_HDP_LS */
2065     data = RREG32(mmHDP_MEM_POWER_LS);
2066     if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
2067         *flags |= AMD_CG_SUPPORT_HDP_LS;
2068 
2069     /* AMD_CG_SUPPORT_HDP_MGCG */
2070     data = RREG32(mmHDP_HOST_PATH_CNTL);
2071     if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
2072         *flags |= AMD_CG_SUPPORT_HDP_MGCG;
2073 
2074     /* AMD_CG_SUPPORT_ROM_MGCG */
2075     data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
2076     if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
2077         *flags |= AMD_CG_SUPPORT_ROM_MGCG;
2078 }
2079 
/* IP-block callback table for the VI "common" block: lifecycle hooks
 * (init/fini/suspend/resume), idle/reset stubs, and clock/power gating
 * entry points defined above. */
static const struct amd_ip_funcs vi_common_ip_funcs = {
    .name = "vi_common",
    .early_init = vi_common_early_init,
    .late_init = vi_common_late_init,
    .sw_init = vi_common_sw_init,
    .sw_fini = vi_common_sw_fini,
    .hw_init = vi_common_hw_init,
    .hw_fini = vi_common_hw_fini,
    .suspend = vi_common_suspend,
    .resume = vi_common_resume,
    .is_idle = vi_common_is_idle,
    .wait_for_idle = vi_common_wait_for_idle,
    .soft_reset = vi_common_soft_reset,
    .set_clockgating_state = vi_common_set_clockgating_state,
    .set_powergating_state = vi_common_set_powergating_state,
    .get_clockgating_state = vi_common_get_clockgating_state,
};
2097 
/* Version descriptor (v1.0.0) binding the common IP block type to the
 * callback table above; registered first in vi_set_ip_blocks(). */
static const struct amdgpu_ip_block_version vi_common_ip_block =
{
    .type = AMD_IP_BLOCK_TYPE_COMMON,
    .major = 1,
    .minor = 0,
    .rev = 0,
    .funcs = &vi_common_ip_funcs,
};
2106 
/**
 * vi_set_virt_ops - install the VI SR-IOV virtualization callbacks
 * @adev: amdgpu device pointer
 */
void vi_set_virt_ops(struct amdgpu_device *adev)
{
    adev->virt.ops = &xgpu_vi_virt_ops;
}
2111 
/**
 * vi_set_ip_blocks - register the per-ASIC hardware IP block list
 * @adev: amdgpu device pointer
 *
 * Adds the IP blocks (common, GMC, IH, GFX, SDMA, SMU, display, UVD/VCE,
 * and optionally ACP) appropriate for the detected VI-family ASIC.  The
 * order of the amdgpu_device_ip_block_add() calls is significant: blocks
 * are initialized in registration order.  Display selection follows the
 * same pattern in every case: virtual display (VKMS) takes priority,
 * then DC when compiled in and supported, otherwise the legacy DCE block.
 *
 * Return: 0 on success, -EINVAL for an unsupported ASIC type.
 */
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
    switch (adev->asic_type) {
    case CHIP_TOPAZ:
        /* topaz has no DCE, UVD, VCE */
        amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
        amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
        amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
        amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
        amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
        amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
        if (adev->enable_virtual_display)
            amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
        break;
    case CHIP_FIJI:
        amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
        amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
        amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
        amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
        amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
        amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
        if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
            amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
        else if (amdgpu_device_has_dc_support(adev))
            amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
        else
            amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
        /* UVD/VCE are host-managed under SR-IOV, so skip them on a VF */
        if (!amdgpu_sriov_vf(adev)) {
            amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
            amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
        }
        break;
    case CHIP_TONGA:
        amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
        amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
        amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
        amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
        amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
        amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
        if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
            amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
        else if (amdgpu_device_has_dc_support(adev))
            amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
        else
            amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
        /* UVD/VCE are host-managed under SR-IOV, so skip them on a VF */
        if (!amdgpu_sriov_vf(adev)) {
            amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
            amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
        }
        break;
    case CHIP_POLARIS10:
    case CHIP_POLARIS11:
    case CHIP_POLARIS12:
    case CHIP_VEGAM:
        amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
        amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
        amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
        amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
        amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
        amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
        if (adev->enable_virtual_display)
            amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
        else if (amdgpu_device_has_dc_support(adev))
            amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
        else
            amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
        amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
        amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
        break;
    case CHIP_CARRIZO:
        amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
        amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
        amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
        amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
        amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
        amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
        if (adev->enable_virtual_display)
            amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
        else if (amdgpu_device_has_dc_support(adev))
            amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
        else
            amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
        amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
        amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
        amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
        break;
    case CHIP_STONEY:
        amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
        amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
        amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
        amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
        amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
        amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
        if (adev->enable_virtual_display)
            amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
        else if (amdgpu_device_has_dc_support(adev))
            amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
        else
            amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
        amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
        amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
        amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
        break;
    default:
        /* FIXME: not supported yet */
        return -EINVAL;
    }

    return 0;
}
2236 
/**
 * legacy_doorbell_index_init - set up the pre-Vega doorbell index layout
 * @adev: amdgpu device pointer
 *
 * Fills adev->doorbell_index with the legacy AMDGPU_DOORBELL_* assignments:
 * KIQ, the eight MEC compute rings, the GFX ring, both SDMA engines and
 * the IH ring, and records the highest index used so the doorbell
 * aperture can be sized.
 */
void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
    adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
    adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
    adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
    adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
    adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
    adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
    adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
    adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
    adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
    adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
    adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
    adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
    adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
    adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}