#include "amdgpu.h"
#include "vi.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "vid.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gmc_v8_0.h"
#include "gfx_v8_0.h"
#include "sdma_v3_0.h"
#include "tonga_ih.h"
#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "mxgpu_vi.h"

#include "amdgpu_reset.h"
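
/* VI (Fiji/Tonga) golden register settings for SR-IOV virtual functions,
 * stored as (register, mask, value) triples.
 */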
static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};

static const u32 xgpu_fiji_golden_settings_a10[] = {
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 xgpu_fiji_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};

static const u32 xgpu_tonga_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};

static const u32 xgpu_tonga_golden_settings_a11[] = {
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 xgpu_tonga_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
};

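/* Program the golden register sequences that match the ASIC we are running on. */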
void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							xgpu_fiji_mgcg_cgcg_init,
							ARRAY_SIZE(
								xgpu_fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							xgpu_fiji_golden_settings_a10,
							ARRAY_SIZE(
								xgpu_fiji_golden_settings_a10));
		amdgpu_device_program_register_sequence(adev,
							xgpu_fiji_golden_common_all,
							ARRAY_SIZE(
								xgpu_fiji_golden_common_all));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							xgpu_tonga_mgcg_cgcg_init,
							ARRAY_SIZE(
								xgpu_tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							xgpu_tonga_golden_settings_a11,
							ARRAY_SIZE(
								xgpu_tonga_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
							xgpu_tonga_golden_common_all,
							ARRAY_SIZE(
								xgpu_tonga_golden_common_all));
		break;
	default:
		BUG_ON("Doesn't support chip type.\n");
		break;
	}
}
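
/*
 * Mailbox communication between the GPU hypervisor (PF) and the VF.
 */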
static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
{
	u32 reg;
	int timeout = VI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);

	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
	reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, RCV_MSG_ACK, 1);
	WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);

	/* wait for the host to clear RCV_MSG_VALID */
	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
	while (reg & mask) {
		if (timeout <= 0) {
			pr_err("RCV_MSG_VALID is not cleared\n");
			break;
		}
		mdelay(1);
		timeout -= 1;

		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
	}
}

static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
	reg = REG_SET_FIELD(reg, MAILBOX_CONTROL,
			    TRN_MSG_VALID, val ? 1 : 0);
	WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
}

static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0);
	reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, reg);

	xgpu_vi_mailbox_set_valid(adev, true);
}

static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);

	/* RCV_MSG_VALID is not checked for FLR completion messages */
	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
		if (!(reg & mask))
			return -ENOENT;
	}

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	/* acknowledge the message to the PF */
	xgpu_vi_mailbox_send_ack(adev);

	return 0;
}

static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
{
	int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK);
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
	while (!(reg & mask)) {
		if (timeout <= 0) {
			pr_err("Doesn't get ack from pf.\n");
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
	}

	return r;
}

static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r = 0, timeout = VI_MAILBOX_TIMEDOUT;

	r = xgpu_vi_mailbox_rcv_msg(adev, event);
	while (r) {
		if (timeout <= 0) {
			pr_err("Doesn't get ack from pf.\n");
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		r = xgpu_vi_mailbox_rcv_msg(adev, event);
	}

	return r;
}

static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
					enum idh_request request)
{
	int r;

	xgpu_vi_mailbox_trans_msg(adev, request);

	/* wait for the PF to ack the transmitted message */
	r = xgpu_vi_poll_ack(adev);
	if (r)
		return r;

	xgpu_vi_mailbox_set_valid(adev, false);

	/* access requests additionally wait for READY_TO_ACCESS_GPU */
	if (request == IDH_REQ_GPU_INIT_ACCESS ||
	    request == IDH_REQ_GPU_FINI_ACCESS ||
	    request == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Doesn't get ack from pf, give up\n");
			return r;
		}
	}

	return 0;
}

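/* Access request helpers exposed through xgpu_vi_virt_ops below. */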
static int xgpu_vi_request_reset(struct amdgpu_device *adev)
{
	return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_vi_wait_reset_cmpl(struct amdgpu_device *adev)
{
	return xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL);
}

static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_vi_send_access_requests(adev, req);
}

static int xgpu_vi_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_vi_send_access_requests(adev, req);

	return r;
}

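/* Mailbox interrupt handling and FLR (function level reset) work. */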
static int xgpu_vi_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	/* wait for the FLR completion notification from the host */
	if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
		pr_err("failed to receive FLR_CMPL\n");
		return;
	}

	/* trigger recovery due to world switch failure */
	if (amdgpu_device_should_recover_gpu(adev)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	/* schedule the FLR work here only when driver GPU recovery (TDR) is disabled */
	if (!amdgpu_gpu_recovery) {
		/* see what event we got */
		r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);

		/* only FLR notifications are handled for now */
		if (!r && !amdgpu_in_reset(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
								&adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_ack_irq_funcs = {
	.set = xgpu_vi_set_mailbox_ack_irq,
	.process = xgpu_vi_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_rcv_irq_funcs = {
	.set = xgpu_vi_set_mailbox_rcv_irq,
	.process = xgpu_vi_mailbox_rcv_irq,
};

void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_vi_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_vi_mailbox_rcv_irq_funcs;
}

int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	/* legacy IRQ source 135: mailbox message valid, 138: mailbox ack */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_vi_mailbox_flr_work);

	return 0;
}

void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

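/* Virtualization callbacks registered with the amdgpu virt layer for VI VFs. */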
const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
	.req_full_gpu = xgpu_vi_request_full_gpu_access,
	.rel_full_gpu = xgpu_vi_release_full_gpu_access,
	.reset_gpu = xgpu_vi_request_reset,
	.wait_reset = xgpu_vi_wait_reset_cmpl,
	.trans_msg = NULL,
};