/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>

#define SMU_13_0_PARTIAL_PPTABLE
#define SWSMU_CODE_LAYER_L3

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "amdgpu_ras.h"
#include "smu_cmn.h"

#include "asic_reg/thm/thm_13_0_2_offset.h"
#include "asic_reg/thm/thm_13_0_2_sh_mask.h"
#include "asic_reg/mp/mp_13_0_2_offset.h"
#include "asic_reg/mp/mp_13_0_2_sh_mask.h"
#include "asic_reg/smuio/smuio_13_0_2_offset.h"
#include "asic_reg/smuio/smuio_13_0_2_sh_mask.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");

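/*
 * MP1 scratch registers used by the driver <-> SMU message mailbox on this
 * IP: C2PMSG_66 carries the message index, C2PMSG_82 the argument and
 * C2PMSG_90 the response (see smu_cmn.c for the actual message protocol).
 */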
#define mmMP1_SMN_C2PMSG_66				0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX			0

#define mmMP1_SMN_C2PMSG_82				0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX			0

#define mmMP1_SMN_C2PMSG_90				0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX			0

#define SMU13_VOLTAGE_SCALE				4

#define LINK_WIDTH_MAX					6
#define LINK_SPEED_MAX					3

#define smnPCIE_LC_LINK_WIDTH_CNTL			0x11140288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK	0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT	0x4
#define smnPCIE_LC_SPEED_CNTL				0x11140290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK	0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT	0xE

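/*
 * Lookup tables translating the encoded PCIe link width/speed fields read
 * from the LC registers above: link_width[] gives the lane count, and
 * link_speed[] the data rate in 0.1 GT/s units (2.5, 5.0, 8.0, 16.0 GT/s).
 */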
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static const int link_speed[] = {25, 50, 80, 160};

int smu_v13_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const char *chip_name;
	char fw_name[30];
	char ucode_prefix[30];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	/* doesn't need to load smu firmware in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 2):
		chip_name = "aldebaran_smc";
		break;
	default:
		amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
		chip_name = ucode_prefix;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name);

	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		DRM_ERROR("smu_v13_0: Failed to load firmware \"%s\"\n",
			  fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}

void smu_v13_0_fini_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;
	adev->pm.fw_version = 0;
}

int smu_v13_0_load_microcode(struct smu_context *smu)
{
#if 0
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t smc_fw_size;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	smc_fw_size = hdr->header.ucode_size_bytes;

	for (i = 1; i < smc_fw_size/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;
#endif

	return 0;
}

int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_firmware_info *ucode = NULL;
	uint32_t size = 0, pptable_id = 0;
	int ret = 0;
	void *table;

	/* doesn't need to load pptable in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (!adev->scpm_enabled)
		return 0;

	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)))
		return 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* "pptable_id == 0" means vbios carries the pptable */
	if (!pptable_id)
		return 0;

	ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
	if (ret)
		return ret;

	smu->pptable_firmware.data = table;
	smu->pptable_firmware.size = size;

	ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
	ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
	ucode->fw = &smu->pptable_firmware;
	adev->firmware.fw_size +=
		ALIGN(smu->pptable_firmware.size, PAGE_SIZE);

	return 0;
}

int smu_v13_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 4):
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_V13_0_4_FIRMWARE_FLAGS & 0xffffffff));
		break;
	default:
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		break;
	}

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

int smu_v13_0_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_program = (smu_version >> 24) & 0xff;
	smu_major = (smu_version >> 16) & 0xff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;
	if (smu->is_apu)
		adev->pm.fw_version = smu_version;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 2):
		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
		break;
	case IP_VERSION(13, 0, 0):
		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0;
		break;
	case IP_VERSION(13, 0, 7):
		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_7;
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
		break;
	case IP_VERSION(13, 0, 4):
		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_4;
		break;
	case IP_VERSION(13, 0, 5):
		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5;
		break;
	default:
		dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
			adev->ip_versions[MP1_HWIP][0]);
		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV;
		break;
	}

	/* only for dGPU w/ SMU13 */
	if (adev->pm.fw)
		dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
			smu_program, smu_version, smu_major, smu_minor, smu_debug);

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 * only on the paired driver.
	 * Considering above, we just leave user a warning message instead
	 * of halting driver loading.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
			 smu->smc_driver_if_version, if_version,
			 smu_program, smu_version, smu_major, smu_minor, smu_debug);
		dev_warn(adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

static int smu_v13_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

static int smu_v13_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	int ret, index;

	dev_info(adev->dev, "use vbios provided pptable\n");
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    powerplayinfo);

	ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
					     (uint8_t **)table);
	if (ret)
		return ret;

	if (size)
		*size = atom_table_size;

	return 0;
}

int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
					void **table,
					uint32_t *size,
					uint32_t pptable_id)
{
	const struct smc_firmware_header_v1_0 *hdr;
	struct amdgpu_device *adev = smu->adev;
	uint16_t version_major, version_minor;
	int ret;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	if (!hdr)
		return -EINVAL;

	dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);

	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major != 2) {
		dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
			version_major, version_minor);
		return -EINVAL;
	}

	switch (version_minor) {
	case 0:
		ret = smu_v13_0_set_pptable_v2_0(smu, table, size);
		break;
	case 1:
		ret = smu_v13_0_set_pptable_v2_1(smu, table, size, pptable_id);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

int smu_v13_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t size = 0, pptable_id = 0;
	void *table;
	int ret = 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* force using vbios pptable in sriov mode */
	if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
		ret = smu_v13_0_get_pptable_from_vbios(smu, &table, &size);
	else
		ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);

	if (ret)
		return ret;

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

int smu_v13_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret = 0;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		ret = -ENOMEM;
		goto err0_out;
	}

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_13_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {
		ret = -ENOMEM;
		goto err1_out;
	}

	/* Aldebaran does not support OVERDRIVE */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {
			ret = -ENOMEM;
			goto err2_out;
		}

		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {
			ret = -ENOMEM;
			goto err3_out;
		}
	}

	smu_table->combo_pptable =
		kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->combo_pptable) {
		ret = -ENOMEM;
		goto err4_out;
	}

	return 0;

err4_out:
	kfree(smu_table->boot_overdrive_table);
err3_out:
	kfree(smu_table->overdrive_table);
err2_out:
	kfree(smu_table->max_sustainable_clocks);
err1_out:
	kfree(smu_table->driver_pptable);
err0_out:
	return ret;
}

int smu_v13_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	kfree(smu_table->gpu_metrics_table);
	kfree(smu_table->combo_pptable);
	kfree(smu_table->boot_overdrive_table);
	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);
	smu_table->gpu_metrics_table = NULL;
	smu_table->combo_pptable = NULL;
	smu_table->boot_overdrive_table = NULL;
	smu_table->overdrive_table = NULL;
	smu_table->max_sustainable_clocks = NULL;
	smu_table->driver_pptable = NULL;
	kfree(smu_table->hardcode_pptable);
	smu_table->hardcode_pptable = NULL;

	kfree(smu_table->ecc_table);
	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->ecc_table = NULL;
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

int smu_v13_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context);

	return 0;
}

int smu_v13_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_4 *v_3_4;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;
	struct atom_smu_info_v3_6 *smu_info_v3_6;
	struct atom_smu_info_v4_0 *smu_info_v4_0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					     (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version for smu13!\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		break;
	case 4:
	default:
		v_3_4 = (struct atom_firmware_info_v3_4 *)header;
		smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
		break;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					    (uint8_t **)&header)) {

		if ((frev == 3) && (crev == 6)) {
			smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
		} else if ((frev == 3) && (crev == 1)) {
			return 0;
		} else if ((frev == 4) && (crev == 0)) {
			smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
			smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
		} else {
			dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
				 (uint32_t)frev, (uint32_t)crev);
		}
	}

	return 0;
}

int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					      address_high, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					      address_low, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					      (uint32_t)memory_pool->size, NULL);
	if (ret)
		return ret;

	return ret;
}

int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
	if (ret)
		dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!");

	return ret;
}

int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetDriverDramAddrHigh,
						      upper_32_bits(driver_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetDriverDramAddrLow,
							      lower_32_bits(driver_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v13_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetToolsDramAddrHigh,
						      upper_32_bits(tool_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetToolsDramAddrLow,
							      lower_32_bits(tool_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);

	return ret;
}

int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
	    feature->feature_num < 64)
		return -EINVAL;

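	/*
	 * The allowed-feature bitmap is 64 bits wide, but each SMU message
	 * carries only a 32-bit argument, so the mask is sent as two halves.
	 */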
	bitmap_to_arr32(feature_mask, feature->allowed, 64);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					      feature_mask[1], NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetAllowedFeaturesMaskLow,
					       feature_mask[0],
					       NULL);
}

int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
		else
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
		break;
	default:
		break;
	}

	return ret;
}

int smu_v13_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					  SMU_MSG_DisableAllSmuFeatures), NULL);
}

int smu_v13_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);

	return ret;
}

static int
smu_v13_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	return 0;
}

int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_13_0_max_sustainable_clocks *max_sustainable_clocks =
		smu->smu_table.max_sustainable_clocks;
	int ret = 0;

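	/* boot values are reported in 10 kHz units; divide by 100 for MHz */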
	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

int smu_v13_0_get_current_power_limit(struct smu_context *smu,
				      uint32_t *power_limit)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		return -EINVAL;

	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetPptLimit,
					      power_src << 16,
					      power_limit);
	if (ret)
		dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);

	return ret;
}

int smu_v13_0_set_power_limit(struct smu_context *smu,
			      enum smu_ppt_limit_type limit_type,
			      uint32_t limit)
{
	int ret = 0;

	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		return -EINVAL;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
		return ret;
	}

	smu->current_power_limit = limit;

	return 0;
}

static int smu_v13_0_allow_ih_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				    SMU_MSG_AllowIHHostInterrupt,
				    NULL);
}

static int smu_v13_0_process_pending_interrupt(struct smu_context *smu)
{
	int ret = 0;

	if (smu->dc_controlled_by_gpio &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
		ret = smu_v13_0_allow_ih_interrupt(smu);

	return ret;
}

int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
{
	int ret = 0;

	ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
	if (ret)
		return ret;

	return smu_v13_0_process_pending_interrupt(smu);
}

int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
{
	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}

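/*
 * Convert an SVI2 voltage ID read from the telemetry plane into millivolts:
 * V(mV) = 1550 - 6.25 * vid, computed here as (6200 - vid * 25) divided by
 * SMU13_VOLTAGE_SCALE (4).
 */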
static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU13_VOLTAGE_SCALE);
}

int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, regSMUSVI0_TEL_PLANE0) &
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		  SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;
}

int
smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static int
smu_v13_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
			__func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v13_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, regCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, regCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

int smu_v13_0_set_fan_speed_pwm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	speed = MIN(speed, 255);

	if (smu_v13_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

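	/* scale the 0-255 PWM request into the controller's 100%-duty range */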
	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 255);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, regCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}

int
smu_v13_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v13_0_set_fan_speed_pwm(smu, 255);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v13_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v13_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t tach_period, crystal_clock_freq;
	int ret;

	if (!speed)
		return -EINVAL;

	ret = smu_v13_0_auto_fan_control(smu, 0);
	if (ret)
		return ret;

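	/*
	 * Translate the RPM target into a tachometer period in crystal-clock
	 * ticks: get_xclk() reports the reference clock in 10 kHz units, so
	 * multiply by 10000 for Hz; 60/speed gives seconds per revolution,
	 * and the divide by 8 matches the tach pulse rate used by this THM IP.
	 */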
	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, regCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
}

int smu_v13_0_set_xgmi_pstate(struct smu_context *smu,
			      uint32_t pstate)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetXgmiMode,
					      pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
					      NULL);
	return ret;
}

static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   unsigned type,
				   enum amdgpu_interrupt_state state)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t low, high;
	uint32_t val = 0;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* For THM irqs */
		val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);

		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, 0);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* For THM irqs */
		low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
			  smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
		high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
			   smu->thermal_range.software_shutdown_temp);

		val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
		val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);

		val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, val);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);

		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	default:
		break;
	}

	return 0;
}

static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				    SMU_MSG_ReenableAcDcInterrupt,
				    NULL);
}

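/*
 * SMU v13 appears to reuse the SMU v11 THM/SMUIO interrupt source IDs,
 * hence the 11_0-named macros below.
 */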
#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1
#define SMUIO_11_0__SRCID__SMUIO_GPIO19		83

static int smu_v13_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;
	/*
	 * ctxid is used to distinguish different
	 * events for the SMCToHost interrupt.
	 */
	uint32_t ctxid = entry->src_data[0];
	uint32_t data;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
			/*
			 * SW CTF just occurred.
			 * Try to do a graceful shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
			orderly_poweroff(true);
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
			break;
		default:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
				  src_id);
			break;
		}
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
		/*
		 * HW CTF just occurred. Shutdown to prevent further damage.
		 */
		dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
		orderly_poweroff(true);
	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == 0xfe) {
			/* ACK SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);

			switch (ctxid) {
			case 0x3:
				dev_dbg(adev->dev, "Switched to AC mode!\n");
				smu_v13_0_ack_ac_dc_interrupt(smu);
				break;
			case 0x4:
				dev_dbg(adev->dev, "Switched to DC mode!\n");
				smu_v13_0_ack_ac_dc_interrupt(smu);
				break;
			case 0x7:
				/*
				 * Increment the throttle interrupt counter
				 */
				atomic64_inc(&smu->throttle_int_counter);

				if (!atomic_read(&adev->throttling_logging_enabled))
					return 0;

				if (__ratelimit(&adev->throttling_logging_rs))
					schedule_work(&smu->throttling_logging_work);

				break;
			}
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v13_0_irq_funcs = {
	.set = smu_v13_0_set_irq_state,
	.process = smu_v13_0_irq_process,
};

int smu_v13_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v13_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	/* Register CTF(GPIO_19) interrupt */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
				SMUIO_11_0__SRCID__SMUIO_GPIO19,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				0xfe,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					       struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
		(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
		(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
		(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
		(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
		(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
		(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);

	return ret;
}

static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu,
					     uint64_t event_arg)
{
	int ret = 0;

	dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);

	return ret;
}

int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
			     uint64_t event_arg)
{
	int ret = -EINVAL;

	switch (event) {
	case SMU_EVENT_RESET_COMPLETE:
		ret = smu_v13_0_wait_for_reset_complete(smu, event_arg);
		break;
	default:
		break;
	}

	return ret;
}

int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;
	uint32_t clock_limit;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock values are in MHz units */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
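	/* the clock id is carried in the upper 16 bits of the message argument */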
	param = (clk_id & 0xffff) << 16;

	if (max) {
		if (smu->adev->pm.ac_power)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetMaxDpmFreq,
							      param,
							      max);
		else
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetDcModeMaxDpmFreq,
							      param,
							      max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

out:
	return ret;
}

int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_v13_0_set_performance_level(struct smu_context *smu,
				    enum amd_dpm_forced_level level)
{
	struct smu_13_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_13_0_dpm_table *vclk_table =
		&dpm_context->dpm_tables.vclk_table;
	struct smu_13_0_dpm_table *dclk_table =
		&dpm_context->dpm_tables.dclk_table;
	struct smu_13_0_dpm_table *fclk_table =
		&dpm_context->dpm_tables.fclk_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t mclk_min = 0, mclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;
	uint32_t vclk_min = 0, vclk_max = 0;
	uint32_t dclk_min = 0, dclk_max = 0;
	uint32_t fclk_min = 0, fclk_max = 0;
	int ret = 0, i;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		sclk_min = sclk_max = gfx_table->max;
		mclk_min = mclk_max = mem_table->max;
		socclk_min = socclk_max = soc_table->max;
		vclk_min = vclk_max = vclk_table->max;
		dclk_min = dclk_max = dclk_table->max;
		fclk_min = fclk_max = fclk_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		sclk_min = sclk_max = gfx_table->min;
		mclk_min = mclk_max = mem_table->min;
		socclk_min = socclk_max = soc_table->min;
		vclk_min = vclk_max = vclk_table->min;
		dclk_min = dclk_max = dclk_table->min;
		fclk_min = fclk_max = fclk_table->min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		sclk_min = gfx_table->min;
		sclk_max = gfx_table->max;
		mclk_min = mem_table->min;
		mclk_max = mem_table->max;
		socclk_min = soc_table->min;
		socclk_max = soc_table->max;
		vclk_min = vclk_table->min;
		vclk_max = vclk_table->max;
		dclk_min = dclk_table->min;
		dclk_max = dclk_table->max;
		fclk_min = fclk_table->min;
		fclk_max = fclk_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
		mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
		socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
		vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
		dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
		fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		mclk_min = mclk_max = pstate_table->uclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
		mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
		socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
		vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
		dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
		fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	/*
	 * Skip the non-gfx clock domains on SMU 13.0.2, as soft limits
	 * for those domains are not supported there.
	 */
	if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) {
		mclk_min = mclk_max = 0;
		socclk_min = socclk_max = 0;
		vclk_min = vclk_max = 0;
		dclk_min = dclk_max = 0;
		fclk_min = fclk_max = 0;
	}

	if (sclk_min && sclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_GFXCLK,
							    sclk_min,
							    sclk_max);
		if (ret)
			return ret;

		pstate_table->gfxclk_pstate.curr.min = sclk_min;
		pstate_table->gfxclk_pstate.curr.max = sclk_max;
	}

	if (mclk_min && mclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_MCLK,
							    mclk_min,
							    mclk_max);
		if (ret)
			return ret;

		pstate_table->uclk_pstate.curr.min = mclk_min;
		pstate_table->uclk_pstate.curr.max = mclk_max;
	}

	if (socclk_min && socclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_SOCCLK,
							    socclk_min,
							    socclk_max);
		if (ret)
			return ret;

		pstate_table->socclk_pstate.curr.min = socclk_min;
		pstate_table->socclk_pstate.curr.max = socclk_max;
	}

	if (vclk_min && vclk_max) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v13_0_set_soft_freq_limited_range(smu,
								    i ? SMU_VCLK1 : SMU_VCLK,
								    vclk_min,
								    vclk_max);
			if (ret)
				return ret;
		}
		pstate_table->vclk_pstate.curr.min = vclk_min;
		pstate_table->vclk_pstate.curr.max = vclk_max;
	}

	if (dclk_min && dclk_max) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v13_0_set_soft_freq_limited_range(smu,
								    i ? SMU_DCLK1 : SMU_DCLK,
								    dclk_min,
								    dclk_max);
			if (ret)
				return ret;
		}
		pstate_table->dclk_pstate.curr.min = dclk_min;
		pstate_table->dclk_pstate.curr.max = dclk_max;
	}

	if (fclk_min && fclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_FCLK,
							    fclk_min,
							    fclk_max);
		if (ret)
			return ret;

		pstate_table->fclk_pstate.curr.min = fclk_min;
		pstate_table->fclk_pstate.curr.max = fclk_max;
	}

	return ret;
}

int smu_v13_0_set_power_source(struct smu_context *smu,
			       enum smu_power_src_type power_src)
{
	int pwr_source;

	pwr_source = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_PWR,
						    (uint32_t)power_src);
	if (pwr_source < 0)
		return -EINVAL;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NotifyPowerSource,
					       pwr_source,
					       NULL);
}

static int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
					   enum smu_clk_type clk_type,
					   uint16_t level,
					   uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      value);
	if (ret)
		return ret;

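	/* BIT31 flags fine-grained DPM; mask it off so only the clock remains */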
	*value = *value & 0x7fffffff;

	return ret;
}

static int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
					 enum smu_clk_type clk_type,
					 uint32_t *value)
{
	int ret;

	ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);

	if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) && (!ret && value))
		++(*value);

	return ret;
}

static int smu_v13_0_get_fine_grained_status(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     bool *is_fine_grained_dpm)
{
	int ret = 0, clk_id = 0;
	uint32_t param;
	uint32_t value;

	if (!is_fine_grained_dpm)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      &value);
	if (ret)
		return ret;

1962
1963
1964
1965
1966 *is_fine_grained_dpm = value & 0x80000000;
1967
1968 return 0;
1969 }
1970
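
/*
 * Fill a smu_13_0_dpm_table for one clock domain: read the level count,
 * the fine-grained status (not reported by SMU v13.0.2 firmware) and the
 * frequency of every level, recording the first level as the table min
 * and the last as the max.
 */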
int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   struct smu_13_0_dpm_table *single_dpm_table)
{
	int ret = 0;
	uint32_t clk;
	int i;

	ret = smu_v13_0_get_dpm_level_count(smu,
					    clk_type,
					    &single_dpm_table->count);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
		return ret;
	}

	if (smu->adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2)) {
		ret = smu_v13_0_get_fine_grained_status(smu,
							clk_type,
							&single_dpm_table->is_fine_grained);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
			return ret;
		}
	}

	for (i = 0; i < single_dpm_table->count; i++) {
		ret = smu_v13_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      i,
						      &clk);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
			return ret;
		}

		single_dpm_table->dpm_levels[i].value = clk;
		single_dpm_table->dpm_levels[i].enabled = true;

		if (i == 0)
			single_dpm_table->min = clk;
		else if (i == single_dpm_table->count - 1)
			single_dpm_table->max = clk;
	}

	return 0;
}
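
/*
 * Return the lowest and/or highest supported frequency of a clock
 * domain; either output pointer may be NULL if only one end of the
 * range is needed.
 */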
int smu_v13_0_get_dpm_level_range(struct smu_context *smu,
				  enum smu_clk_type clk_type,
				  uint32_t *min_value,
				  uint32_t *max_value)
{
	uint32_t level_count = 0;
	int ret = 0;

	if (!min_value && !max_value)
		return -EINVAL;

	if (min_value) {
		/* by default, level 0 clock value as min value */
		ret = smu_v13_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      0,
						      min_value);
		if (ret)
			return ret;
	}

	if (max_value) {
		ret = smu_v13_0_get_dpm_level_count(smu,
						    clk_type,
						    &level_count);
		if (ret)
			return ret;

		ret = smu_v13_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      level_count - 1,
						      max_value);
		if (ret)
			return ret;
	}

	return ret;
}
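
/*
 * The helpers below read the current PCIe link width/speed level from
 * the LC_LINK_WIDTH_CNTL and LC_SPEED_CNTL registers and translate it
 * through the link_width[]/link_speed[] tables; out-of-range levels
 * fall back to entry 0.
 */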
int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
}

int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu)
{
	uint32_t width_level;

	width_level = smu_v13_0_get_current_pcie_link_width_level(smu);
	if (width_level > LINK_WIDTH_MAX)
		width_level = 0;

	return link_width[width_level];
}

int smu_v13_0_get_current_pcie_link_speed_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
		>> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
}

int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu)
{
	uint32_t speed_level;

	speed_level = smu_v13_0_get_current_pcie_link_speed_level(smu);
	if (speed_level > LINK_SPEED_MAX)
		speed_level = 0;

	return link_speed[speed_level];
}
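
/*
 * Power each unharvested VCN instance up or down; the instance number
 * rides in the upper 16 bits of the message argument.
 */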
int smu_v13_0_set_vcn_enable(struct smu_context *smu,
			     bool enable)
{
	struct amdgpu_device *adev = smu->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
						      SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
						      i << 16U, NULL);
		if (ret)
			return ret;
	}

	return ret;
}
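
/* Power the JPEG engine up or down; no instance argument is needed here. */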
int smu_v13_0_set_jpeg_enable(struct smu_context *smu,
			      bool enable)
{
	return smu_cmn_send_smc_msg_with_param(smu, enable ?
					       SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
					       0, NULL);
}
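
/*
 * Kick off the PMFW's RunDcBtc sequence (presumably the DC-side boot
 * time calibration); a failure is logged but not retried.
 */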
int smu_v13_0_run_btc(struct smu_context *smu)
{
	int res;

	res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
	if (res)
		dev_err(smu->adev->dev, "RunDcBtc failed!\n");

	return res;
}
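
/*
 * Walk every deep-sleep (DS) clock feature the firmware reports as
 * supported and enable or disable it, bailing out on the first failure.
 */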
int smu_v13_0_deep_sleep_control(struct smu_context *smu,
				 bool enablement)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	return ret;
}
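
/* Toggle GFX ultra-low-voltage (ULV) when the feature is supported. */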
int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
			      bool enablement)
{
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);

	return ret;
}
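
/*
 * BACO (Bus Active, Chip Off) is only reported as supported on bare
 * metal, when the platform supports it and, if the firmware exposes the
 * BACO feature bit, when that feature is actually enabled.
 */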
bool smu_v13_0_baco_is_support(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	if (amdgpu_sriov_vf(smu->adev) ||
	    !smu_baco->platform_support)
		return false;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	    !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	return true;
}

enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	return smu_baco->state;
}
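
/*
 * Drive the BACO state machine. Entry requests BAMACO (a BACO variant
 * that keeps memory powered) when MACO is supported; exit clears the
 * two VBIOS scratch registers so a subsequent ASIC re-init starts
 * clean.
 */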
int smu_v13_0_baco_set_state(struct smu_context *smu,
			     enum smu_baco_state state)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (smu_v13_0_baco_get_state(smu) == state)
		return 0;

	if (state == SMU_BACO_STATE_ENTER) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnterBaco,
						      smu_baco->maco_support ?
						      BACO_SEQ_BAMACO : BACO_SEQ_BACO,
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_ExitBaco,
					   NULL);
		if (ret)
			return ret;

		/* clear vbios scratch 6 and 7 for coming asic reinit */
		WREG32(adev->bios_scratch_reg_offset + 6, 0);
		WREG32(adev->bios_scratch_reg_offset + 7, 0);
	}

	if (!ret)
		smu_baco->state = state;

	return ret;
}
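
/* Convenience wrappers; entry waits 10 ms after a successful request. */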
int smu_v13_0_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v13_0_baco_set_state(smu,
				       SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;

	msleep(10);

	return ret;
}

int smu_v13_0_baco_exit(struct smu_context *smu)
{
	return smu_v13_0_baco_set_state(smu,
					SMU_BACO_STATE_EXIT);
}
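
/*
 * Ask the PMFW to bring GFX up through the IMU. The message is sent
 * without waiting for a response.
 */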
int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	int index;

	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_EnableGfxImu);
	/* don't let a negative lookup result be truncated into a message index */
	if (index < 0)
		return index;

	return smu_cmn_send_msg_without_waiting(smu, index, 1);
}
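
/*
 * Handle the sysfs overdrive (OD) edit commands for sclk: stage a new
 * hard-min/soft-max pair, restore the defaults, or commit the staged
 * values to the PMFW via SetHardMinGfxClk/SetSoftMaxGfxClk.
 */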
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
				enum PP_OD_DPM_TABLE_COMMAND type,
				long input[], uint32_t size)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	int ret = 0;

	/* Only allowed in manual mode */
	if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			if (input[1] < smu->gfx_default_hard_min_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%u) MHz\n",
					 input[1], smu->gfx_default_hard_min_freq);
				return -EINVAL;
			}
			smu->gfx_actual_hard_min_freq = input[1];
		} else if (input[0] == 1) {
			if (input[1] > smu->gfx_default_soft_max_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%u) MHz\n",
					 input[1], smu->gfx_default_soft_max_freq);
				return -EINVAL;
			}
			smu->gfx_actual_soft_max_freq = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}
		if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
			dev_err(smu->adev->dev,
				"The setting minimum sclk (%u) MHz is greater than the setting maximum sclk (%u) MHz\n",
				smu->gfx_actual_hard_min_freq,
				smu->gfx_actual_soft_max_freq);
			return -EINVAL;
		}

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
						      smu->gfx_actual_hard_min_freq,
						      NULL);
		if (ret) {
			dev_err(smu->adev->dev, "Set hard min sclk failed!\n");
			return ret;
		}

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
						      smu->gfx_actual_soft_max_freq,
						      NULL);
		if (ret) {
			dev_err(smu->adev->dev, "Set soft max sclk failed!\n");
			return ret;
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}
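
/* Read the firmware's DPM clock table into the driver-side copy. */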
int smu_v13_0_set_default_dpm_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
				    smu_table->clocks_table, false);
}
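
/*
 * Cache the MP1 C2PMSG mailbox offsets used for driver/PMFW messaging:
 * C2PMSG_82 carries the parameter, C2PMSG_66 the message id and
 * C2PMSG_90 the response.
 */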
void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
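
/*
 * Request a mode-1 (whole ASIC) reset from the PMFW and give the
 * hardware SMU13_MODE1_RESET_WAIT_TIME_IN_MS to settle afterwards.
 */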
int smu_v13_0_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
	if (!ret)
		msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);

	return ret;
}