#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * Do not use pr_err/pr_warn/pr_info/pr_debug in this file: use
 * dev_err, dev_warn, dev_info and dev_dbg instead, as they carry the
 * device context and are friendlier on multi-GPU systems.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char * const __smu_message_names[] = {
        SMU_MESSAGE_TYPES
};

#define smu_cmn_call_asic_func(intf, smu, args...) \
        ((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? \
                             (smu)->ppt_funcs->intf(smu, ##args) : \
                             -ENOTSUPP) : \
                            -EINVAL)

static const char *smu_get_message_name(struct smu_context *smu,
                                        enum smu_message_type type)
{
        if (type < 0 || type >= SMU_MSG_MAX_COUNT)
                return "unknown smu message";

        return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
                             uint32_t *arg)
{
        struct amdgpu_device *adev = smu->adev;

        *arg = RREG32(smu->param_reg);
}

/*
 * SMU response codes, as written by the firmware into the content of
 * the response register (SMN_C2PMSG_90) to report the status of the
 * last command.
 */
#define SMU_RESP_NONE           0
#define SMU_RESP_OK             1
#define SMU_RESP_CMD_FAIL       0xFF
#define SMU_RESP_CMD_UNKNOWN    0xFE
#define SMU_RESP_CMD_BAD_PREREQ 0xFD
#define SMU_RESP_BUSY_OTHER     0xFC
#define SMU_RESP_DEBUG_END      0xFB

/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to SMU context
 *
 * Poll the response register until the SMU writes a non-zero status
 * or the timeout expires, and return the last value read. A return
 * value of SMU_RESP_NONE means the SMU is still busy with the
 * previous command; the other SMU_RESP_* values report the result of
 * the last command submitted.
 */
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int timeout = adev->usec_timeout * 20;
        u32 reg;

        for ( ; timeout > 0; timeout--) {
                reg = RREG32(smu->resp_reg);
                if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
                        break;

                udelay(1);
        }

        return reg;
}

static void __smu_cmn_reg_print_error(struct smu_context *smu,
                                      u32 reg_c2pmsg_90,
                                      int msg_index,
                                      u32 param,
                                      enum smu_message_type msg)
{
        struct amdgpu_device *adev = smu->adev;
        const char *message = smu_get_message_name(smu, msg);
        u32 msg_idx, prm;

        switch (reg_c2pmsg_90) {
        case SMU_RESP_NONE: {
                msg_idx = RREG32(smu->msg_reg);
                prm = RREG32(smu->param_reg);
                dev_err_ratelimited(adev->dev,
                                    "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
                                    msg_idx, prm);
        }
                break;
        case SMU_RESP_OK:
                /* The SMU executed the command. It completed with a
                 * successful result.
                 */
                break;
        case SMU_RESP_CMD_FAIL:
                /* The SMU executed the command. It completed with an
                 * unsuccessful result.
                 */
                break;
        case SMU_RESP_CMD_UNKNOWN:
                dev_err_ratelimited(adev->dev,
                                    "SMU: unknown command: index:%d param:0x%08X message:%s",
                                    msg_index, param, message);
                break;
        case SMU_RESP_CMD_BAD_PREREQ:
                dev_err_ratelimited(adev->dev,
                                    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
                                    msg_index, param, message);
                break;
        case SMU_RESP_BUSY_OTHER:
                dev_err_ratelimited(adev->dev,
                                    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
                                    msg_index, param, message);
                break;
        case SMU_RESP_DEBUG_END:
                dev_err_ratelimited(adev->dev,
                                    "SMU: I'm debugging!");
                break;
        default:
                dev_err_ratelimited(adev->dev,
                                    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
                                    reg_c2pmsg_90, msg_index, param, message);
                break;
        }
}

static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
        int res;

        switch (reg_c2pmsg_90) {
        case SMU_RESP_NONE:
                /* The SMU is busy--still executing the previous command.
                 */
                res = -ETIME;
                break;
        case SMU_RESP_OK:
                res = 0;
                break;
        case SMU_RESP_CMD_FAIL:
                /* The command completed, but its status was failure.
                 */
                res = -EIO;
                break;
        case SMU_RESP_CMD_UNKNOWN:
                /* Unknown command--ignored by the SMU.
                 */
                res = -EOPNOTSUPP;
                break;
        case SMU_RESP_CMD_BAD_PREREQ:
                /* Valid command--bad prerequisites.
                 */
                res = -EINVAL;
                break;
        case SMU_RESP_BUSY_OTHER:
                /* The SMU is busy with other commands; the caller
                 * should retry.
                 */
                res = -EBUSY;
                break;
        default:
                /* Unknown or debug response from the SMU.
                 */
                res = -EREMOTEIO;
                break;
        }

        return res;
}

static void __smu_cmn_send_msg(struct smu_context *smu,
                               u16 msg,
                               u32 param)
{
        struct amdgpu_device *adev = smu->adev;

        WREG32(smu->resp_reg, 0);
        WREG32(smu->param_reg, param);
        WREG32(smu->msg_reg, msg);
}

/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait
 * for status/result of the message, thus the "without_waiting". If the
 * SMU is still busy with the previous command, or the response register
 * holds an unrecognized value, the message is not sent.
 *
 * Return 0 on success, -errno on error if we weren't able to _send_
 * the message for some reason. See __smu_cmn_reg2errno() for details
 * of the -errno.
 */
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
                                     uint16_t msg_index,
                                     uint32_t param)
{
        struct amdgpu_device *adev = smu->adev;
        u32 reg;
        int res;

        if (adev->no_hw_access)
                return 0;

        reg = __smu_cmn_poll_stat(smu);
        res = __smu_cmn_reg2errno(smu, reg);
        if (reg == SMU_RESP_NONE ||
            res == -EREMOTEIO)
                goto Out;
        __smu_cmn_send_msg(smu, msg_index, param);
        res = 0;
Out:
        if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
            res && (res != -ETIME)) {
                amdgpu_device_halt(adev);
                WARN_ON(1);
        }

        return res;
}

/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU for the message most recently submitted.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * __smu_cmn_reg2errno() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
        u32 reg;
        int res;

        reg = __smu_cmn_poll_stat(smu);
        res = __smu_cmn_reg2errno(smu, reg);

        if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
            res && (res != -ETIME)) {
                amdgpu_device_halt(smu->adev);
                WARN_ON(1);
        }

        return res;
}

/**
 * smu_cmn_send_smc_msg_with_param -- send a message with a parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and, if @read_arg is non-NULL, return
 * back a value from the SMU in the @read_arg pointer. The whole
 * exchange is serialized by the smu->message_lock mutex.
 *
 * If the message is not permitted in the current (e.g. SR-IOV VF)
 * configuration, it is silently skipped and 0 is returned.
 *
 * On error the completion status is printed to the kernel log
 * (rate-limited), the contents of @read_arg are undefined, and a
 * negative errno is returned; see __smu_cmn_reg2errno() for the
 * mapping of SMU response codes to errno values.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
                                    enum smu_message_type msg,
                                    uint32_t param,
                                    uint32_t *read_arg)
{
        struct amdgpu_device *adev = smu->adev;
        int res, index;
        u32 reg;

        if (adev->no_hw_access)
                return 0;

        index = smu_cmn_to_asic_specific_index(smu,
                                               CMN2ASIC_MAPPING_MSG,
                                               msg);
        if (index < 0)
                return index == -EACCES ? 0 : index;

        mutex_lock(&smu->message_lock);
        reg = __smu_cmn_poll_stat(smu);
        res = __smu_cmn_reg2errno(smu, reg);
        if (reg == SMU_RESP_NONE ||
            res == -EREMOTEIO) {
                __smu_cmn_reg_print_error(smu, reg, index, param, msg);
                goto Out;
        }
        __smu_cmn_send_msg(smu, (uint16_t) index, param);
        reg = __smu_cmn_poll_stat(smu);
        res = __smu_cmn_reg2errno(smu, reg);
        if (res != 0)
                __smu_cmn_reg_print_error(smu, reg, index, param, msg);
        if (read_arg)
                smu_cmn_read_arg(smu, read_arg);
Out:
        if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
                amdgpu_device_halt(adev);
                WARN_ON(1);
        }

        mutex_unlock(&smu->message_lock);
        return res;
}

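/*
 * Convenience wrapper around smu_cmn_send_smc_msg_with_param() for
 * messages that take no parameter (a parameter of 0 is sent).
 */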
int smu_cmn_send_smc_msg(struct smu_context *smu,
                         enum smu_message_type msg,
                         uint32_t *read_arg)
{
        return smu_cmn_send_smc_msg_with_param(smu,
                                               msg,
                                               0,
                                               read_arg);
}

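/*
 * Translate a common (ASIC-agnostic) index of the given mapping type
 * (message, clock, feature, table, power source or workload) into the
 * ASIC-specific value from the corresponding mapping table. Returns the
 * mapped value, -EINVAL if the index or mapping is invalid, or -EACCES
 * if the message is not permitted in an SR-IOV VF.
 */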
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
                                   enum smu_cmn2asic_mapping_type type,
                                   uint32_t index)
{
        struct cmn2asic_msg_mapping msg_mapping;
        struct cmn2asic_mapping mapping;

        switch (type) {
        case CMN2ASIC_MAPPING_MSG:
                if (index >= SMU_MSG_MAX_COUNT ||
                    !smu->message_map)
                        return -EINVAL;

                msg_mapping = smu->message_map[index];
                if (!msg_mapping.valid_mapping)
                        return -EINVAL;

                if (amdgpu_sriov_vf(smu->adev) &&
                    !msg_mapping.valid_in_vf)
                        return -EACCES;

                return msg_mapping.map_to;

        case CMN2ASIC_MAPPING_CLK:
                if (index >= SMU_CLK_COUNT ||
                    !smu->clock_map)
                        return -EINVAL;

                mapping = smu->clock_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_FEATURE:
                if (index >= SMU_FEATURE_COUNT ||
                    !smu->feature_map)
                        return -EINVAL;

                mapping = smu->feature_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_TABLE:
                if (index >= SMU_TABLE_COUNT ||
                    !smu->table_map)
                        return -EINVAL;

                mapping = smu->table_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_PWR:
                if (index >= SMU_POWER_SOURCE_COUNT ||
                    !smu->pwr_src_map)
                        return -EINVAL;

                mapping = smu->pwr_src_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_WORKLOAD:
                if (index > PP_SMC_POWER_PROFILE_WINDOW3D ||
                    !smu->workload_map)
                        return -EINVAL;

                mapping = smu->workload_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        default:
                return -EINVAL;
        }
}

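/*
 * Return non-zero if the feature identified by @mask is marked as
 * supported in the cached smu_feature bitmap, 0 otherwise.
 */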
int smu_cmn_feature_is_supported(struct smu_context *smu,
                                 enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        int feature_id;

        feature_id = smu_cmn_to_asic_specific_index(smu,
                                                    CMN2ASIC_MAPPING_FEATURE,
                                                    mask);
        if (feature_id < 0)
                return 0;

        WARN_ON(feature_id > feature->feature_num);

        return test_bit(feature_id, feature->supported);
}

static int __smu_get_enabled_features(struct smu_context *smu,
                                      uint64_t *enabled_features)
{
        return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}

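/*
 * Return 1 if the feature identified by @mask is currently enabled
 * according to the mask reported by the firmware, 0 otherwise
 * (including when the enabled mask cannot be retrieved).
 */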
int smu_cmn_feature_is_enabled(struct smu_context *smu,
                               enum smu_feature_mask mask)
{
        struct amdgpu_device *adev = smu->adev;
        uint64_t enabled_features;
        int feature_id;

        if (__smu_get_enabled_features(smu, &enabled_features)) {
                dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
                return 0;
        }

        /*
         * An all-ones mask means the backend reports every feature as
         * enabled, so the per-feature lookup below can be skipped.
         */
        if (enabled_features == ULLONG_MAX)
                return 1;

        feature_id = smu_cmn_to_asic_specific_index(smu,
                                                    CMN2ASIC_MAPPING_FEATURE,
                                                    mask);
        if (feature_id < 0)
                return 0;

        return test_bit(feature_id, (unsigned long *)&enabled_features);
}

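/*
 * Check whether DPM is enabled for the clock domain @clk_type by mapping
 * it to the corresponding DPM feature bit. Clock types without a mapping
 * here are reported as enabled.
 */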
bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
                                enum smu_clk_type clk_type)
{
        enum smu_feature_mask feature_id = 0;

        switch (clk_type) {
        case SMU_MCLK:
        case SMU_UCLK:
                feature_id = SMU_FEATURE_DPM_UCLK_BIT;
                break;
        case SMU_GFXCLK:
        case SMU_SCLK:
                feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
                break;
        case SMU_SOCCLK:
                feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
                break;
        case SMU_VCLK:
        case SMU_VCLK1:
                feature_id = SMU_FEATURE_DPM_VCLK_BIT;
                break;
        case SMU_DCLK:
        case SMU_DCLK1:
                feature_id = SMU_FEATURE_DPM_DCLK_BIT;
                break;
        case SMU_FCLK:
                feature_id = SMU_FEATURE_DPM_FCLK_BIT;
                break;
        default:
                return true;
        }

        if (!smu_cmn_feature_is_enabled(smu, feature_id))
                return false;

        return true;
}

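/*
 * Read the 64-bit enabled-feature mask from the firmware. Depending on
 * which messages the ASIC supports, this uses either the single
 * GetEnabledSmuFeatures message with a high/low selector argument or the
 * separate GetEnabledSmuFeaturesHigh/Low messages.
 */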
int smu_cmn_get_enabled_mask(struct smu_context *smu,
                             uint64_t *feature_mask)
{
        uint32_t *feature_mask_high;
        uint32_t *feature_mask_low;
        int ret = 0, index = 0;

        if (!feature_mask)
                return -EINVAL;

        feature_mask_low = &((uint32_t *)feature_mask)[0];
        feature_mask_high = &((uint32_t *)feature_mask)[1];

        index = smu_cmn_to_asic_specific_index(smu,
                                               CMN2ASIC_MAPPING_MSG,
                                               SMU_MSG_GetEnabledSmuFeatures);
        if (index > 0) {
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_GetEnabledSmuFeatures,
                                                      0,
                                                      feature_mask_low);
                if (ret)
                        return ret;

                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_GetEnabledSmuFeatures,
                                                      1,
                                                      feature_mask_high);
        } else {
                ret = smu_cmn_send_smc_msg(smu,
                                           SMU_MSG_GetEnabledSmuFeaturesHigh,
                                           feature_mask_high);
                if (ret)
                        return ret;

                ret = smu_cmn_send_smc_msg(smu,
                                           SMU_MSG_GetEnabledSmuFeaturesLow,
                                           feature_mask_low);
        }

        return ret;
}

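/*
 * Convert an ASIC-dependent throttler status bitmask into the
 * ASIC-independent representation, using the per-ASIC @throttler_map
 * table to translate each set bit to its independent bit position.
 */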
uint64_t smu_cmn_get_indep_throttler_status(
                                        const unsigned long dep_status,
                                        const uint8_t *throttler_map)
{
        uint64_t indep_status = 0;
        uint8_t dep_bit = 0;

        for_each_set_bit(dep_bit, &dep_status, 32)
                indep_status |= 1ULL << throttler_map[dep_bit];

        return indep_status;
}

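/*
 * Enable or disable the features selected by @feature_mask by sending
 * the (En|Dis)ableSmuFeaturesLow/High message pair to the firmware.
 */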
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
                                        uint64_t feature_mask,
                                        bool enabled)
{
        int ret = 0;

        if (enabled) {
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_EnableSmuFeaturesLow,
                                                      lower_32_bits(feature_mask),
                                                      NULL);
                if (ret)
                        return ret;
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_EnableSmuFeaturesHigh,
                                                      upper_32_bits(feature_mask),
                                                      NULL);
        } else {
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_DisableSmuFeaturesLow,
                                                      lower_32_bits(feature_mask),
                                                      NULL);
                if (ret)
                        return ret;
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_DisableSmuFeaturesHigh,
                                                      upper_32_bits(feature_mask),
                                                      NULL);
        }

        return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
                                enum smu_feature_mask mask,
                                bool enable)
{
        int feature_id;

        feature_id = smu_cmn_to_asic_specific_index(smu,
                                                    CMN2ASIC_MAPPING_FEATURE,
                                                    mask);
        if (feature_id < 0)
                return -EINVAL;

        return smu_cmn_feature_update_enable_state(smu,
                                                   1ULL << feature_id,
                                                   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
static const char *__smu_feature_names[] = {
        SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
                                        enum smu_feature_mask feature)
{
        if (feature < 0 || feature >= SMU_FEATURE_COUNT)
                return "unknown smu feature";
        return __smu_feature_names[feature];
}

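/*
 * Emit a human-readable listing of the firmware feature mask into @buf
 * (the sysfs pp_features file): the raw high/low words, followed by one
 * line per known feature with its bit number and enabled/disabled state.
 */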
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
                                   char *buf)
{
        int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
        uint64_t feature_mask;
        int i, feature_index;
        uint32_t count = 0;
        size_t size = 0;

        if (__smu_get_enabled_features(smu, &feature_mask))
                return 0;

        size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
                             upper_32_bits(feature_mask), lower_32_bits(feature_mask));

        memset(sort_feature, -1, sizeof(sort_feature));

        for (i = 0; i < SMU_FEATURE_COUNT; i++) {
                feature_index = smu_cmn_to_asic_specific_index(smu,
                                                               CMN2ASIC_MAPPING_FEATURE,
                                                               i);
                if (feature_index < 0)
                        continue;

                sort_feature[feature_index] = i;
        }

        size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
                              "No", "Feature", "Bit", "State");

        for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
                if (sort_feature[feature_index] < 0)
                        continue;

                size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
                                      count++,
                                      smu_get_feature_name(smu, sort_feature[feature_index]),
                                      feature_index,
                                      !!test_bit(feature_index, (unsigned long *)&feature_mask) ?
                                      "enabled" : "disabled");
        }

        return size;
}

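/*
 * Apply @new_mask by comparing it against the currently enabled feature
 * mask, enabling the bits that are missing and disabling the ones that
 * should no longer be set.
 */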
int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
                                uint64_t new_mask)
{
        int ret = 0;
        uint64_t feature_mask;
        uint64_t feature_2_enabled = 0;
        uint64_t feature_2_disabled = 0;

        ret = __smu_get_enabled_features(smu, &feature_mask);
        if (ret)
                return ret;

        feature_2_enabled = ~feature_mask & new_mask;
        feature_2_disabled = feature_mask & ~new_mask;

        if (feature_2_enabled) {
                ret = smu_cmn_feature_update_enable_state(smu,
                                                          feature_2_enabled,
                                                          true);
                if (ret)
                        return ret;
        }
        if (feature_2_disabled) {
                ret = smu_cmn_feature_update_enable_state(smu,
                                                          feature_2_disabled,
                                                          false);
                if (ret)
                        return ret;
        }

        return ret;
}

/**
 * smu_cmn_disable_all_features_with_exception -- disable all features
 *                                                except the one named
 *                                                by @mask
 * @smu: pointer to an SMU context
 * @mask: the feature to keep enabled; pass SMU_FEATURE_COUNT to disable
 *        every feature with no exception
 *
 * Return 0 on success, a negative errno on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
                                                enum smu_feature_mask mask)
{
        uint64_t features_to_disable = U64_MAX;
        int skipped_feature_id;

        if (mask != SMU_FEATURE_COUNT) {
                skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
                                                                    CMN2ASIC_MAPPING_FEATURE,
                                                                    mask);
                if (skipped_feature_id < 0)
                        return -EINVAL;

                features_to_disable &= ~(1ULL << skipped_feature_id);
        }

        return smu_cmn_feature_update_enable_state(smu,
                                                   features_to_disable,
                                                   0);
}

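/*
 * Return the driver-interface version and/or the SMU firmware version,
 * using the values cached in the SMU context when available and
 * querying the firmware otherwise.
 */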
int smu_cmn_get_smc_version(struct smu_context *smu,
                            uint32_t *if_version,
                            uint32_t *smu_version)
{
        int ret = 0;

        if (!if_version && !smu_version)
                return -EINVAL;

        if (smu->smc_fw_if_version && smu->smc_fw_version) {
                if (if_version)
                        *if_version = smu->smc_fw_if_version;

                if (smu_version)
                        *smu_version = smu->smc_fw_version;

                return 0;
        }

        if (if_version) {
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
                if (ret)
                        return ret;

                smu->smc_fw_if_version = *if_version;
        }

        if (smu_version) {
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
                if (ret)
                        return ret;

                smu->smc_fw_version = *smu_version;
        }

        return ret;
}

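/*
 * Transfer a table between driver memory and the SMU. With @drv2smu true
 * the table is copied from @table_data into the shared driver table and
 * pushed to the SMU; otherwise it is fetched from the SMU and copied
 * back into @table_data. @argument is passed in the upper 16 bits of the
 * transfer message parameter.
 */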
int smu_cmn_update_table(struct smu_context *smu,
                         enum smu_table_id table_index,
                         int argument,
                         void *table_data,
                         bool drv2smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct amdgpu_device *adev = smu->adev;
        struct smu_table *table = &smu_table->driver_table;
        int table_id = smu_cmn_to_asic_specific_index(smu,
                                                      CMN2ASIC_MAPPING_TABLE,
                                                      table_index);
        uint32_t table_size;
        int ret = 0;

        if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
                return -EINVAL;

        table_size = smu_table->tables[table_index].size;

        if (drv2smu) {
                memcpy(table->cpu_addr, table_data, table_size);
                /*
                 * Flush the HDP cache so that the table contents just
                 * written by the CPU are visible to the GPU/SMU before
                 * the transfer message is sent.
                 */
                amdgpu_asic_flush_hdp(adev, NULL);
        }

        ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
                                              SMU_MSG_TransferTableDram2Smu :
                                              SMU_MSG_TransferTableSmu2Dram,
                                              table_id | ((argument & 0xFFFF) << 16),
                                              NULL);
        if (ret)
                return ret;

        if (!drv2smu) {
                amdgpu_asic_invalidate_hdp(adev, NULL);
                memcpy(table_data, table->cpu_addr, table_size);
        }

        return 0;
}

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
        void *watermarks_table = smu->smu_table.watermarks_table;

        if (!watermarks_table)
                return -EINVAL;

        return smu_cmn_update_table(smu,
                                    SMU_TABLE_WATERMARKS,
                                    0,
                                    watermarks_table,
                                    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
        void *pptable = smu->smu_table.driver_pptable;

        return smu_cmn_update_table(smu,
                                    SMU_TABLE_PPTABLE,
                                    0,
                                    pptable,
                                    true);
}

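/*
 * Copy the SMU metrics table into @metrics_table. The table fetched from
 * the firmware is cached for about 1 ms (see msecs_to_jiffies(1) below);
 * pass @bypass_cache to force a fresh read.
 */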
int smu_cmn_get_metrics_table(struct smu_context *smu,
                              void *metrics_table,
                              bool bypass_cache)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        uint32_t table_size =
                smu_table->tables[SMU_TABLE_SMU_METRICS].size;
        int ret = 0;

        if (bypass_cache ||
            !smu_table->metrics_time ||
            time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
                ret = smu_cmn_update_table(smu,
                                           SMU_TABLE_SMU_METRICS,
                                           0,
                                           smu_table->metrics_table,
                                           false);
                if (ret) {
                        dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
                        return ret;
                }
                smu_table->metrics_time = jiffies;
        }

        if (metrics_table)
                memcpy(metrics_table, smu_table->metrics_table, table_size);

        return 0;
}

int smu_cmn_get_combo_pptable(struct smu_context *smu)
{
        void *pptable = smu->smu_table.combo_pptable;

        return smu_cmn_update_table(smu,
                                    SMU_TABLE_COMBO_PPTABLE,
                                    0,
                                    pptable,
                                    false);
}

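/*
 * Initialize a gpu_metrics structure for the given format/content
 * revision: the body is filled with 0xFF ("not available") and the
 * header is set up with the revision numbers and structure size.
 */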
void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
        struct metrics_table_header *header = (struct metrics_table_header *)table;
        uint16_t structure_size;

#define METRICS_VERSION(a, b)   ((a << 16) | b)

        switch (METRICS_VERSION(frev, crev)) {
        case METRICS_VERSION(1, 0):
                structure_size = sizeof(struct gpu_metrics_v1_0);
                break;
        case METRICS_VERSION(1, 1):
                structure_size = sizeof(struct gpu_metrics_v1_1);
                break;
        case METRICS_VERSION(1, 2):
                structure_size = sizeof(struct gpu_metrics_v1_2);
                break;
        case METRICS_VERSION(1, 3):
                structure_size = sizeof(struct gpu_metrics_v1_3);
                break;
        case METRICS_VERSION(2, 0):
                structure_size = sizeof(struct gpu_metrics_v2_0);
                break;
        case METRICS_VERSION(2, 1):
                structure_size = sizeof(struct gpu_metrics_v2_1);
                break;
        case METRICS_VERSION(2, 2):
                structure_size = sizeof(struct gpu_metrics_v2_2);
                break;
        default:
                return;
        }

#undef METRICS_VERSION

        memset(header, 0xFF, structure_size);

        header->format_revision = frev;
        header->content_revision = crev;
        header->structure_size = structure_size;
}

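/*
 * Notify the MP1 (SMU) firmware of an impending shutdown, driver unload
 * or reset by sending the corresponding PrepareMp1For* message.
 */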
int smu_cmn_set_mp1_state(struct smu_context *smu,
                          enum pp_mp1_state mp1_state)
{
        enum smu_message_type msg;
        int ret;

        switch (mp1_state) {
        case PP_MP1_STATE_SHUTDOWN:
                msg = SMU_MSG_PrepareMp1ForShutdown;
                break;
        case PP_MP1_STATE_UNLOAD:
                msg = SMU_MSG_PrepareMp1ForUnload;
                break;
        case PP_MP1_STATE_RESET:
                msg = SMU_MSG_PrepareMp1ForReset;
                break;
        case PP_MP1_STATE_NONE:
        default:
                return 0;
        }

        ret = smu_cmn_send_smc_msg(smu, msg, NULL);
        if (ret)
                dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

        return ret;
}

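/*
 * Report whether the audio function at devfn 1 on the same bus as the
 * GPU is enabled by a driver; if no such PCI function exists, it is
 * treated as enabled.
 */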
bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
        struct pci_dev *p = NULL;
        bool snd_driver_loaded;

        /*
         * If the ASIC comes with no audio function (no PCI device at
         * devfn 1 on the GPU's bus), we always treat it as "enabled".
         */
        p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
                                        adev->pdev->bus->number, 1);
        if (!p)
                return true;

        snd_driver_loaded = pci_is_enabled(p) ? true : false;

        pci_dev_put(p);

        return snd_driver_loaded;
}