0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #include <linux/pci.h>
0025 #include <linux/reboot.h>
0026
0027 #include "hwmgr.h"
0028 #include "pp_debug.h"
0029 #include "ppatomctrl.h"
0030 #include "ppsmc.h"
0031 #include "atom.h"
0032 #include "ivsrcid/thm/irqsrcs_thm_9_0.h"
0033 #include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
0034 #include "ivsrcid/ivsrcid_vislands30.h"
0035
0036 uint8_t convert_to_vid(uint16_t vddc)
0037 {
0038 return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
0039 }
0040
0041 uint16_t convert_to_vddc(uint8_t vid)
0042 {
0043 return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
0044 }
0045
0046 int phm_copy_clock_limits_array(
0047 struct pp_hwmgr *hwmgr,
0048 uint32_t **pptable_info_array,
0049 const uint32_t *pptable_array,
0050 uint32_t power_saving_clock_count)
0051 {
0052 uint32_t array_size, i;
0053 uint32_t *table;
0054
0055 array_size = sizeof(uint32_t) * power_saving_clock_count;
0056 table = kzalloc(array_size, GFP_KERNEL);
0057 if (NULL == table)
0058 return -ENOMEM;
0059
0060 for (i = 0; i < power_saving_clock_count; i++)
0061 table[i] = le32_to_cpu(pptable_array[i]);
0062
0063 *pptable_info_array = table;
0064
0065 return 0;
0066 }
0067
0068 int phm_copy_overdrive_settings_limits_array(
0069 struct pp_hwmgr *hwmgr,
0070 uint32_t **pptable_info_array,
0071 const uint32_t *pptable_array,
0072 uint32_t od_setting_count)
0073 {
0074 uint32_t array_size, i;
0075 uint32_t *table;
0076
0077 array_size = sizeof(uint32_t) * od_setting_count;
0078 table = kzalloc(array_size, GFP_KERNEL);
0079 if (NULL == table)
0080 return -ENOMEM;
0081
0082 for (i = 0; i < od_setting_count; i++)
0083 table[i] = le32_to_cpu(pptable_array[i]);
0084
0085 *pptable_info_array = table;
0086
0087 return 0;
0088 }
0089
0090 uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
0091 {
0092 u32 mask = 0;
0093 u32 shift = 0;
0094
0095 shift = (offset % 4) << 3;
0096 if (size == sizeof(uint8_t))
0097 mask = 0xFF << shift;
0098 else if (size == sizeof(uint16_t))
0099 mask = 0xFFFF << shift;
0100
0101 original_data &= ~mask;
0102 original_data |= (field << shift);
0103 return original_data;
0104 }
0105
0106
0107
0108
0109
0110 int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
0111 uint32_t value, uint32_t mask)
0112 {
0113 uint32_t i;
0114 uint32_t cur_value;
0115
0116 if (hwmgr == NULL || hwmgr->device == NULL) {
0117 pr_err("Invalid Hardware Manager!");
0118 return -EINVAL;
0119 }
0120
0121 for (i = 0; i < hwmgr->usec_timeout; i++) {
0122 cur_value = cgs_read_register(hwmgr->device, index);
0123 if ((cur_value & mask) == (value & mask))
0124 break;
0125 udelay(1);
0126 }
0127
0128
0129 if (i == hwmgr->usec_timeout)
0130 return -1;
0131 return 0;
0132 }
0133
0134
0135
0136
0137
0138
0139
0140 int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
0141 uint32_t indirect_port,
0142 uint32_t index,
0143 uint32_t value,
0144 uint32_t mask)
0145 {
0146 if (hwmgr == NULL || hwmgr->device == NULL) {
0147 pr_err("Invalid Hardware Manager!");
0148 return -EINVAL;
0149 }
0150
0151 cgs_write_register(hwmgr->device, indirect_port, index);
0152 return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
0153 }
0154
0155 int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
0156 uint32_t index,
0157 uint32_t value, uint32_t mask)
0158 {
0159 uint32_t i;
0160 uint32_t cur_value;
0161
0162 if (hwmgr == NULL || hwmgr->device == NULL)
0163 return -EINVAL;
0164
0165 for (i = 0; i < hwmgr->usec_timeout; i++) {
0166 cur_value = cgs_read_register(hwmgr->device,
0167 index);
0168 if ((cur_value & mask) != (value & mask))
0169 break;
0170 udelay(1);
0171 }
0172
0173
0174 if (i == hwmgr->usec_timeout)
0175 return -ETIME;
0176 return 0;
0177 }
0178
0179 int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
0180 uint32_t indirect_port,
0181 uint32_t index,
0182 uint32_t value,
0183 uint32_t mask)
0184 {
0185 if (hwmgr == NULL || hwmgr->device == NULL)
0186 return -EINVAL;
0187
0188 cgs_write_register(hwmgr->device, indirect_port, index);
0189 return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
0190 value, mask);
0191 }
0192
0193 bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
0194 {
0195 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
0196 }
0197
0198 bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
0199 {
0200 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
0201 }
0202
0203
0204 int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
0205 {
0206 uint32_t i, j;
0207 uint16_t vvalue;
0208 bool found = false;
0209 struct pp_atomctrl_voltage_table *table;
0210
0211 PP_ASSERT_WITH_CODE((NULL != vol_table),
0212 "Voltage Table empty.", return -EINVAL);
0213
0214 table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
0215 GFP_KERNEL);
0216
0217 if (NULL == table)
0218 return -EINVAL;
0219
0220 table->mask_low = vol_table->mask_low;
0221 table->phase_delay = vol_table->phase_delay;
0222
0223 for (i = 0; i < vol_table->count; i++) {
0224 vvalue = vol_table->entries[i].value;
0225 found = false;
0226
0227 for (j = 0; j < table->count; j++) {
0228 if (vvalue == table->entries[j].value) {
0229 found = true;
0230 break;
0231 }
0232 }
0233
0234 if (!found) {
0235 table->entries[table->count].value = vvalue;
0236 table->entries[table->count].smio_low =
0237 vol_table->entries[i].smio_low;
0238 table->count++;
0239 }
0240 }
0241
0242 memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
0243 kfree(table);
0244 table = NULL;
0245 return 0;
0246 }
0247
0248 int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
0249 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
0250 {
0251 uint32_t i;
0252 int result;
0253
0254 PP_ASSERT_WITH_CODE((0 != dep_table->count),
0255 "Voltage Dependency Table empty.", return -EINVAL);
0256
0257 PP_ASSERT_WITH_CODE((NULL != vol_table),
0258 "vol_table empty.", return -EINVAL);
0259
0260 vol_table->mask_low = 0;
0261 vol_table->phase_delay = 0;
0262 vol_table->count = dep_table->count;
0263
0264 for (i = 0; i < dep_table->count; i++) {
0265 vol_table->entries[i].value = dep_table->entries[i].mvdd;
0266 vol_table->entries[i].smio_low = 0;
0267 }
0268
0269 result = phm_trim_voltage_table(vol_table);
0270 PP_ASSERT_WITH_CODE((0 == result),
0271 "Failed to trim MVDD table.", return result);
0272
0273 return 0;
0274 }
0275
0276 int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
0277 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
0278 {
0279 uint32_t i;
0280 int result;
0281
0282 PP_ASSERT_WITH_CODE((0 != dep_table->count),
0283 "Voltage Dependency Table empty.", return -EINVAL);
0284
0285 PP_ASSERT_WITH_CODE((NULL != vol_table),
0286 "vol_table empty.", return -EINVAL);
0287
0288 vol_table->mask_low = 0;
0289 vol_table->phase_delay = 0;
0290 vol_table->count = dep_table->count;
0291
0292 for (i = 0; i < dep_table->count; i++) {
0293 vol_table->entries[i].value = dep_table->entries[i].vddci;
0294 vol_table->entries[i].smio_low = 0;
0295 }
0296
0297 result = phm_trim_voltage_table(vol_table);
0298 PP_ASSERT_WITH_CODE((0 == result),
0299 "Failed to trim VDDCI table.", return result);
0300
0301 return 0;
0302 }
0303
0304 int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
0305 phm_ppt_v1_voltage_lookup_table *lookup_table)
0306 {
0307 int i = 0;
0308
0309 PP_ASSERT_WITH_CODE((0 != lookup_table->count),
0310 "Voltage Lookup Table empty.", return -EINVAL);
0311
0312 PP_ASSERT_WITH_CODE((NULL != vol_table),
0313 "vol_table empty.", return -EINVAL);
0314
0315 vol_table->mask_low = 0;
0316 vol_table->phase_delay = 0;
0317
0318 vol_table->count = lookup_table->count;
0319
0320 for (i = 0; i < vol_table->count; i++) {
0321 vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
0322 vol_table->entries[i].smio_low = 0;
0323 }
0324
0325 return 0;
0326 }
0327
0328 void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
0329 struct pp_atomctrl_voltage_table *vol_table)
0330 {
0331 unsigned int i, diff;
0332
0333 if (vol_table->count <= max_vol_steps)
0334 return;
0335
0336 diff = vol_table->count - max_vol_steps;
0337
0338 for (i = 0; i < max_vol_steps; i++)
0339 vol_table->entries[i] = vol_table->entries[i + diff];
0340
0341 vol_table->count = max_vol_steps;
0342
0343 return;
0344 }
0345
0346 int phm_reset_single_dpm_table(void *table,
0347 uint32_t count, int max)
0348 {
0349 int i;
0350
0351 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
0352
0353 dpm_table->count = count > max ? max : count;
0354
0355 for (i = 0; i < dpm_table->count; i++)
0356 dpm_table->dpm_level[i].enabled = false;
0357
0358 return 0;
0359 }
0360
0361 void phm_setup_pcie_table_entry(
0362 void *table,
0363 uint32_t index, uint32_t pcie_gen,
0364 uint32_t pcie_lanes)
0365 {
0366 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
0367 dpm_table->dpm_level[index].value = pcie_gen;
0368 dpm_table->dpm_level[index].param1 = pcie_lanes;
0369 dpm_table->dpm_level[index].enabled = 1;
0370 }
0371
0372 int32_t phm_get_dpm_level_enable_mask_value(void *table)
0373 {
0374 int32_t i;
0375 int32_t mask = 0;
0376 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
0377
0378 for (i = dpm_table->count; i > 0; i--) {
0379 mask = mask << 1;
0380 if (dpm_table->dpm_level[i - 1].enabled)
0381 mask |= 0x1;
0382 else
0383 mask &= 0xFFFFFFFE;
0384 }
0385
0386 return mask;
0387 }
0388
0389 uint8_t phm_get_voltage_index(
0390 struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
0391 {
0392 uint8_t count = (uint8_t) (lookup_table->count);
0393 uint8_t i;
0394
0395 PP_ASSERT_WITH_CODE((NULL != lookup_table),
0396 "Lookup Table empty.", return 0);
0397 PP_ASSERT_WITH_CODE((0 != count),
0398 "Lookup Table empty.", return 0);
0399
0400 for (i = 0; i < lookup_table->count; i++) {
0401
0402 if (lookup_table->entries[i].us_vdd >= voltage)
0403 return i;
0404 }
0405
0406 return i - 1;
0407 }
0408
0409 uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
0410 uint32_t voltage)
0411 {
0412 uint8_t count = (uint8_t) (voltage_table->count);
0413 uint8_t i = 0;
0414
0415 PP_ASSERT_WITH_CODE((NULL != voltage_table),
0416 "Voltage Table empty.", return 0;);
0417 PP_ASSERT_WITH_CODE((0 != count),
0418 "Voltage Table empty.", return 0;);
0419
0420 for (i = 0; i < count; i++) {
0421
0422 if (voltage_table->entries[i].value >= voltage)
0423 return i;
0424 }
0425
0426
0427 return i - 1;
0428 }
0429
/*
 * Return the smallest VDDCI table value that is >= the requested vddci;
 * if the request exceeds every entry, return the table maximum.
 * NOTE(review): assumes vddci_table->count >= 1 — with an empty table
 * the fallback would read entries[0xFFFFFFFF] (i is unsigned); confirm
 * callers never pass an empty table.
 */
uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
{
uint32_t i;

for (i = 0; i < vddci_table->count; i++) {
if (vddci_table->entries[i].value >= vddci)
return vddci_table->entries[i].value;
}

/* Fell off the end: request is above the table maximum. */
pr_debug("vddci is larger than max value in vddci_table\n");
return vddci_table->entries[i-1].value;
}
0442
0443 int phm_find_boot_level(void *table,
0444 uint32_t value, uint32_t *boot_level)
0445 {
0446 int result = -EINVAL;
0447 uint32_t i;
0448 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
0449
0450 for (i = 0; i < dpm_table->count; i++) {
0451 if (value == dpm_table->dpm_level[i].value) {
0452 *boot_level = i;
0453 result = 0;
0454 }
0455 }
0456
0457 return result;
0458 }
0459
0460 int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
0461 phm_ppt_v1_voltage_lookup_table *lookup_table,
0462 uint16_t virtual_voltage_id, int32_t *sclk)
0463 {
0464 uint8_t entry_id;
0465 uint8_t voltage_id;
0466 struct phm_ppt_v1_information *table_info =
0467 (struct phm_ppt_v1_information *)(hwmgr->pptable);
0468
0469 PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
0470
0471
0472 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
0473 voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
0474 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
0475 break;
0476 }
0477
0478 if (entry_id >= table_info->vdd_dep_on_sclk->count) {
0479 pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
0480 return -EINVAL;
0481 }
0482
0483 *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
0484
0485 return 0;
0486 }
0487
0488
0489
0490
0491
0492
0493 int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
0494 {
0495 struct phm_clock_voltage_dependency_table *table_clk_vlt;
0496 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
0497
0498
0499 table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 4),
0500 GFP_KERNEL);
0501
0502 if (NULL == table_clk_vlt) {
0503 pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
0504 return -ENOMEM;
0505 } else {
0506 table_clk_vlt->count = 4;
0507 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
0508 if (hwmgr->chip_id >= CHIP_POLARIS10 &&
0509 hwmgr->chip_id <= CHIP_VEGAM)
0510 table_clk_vlt->entries[0].v = 700;
0511 else
0512 table_clk_vlt->entries[0].v = 0;
0513 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
0514 if (hwmgr->chip_id >= CHIP_POLARIS10 &&
0515 hwmgr->chip_id <= CHIP_VEGAM)
0516 table_clk_vlt->entries[1].v = 740;
0517 else
0518 table_clk_vlt->entries[1].v = 720;
0519 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
0520 if (hwmgr->chip_id >= CHIP_POLARIS10 &&
0521 hwmgr->chip_id <= CHIP_VEGAM)
0522 table_clk_vlt->entries[2].v = 800;
0523 else
0524 table_clk_vlt->entries[2].v = 810;
0525 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
0526 table_clk_vlt->entries[3].v = 900;
0527 if (pptable_info != NULL)
0528 pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
0529 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
0530 }
0531
0532 return 0;
0533 }
0534
0535 uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
0536 {
0537 uint32_t level = 0;
0538
0539 while (0 == (mask & (1 << level)))
0540 level++;
0541
0542 return level;
0543 }
0544
/*
 * Honour the display (DAL) minimum-voltage request: map the current
 * hwmgr->dal_power_level to a VDDC target via the vddc_dep_on_dal_pwrl
 * table, then ask the SMC (PPSMC_MSG_VddC_Request) for the lowest
 * SCLK-DPM voltage that satisfies it. Returns silently when the
 * dependency table is absent or the power level is out of range.
 */
void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)hwmgr->pptable;
struct phm_clock_voltage_dependency_table *table =
table_info->vddc_dep_on_dal_pwrl;
struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
uint32_t req_vddc = 0, req_volt, i;

if (!table || table->count <= 0
|| dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
|| dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
return;

/* Translate the DAL power level into its required VDDC. */
for (i = 0; i < table->count; i++) {
if (dal_power_level == table->entries[i].clk) {
req_vddc = table->entries[i].v;
break;
}
}

/* Send the first SCLK-DPM voltage that meets (or exceeds) the request. */
vddc_table = table_info->vdd_dep_on_sclk;
for (i = 0; i < vddc_table->count; i++) {
if (req_vddc <= vddc_table->entries[i].vddc) {
req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VddC_Request,
req_volt,
NULL);
return;
}
}
/* No DPM voltage satisfies the request — log and give up. */
pr_err("DAL requested level can not"
" found a available voltage in VDDC DPM Table \n");
}
0581
0582 int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
0583 uint32_t sclk, uint16_t id, uint16_t *voltage)
0584 {
0585 uint32_t vol;
0586 int ret = 0;
0587
0588 if (hwmgr->chip_id < CHIP_TONGA) {
0589 ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
0590 } else if (hwmgr->chip_id < CHIP_POLARIS10) {
0591 ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
0592 if (*voltage >= 2000 || *voltage == 0)
0593 *voltage = 1150;
0594 } else {
0595 ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
0596 *voltage = (uint16_t)(vol/100);
0597 }
0598 return ret;
0599 }
0600
0601
/*
 * Thermal/CTF interrupt handler shared by legacy (VI) and SOC15 parts.
 * Decodes the IV entry by client and source id:
 *  - software CTF (over-temperature): log and orderly_poweroff(true)
 *  - under-temperature: log only
 *  - hardware CTF (GPIO/SMUIO): log and orderly_poweroff(true)
 * Always returns 0 (interrupt consumed).
 */
int phm_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;

if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
/* SW CTF: shut down before the GPU damages itself. */
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
orderly_poweroff(true);
} else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
/* HW CTF: the hardware has already tripped; power off. */
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
orderly_poweroff(true);
}
} else if (client_id == SOC15_IH_CLIENTID_THM) {
if (src_id == 0) {
dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
/* SW CTF on SOC15 thermal client. */
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
orderly_poweroff(true);
} else
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
/* HW CTF via SMUIO GPIO on SOC15. */
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
orderly_poweroff(true);
}

return 0;
}
0650
/* IRQ dispatch table for SMU9 thermal/CTF sources; all route to phm_irq_process. */
static const struct amdgpu_irq_src_funcs smu9_irq_funcs = {
.process = phm_irq_process,
};
0654
/*
 * Register the SMU9 thermal interrupt sources (digital-thermal
 * low-to-high and high-to-low) and the SMUIO GPIO19 hardware-CTF
 * source, all dispatched through smu9_irq_funcs.
 * Returns 0 on success, -ENOMEM if the irq source cannot be allocated.
 * NOTE(review): the amdgpu_irq_add_id() return values are ignored here,
 * and 'source' is never freed — confirm whether registration failures
 * should be propagated.
 */
int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
struct amdgpu_irq_src *source =
kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

if (!source)
return -ENOMEM;

source->funcs = &smu9_irq_funcs;

amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_THM,
THM_9_0__SRCID__THM_DIG_THERM_L2H,
source);
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_THM,
THM_9_0__SRCID__THM_DIG_THERM_H2L,
source);

/* Register CTF(GPIO_19) interrupt */
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_ROM_SMUIO,
SMUIO_9_0__SRCID__SMUIO_GPIO19,
source);

return 0;
}
0682
0683 void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
0684 uint8_t *frev, uint8_t *crev)
0685 {
0686 struct amdgpu_device *adev = dev;
0687 uint16_t data_start;
0688
0689 if (amdgpu_atom_parse_data_header(
0690 adev->mode_info.atom_context, table, size,
0691 frev, crev, &data_start))
0692 return (uint8_t *)adev->mode_info.atom_context->bios +
0693 data_start;
0694
0695 return NULL;
0696 }
0697
0698 int smu_get_voltage_dependency_table_ppt_v1(
0699 const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
0700 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
0701 {
0702 uint8_t i = 0;
0703 PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
0704 "Voltage Lookup Table empty",
0705 return -EINVAL);
0706
0707 dep_table->count = allowed_dep_table->count;
0708 for (i=0; i<dep_table->count; i++) {
0709 dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
0710 dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
0711 dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
0712 dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
0713 dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
0714 dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
0715 dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
0716 dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
0717 dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
0718 dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
0719 }
0720
0721 return 0;
0722 }
0723
/*
 * Translate DM watermark clock ranges (kHz) into the SMU watermarks
 * table (MHz, little-endian u16).
 * Row mapping: WatermarkRow[1] holds the DMIF sets, WatermarkRow[0]
 * holds the MCIF sets. At most 4 sets of each are accepted.
 * Returns 0 on success, -EINVAL on NULL tables or too many sets.
 */
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
uint32_t i;
struct watermarks *table = wt_table;

if (!table || !wm_with_clock_ranges)
return -EINVAL;

/* The SMU table has room for only 4 DMIF and 4 MCIF watermark sets. */
if (wm_with_clock_ranges->num_wm_dmif_sets > 4 || wm_with_clock_ranges->num_wm_mcif_sets > 4)
return -EINVAL;

/* DMIF sets go into row 1; clocks are converted from kHz to MHz. */
for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
table->WatermarkRow[1][i].MinClock =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
1000));
table->WatermarkRow[1][i].MaxClock =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
1000));
table->WatermarkRow[1][i].MinUclk =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1000));
table->WatermarkRow[1][i].MaxUclk =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1000));
table->WatermarkRow[1][i].WmSetting = (uint8_t)
wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
}

/* MCIF sets go into row 0; clocks are converted from kHz to MHz. */
for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
table->WatermarkRow[0][i].MinClock =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
1000));
table->WatermarkRow[0][i].MaxClock =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
1000));
table->WatermarkRow[0][i].MinUclk =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1000));
table->WatermarkRow[0][i].MaxUclk =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1000));
table->WatermarkRow[0][i].WmSetting = (uint8_t)
wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
return 0;
}