0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026 #include "amdgpu.h"
0027 #include "amdgpu_drv.h"
0028 #include "amdgpu_pm.h"
0029 #include "amdgpu_dpm.h"
0030 #include "atom.h"
0031 #include <linux/pci.h>
0032 #include <linux/hwmon.h>
0033 #include <linux/hwmon-sysfs.h>
0034 #include <linux/nospec.h>
0035 #include <linux/pm_runtime.h>
0036 #include <asm/processor.h>
0037
0038 static const struct cg_flag_name clocks[] = {
0039 {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
0040 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
0041 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
0042 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
0043 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
0044 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
0045 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
0046 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
0047 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
0048 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
0049 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
0050 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
0051 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
0052 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
0053 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
0054 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
0055 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
0056 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
0057 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
0058 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
0059 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
0060 {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
0061 {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
0062 {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
0063 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
0064 {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
0065 {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
0066 {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
0067 {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
0068 {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
0069 {AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
0070 {AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
0071 {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
0072 {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
0073 {0, NULL},
0074 };
0075
0076 static const struct hwmon_temp_label {
0077 enum PP_HWMON_TEMP channel;
0078 const char *label;
0079 } temp_label[] = {
0080 {PP_TEMP_EDGE, "edge"},
0081 {PP_TEMP_JUNCTION, "junction"},
0082 {PP_TEMP_MEM, "mem"},
0083 };
0084
0085 const char * const amdgpu_pp_profile_name[] = {
0086 "BOOTUP_DEFAULT",
0087 "3D_FULL_SCREEN",
0088 "POWER_SAVING",
0089 "VIDEO",
0090 "VR",
0091 "COMPUTE",
0092 "CUSTOM",
0093 "WINDOW_3D",
0094 };
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130 static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
0131 struct device_attribute *attr,
0132 char *buf)
0133 {
0134 struct drm_device *ddev = dev_get_drvdata(dev);
0135 struct amdgpu_device *adev = drm_to_adev(ddev);
0136 enum amd_pm_state_type pm;
0137 int ret;
0138
0139 if (amdgpu_in_reset(adev))
0140 return -EPERM;
0141 if (adev->in_suspend && !adev->in_runpm)
0142 return -EPERM;
0143
0144 ret = pm_runtime_get_sync(ddev->dev);
0145 if (ret < 0) {
0146 pm_runtime_put_autosuspend(ddev->dev);
0147 return ret;
0148 }
0149
0150 amdgpu_dpm_get_current_power_state(adev, &pm);
0151
0152 pm_runtime_mark_last_busy(ddev->dev);
0153 pm_runtime_put_autosuspend(ddev->dev);
0154
0155 return sysfs_emit(buf, "%s\n",
0156 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
0157 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
0158 }
0159
0160 static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
0161 struct device_attribute *attr,
0162 const char *buf,
0163 size_t count)
0164 {
0165 struct drm_device *ddev = dev_get_drvdata(dev);
0166 struct amdgpu_device *adev = drm_to_adev(ddev);
0167 enum amd_pm_state_type state;
0168 int ret;
0169
0170 if (amdgpu_in_reset(adev))
0171 return -EPERM;
0172 if (adev->in_suspend && !adev->in_runpm)
0173 return -EPERM;
0174
0175 if (strncmp("battery", buf, strlen("battery")) == 0)
0176 state = POWER_STATE_TYPE_BATTERY;
0177 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
0178 state = POWER_STATE_TYPE_BALANCED;
0179 else if (strncmp("performance", buf, strlen("performance")) == 0)
0180 state = POWER_STATE_TYPE_PERFORMANCE;
0181 else
0182 return -EINVAL;
0183
0184 ret = pm_runtime_get_sync(ddev->dev);
0185 if (ret < 0) {
0186 pm_runtime_put_autosuspend(ddev->dev);
0187 return ret;
0188 }
0189
0190 amdgpu_dpm_set_power_state(adev, state);
0191
0192 pm_runtime_mark_last_busy(ddev->dev);
0193 pm_runtime_put_autosuspend(ddev->dev);
0194
0195 return count;
0196 }
0197
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258 static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
0259 struct device_attribute *attr,
0260 char *buf)
0261 {
0262 struct drm_device *ddev = dev_get_drvdata(dev);
0263 struct amdgpu_device *adev = drm_to_adev(ddev);
0264 enum amd_dpm_forced_level level = 0xff;
0265 int ret;
0266
0267 if (amdgpu_in_reset(adev))
0268 return -EPERM;
0269 if (adev->in_suspend && !adev->in_runpm)
0270 return -EPERM;
0271
0272 ret = pm_runtime_get_sync(ddev->dev);
0273 if (ret < 0) {
0274 pm_runtime_put_autosuspend(ddev->dev);
0275 return ret;
0276 }
0277
0278 level = amdgpu_dpm_get_performance_level(adev);
0279
0280 pm_runtime_mark_last_busy(ddev->dev);
0281 pm_runtime_put_autosuspend(ddev->dev);
0282
0283 return sysfs_emit(buf, "%s\n",
0284 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
0285 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
0286 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
0287 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
0288 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
0289 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
0290 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
0291 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
0292 (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
0293 "unknown");
0294 }
0295
0296 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
0297 struct device_attribute *attr,
0298 const char *buf,
0299 size_t count)
0300 {
0301 struct drm_device *ddev = dev_get_drvdata(dev);
0302 struct amdgpu_device *adev = drm_to_adev(ddev);
0303 enum amd_dpm_forced_level level;
0304 int ret = 0;
0305
0306 if (amdgpu_in_reset(adev))
0307 return -EPERM;
0308 if (adev->in_suspend && !adev->in_runpm)
0309 return -EPERM;
0310
0311 if (strncmp("low", buf, strlen("low")) == 0) {
0312 level = AMD_DPM_FORCED_LEVEL_LOW;
0313 } else if (strncmp("high", buf, strlen("high")) == 0) {
0314 level = AMD_DPM_FORCED_LEVEL_HIGH;
0315 } else if (strncmp("auto", buf, strlen("auto")) == 0) {
0316 level = AMD_DPM_FORCED_LEVEL_AUTO;
0317 } else if (strncmp("manual", buf, strlen("manual")) == 0) {
0318 level = AMD_DPM_FORCED_LEVEL_MANUAL;
0319 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
0320 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
0321 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
0322 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
0323 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
0324 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
0325 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
0326 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
0327 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
0328 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
0329 } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
0330 level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
0331 } else {
0332 return -EINVAL;
0333 }
0334
0335 ret = pm_runtime_get_sync(ddev->dev);
0336 if (ret < 0) {
0337 pm_runtime_put_autosuspend(ddev->dev);
0338 return ret;
0339 }
0340
0341 mutex_lock(&adev->pm.stable_pstate_ctx_lock);
0342 if (amdgpu_dpm_force_performance_level(adev, level)) {
0343 pm_runtime_mark_last_busy(ddev->dev);
0344 pm_runtime_put_autosuspend(ddev->dev);
0345 mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
0346 return -EINVAL;
0347 }
0348
0349 adev->pm.stable_pstate_ctx = NULL;
0350 mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
0351
0352 pm_runtime_mark_last_busy(ddev->dev);
0353 pm_runtime_put_autosuspend(ddev->dev);
0354
0355 return count;
0356 }
0357
0358 static ssize_t amdgpu_get_pp_num_states(struct device *dev,
0359 struct device_attribute *attr,
0360 char *buf)
0361 {
0362 struct drm_device *ddev = dev_get_drvdata(dev);
0363 struct amdgpu_device *adev = drm_to_adev(ddev);
0364 struct pp_states_info data;
0365 uint32_t i;
0366 int buf_len, ret;
0367
0368 if (amdgpu_in_reset(adev))
0369 return -EPERM;
0370 if (adev->in_suspend && !adev->in_runpm)
0371 return -EPERM;
0372
0373 ret = pm_runtime_get_sync(ddev->dev);
0374 if (ret < 0) {
0375 pm_runtime_put_autosuspend(ddev->dev);
0376 return ret;
0377 }
0378
0379 if (amdgpu_dpm_get_pp_num_states(adev, &data))
0380 memset(&data, 0, sizeof(data));
0381
0382 pm_runtime_mark_last_busy(ddev->dev);
0383 pm_runtime_put_autosuspend(ddev->dev);
0384
0385 buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
0386 for (i = 0; i < data.nums; i++)
0387 buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
0388 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
0389 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
0390 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
0391 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
0392
0393 return buf_len;
0394 }
0395
0396 static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
0397 struct device_attribute *attr,
0398 char *buf)
0399 {
0400 struct drm_device *ddev = dev_get_drvdata(dev);
0401 struct amdgpu_device *adev = drm_to_adev(ddev);
0402 struct pp_states_info data = {0};
0403 enum amd_pm_state_type pm = 0;
0404 int i = 0, ret = 0;
0405
0406 if (amdgpu_in_reset(adev))
0407 return -EPERM;
0408 if (adev->in_suspend && !adev->in_runpm)
0409 return -EPERM;
0410
0411 ret = pm_runtime_get_sync(ddev->dev);
0412 if (ret < 0) {
0413 pm_runtime_put_autosuspend(ddev->dev);
0414 return ret;
0415 }
0416
0417 amdgpu_dpm_get_current_power_state(adev, &pm);
0418
0419 ret = amdgpu_dpm_get_pp_num_states(adev, &data);
0420
0421 pm_runtime_mark_last_busy(ddev->dev);
0422 pm_runtime_put_autosuspend(ddev->dev);
0423
0424 if (ret)
0425 return ret;
0426
0427 for (i = 0; i < data.nums; i++) {
0428 if (pm == data.states[i])
0429 break;
0430 }
0431
0432 if (i == data.nums)
0433 i = -EINVAL;
0434
0435 return sysfs_emit(buf, "%d\n", i);
0436 }
0437
0438 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
0439 struct device_attribute *attr,
0440 char *buf)
0441 {
0442 struct drm_device *ddev = dev_get_drvdata(dev);
0443 struct amdgpu_device *adev = drm_to_adev(ddev);
0444
0445 if (amdgpu_in_reset(adev))
0446 return -EPERM;
0447 if (adev->in_suspend && !adev->in_runpm)
0448 return -EPERM;
0449
0450 if (adev->pm.pp_force_state_enabled)
0451 return amdgpu_get_pp_cur_state(dev, attr, buf);
0452 else
0453 return sysfs_emit(buf, "\n");
0454 }
0455
0456 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
0457 struct device_attribute *attr,
0458 const char *buf,
0459 size_t count)
0460 {
0461 struct drm_device *ddev = dev_get_drvdata(dev);
0462 struct amdgpu_device *adev = drm_to_adev(ddev);
0463 enum amd_pm_state_type state = 0;
0464 struct pp_states_info data;
0465 unsigned long idx;
0466 int ret;
0467
0468 if (amdgpu_in_reset(adev))
0469 return -EPERM;
0470 if (adev->in_suspend && !adev->in_runpm)
0471 return -EPERM;
0472
0473 adev->pm.pp_force_state_enabled = false;
0474
0475 if (strlen(buf) == 1)
0476 return count;
0477
0478 ret = kstrtoul(buf, 0, &idx);
0479 if (ret || idx >= ARRAY_SIZE(data.states))
0480 return -EINVAL;
0481
0482 idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
0483
0484 ret = pm_runtime_get_sync(ddev->dev);
0485 if (ret < 0) {
0486 pm_runtime_put_autosuspend(ddev->dev);
0487 return ret;
0488 }
0489
0490 ret = amdgpu_dpm_get_pp_num_states(adev, &data);
0491 if (ret)
0492 goto err_out;
0493
0494 state = data.states[idx];
0495
0496
0497 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
0498 state != POWER_STATE_TYPE_DEFAULT) {
0499 ret = amdgpu_dpm_dispatch_task(adev,
0500 AMD_PP_TASK_ENABLE_USER_STATE, &state);
0501 if (ret)
0502 goto err_out;
0503
0504 adev->pm.pp_force_state_enabled = true;
0505 }
0506
0507 pm_runtime_mark_last_busy(ddev->dev);
0508 pm_runtime_put_autosuspend(ddev->dev);
0509
0510 return count;
0511
0512 err_out:
0513 pm_runtime_mark_last_busy(ddev->dev);
0514 pm_runtime_put_autosuspend(ddev->dev);
0515 return ret;
0516 }
0517
0518
0519
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529 static ssize_t amdgpu_get_pp_table(struct device *dev,
0530 struct device_attribute *attr,
0531 char *buf)
0532 {
0533 struct drm_device *ddev = dev_get_drvdata(dev);
0534 struct amdgpu_device *adev = drm_to_adev(ddev);
0535 char *table = NULL;
0536 int size, ret;
0537
0538 if (amdgpu_in_reset(adev))
0539 return -EPERM;
0540 if (adev->in_suspend && !adev->in_runpm)
0541 return -EPERM;
0542
0543 ret = pm_runtime_get_sync(ddev->dev);
0544 if (ret < 0) {
0545 pm_runtime_put_autosuspend(ddev->dev);
0546 return ret;
0547 }
0548
0549 size = amdgpu_dpm_get_pp_table(adev, &table);
0550
0551 pm_runtime_mark_last_busy(ddev->dev);
0552 pm_runtime_put_autosuspend(ddev->dev);
0553
0554 if (size <= 0)
0555 return size;
0556
0557 if (size >= PAGE_SIZE)
0558 size = PAGE_SIZE - 1;
0559
0560 memcpy(buf, table, size);
0561
0562 return size;
0563 }
0564
0565 static ssize_t amdgpu_set_pp_table(struct device *dev,
0566 struct device_attribute *attr,
0567 const char *buf,
0568 size_t count)
0569 {
0570 struct drm_device *ddev = dev_get_drvdata(dev);
0571 struct amdgpu_device *adev = drm_to_adev(ddev);
0572 int ret = 0;
0573
0574 if (amdgpu_in_reset(adev))
0575 return -EPERM;
0576 if (adev->in_suspend && !adev->in_runpm)
0577 return -EPERM;
0578
0579 ret = pm_runtime_get_sync(ddev->dev);
0580 if (ret < 0) {
0581 pm_runtime_put_autosuspend(ddev->dev);
0582 return ret;
0583 }
0584
0585 ret = amdgpu_dpm_set_pp_table(adev, buf, count);
0586
0587 pm_runtime_mark_last_busy(ddev->dev);
0588 pm_runtime_put_autosuspend(ddev->dev);
0589
0590 if (ret)
0591 return ret;
0592
0593 return count;
0594 }
0595
0596
0597
0598
0599
0600
0601
0602
0603
0604
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668
0669
0670
0671
0672
0673
0674
0675
0676
0677
0678
0679
0680
0681
0682
0683
0684
0685
0686
0687
0688
0689
0690
0691
0692
0693
0694
0695
0696
0697
0698
0699
0700
0701
0702
0703
0704
0705
0706
0707
0708
0709
0710
0711
0712
0713
0714
0715
0716
0717
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
0728
0729
0730
0731
0732
0733
0734
0735
0736
0737
0738
0739
0740 static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
0741 struct device_attribute *attr,
0742 const char *buf,
0743 size_t count)
0744 {
0745 struct drm_device *ddev = dev_get_drvdata(dev);
0746 struct amdgpu_device *adev = drm_to_adev(ddev);
0747 int ret;
0748 uint32_t parameter_size = 0;
0749 long parameter[64];
0750 char buf_cpy[128];
0751 char *tmp_str;
0752 char *sub_str;
0753 const char delimiter[3] = {' ', '\n', '\0'};
0754 uint32_t type;
0755
0756 if (amdgpu_in_reset(adev))
0757 return -EPERM;
0758 if (adev->in_suspend && !adev->in_runpm)
0759 return -EPERM;
0760
0761 if (count > 127)
0762 return -EINVAL;
0763
0764 if (*buf == 's')
0765 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
0766 else if (*buf == 'p')
0767 type = PP_OD_EDIT_CCLK_VDDC_TABLE;
0768 else if (*buf == 'm')
0769 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
0770 else if(*buf == 'r')
0771 type = PP_OD_RESTORE_DEFAULT_TABLE;
0772 else if (*buf == 'c')
0773 type = PP_OD_COMMIT_DPM_TABLE;
0774 else if (!strncmp(buf, "vc", 2))
0775 type = PP_OD_EDIT_VDDC_CURVE;
0776 else if (!strncmp(buf, "vo", 2))
0777 type = PP_OD_EDIT_VDDGFX_OFFSET;
0778 else
0779 return -EINVAL;
0780
0781 memcpy(buf_cpy, buf, count+1);
0782
0783 tmp_str = buf_cpy;
0784
0785 if ((type == PP_OD_EDIT_VDDC_CURVE) ||
0786 (type == PP_OD_EDIT_VDDGFX_OFFSET))
0787 tmp_str++;
0788 while (isspace(*++tmp_str));
0789
0790 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
0791 if (strlen(sub_str) == 0)
0792 continue;
0793 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]);
0794 if (ret)
0795 return -EINVAL;
0796 parameter_size++;
0797
0798 while (isspace(*tmp_str))
0799 tmp_str++;
0800 }
0801
0802 ret = pm_runtime_get_sync(ddev->dev);
0803 if (ret < 0) {
0804 pm_runtime_put_autosuspend(ddev->dev);
0805 return ret;
0806 }
0807
0808 if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
0809 type,
0810 parameter,
0811 parameter_size))
0812 goto err_out;
0813
0814 if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
0815 parameter, parameter_size))
0816 goto err_out;
0817
0818 if (type == PP_OD_COMMIT_DPM_TABLE) {
0819 if (amdgpu_dpm_dispatch_task(adev,
0820 AMD_PP_TASK_READJUST_POWER_STATE,
0821 NULL))
0822 goto err_out;
0823 }
0824
0825 pm_runtime_mark_last_busy(ddev->dev);
0826 pm_runtime_put_autosuspend(ddev->dev);
0827
0828 return count;
0829
0830 err_out:
0831 pm_runtime_mark_last_busy(ddev->dev);
0832 pm_runtime_put_autosuspend(ddev->dev);
0833 return -EINVAL;
0834 }
0835
0836 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
0837 struct device_attribute *attr,
0838 char *buf)
0839 {
0840 struct drm_device *ddev = dev_get_drvdata(dev);
0841 struct amdgpu_device *adev = drm_to_adev(ddev);
0842 int size = 0;
0843 int ret;
0844 enum pp_clock_type od_clocks[6] = {
0845 OD_SCLK,
0846 OD_MCLK,
0847 OD_VDDC_CURVE,
0848 OD_RANGE,
0849 OD_VDDGFX_OFFSET,
0850 OD_CCLK,
0851 };
0852 uint clk_index;
0853
0854 if (amdgpu_in_reset(adev))
0855 return -EPERM;
0856 if (adev->in_suspend && !adev->in_runpm)
0857 return -EPERM;
0858
0859 ret = pm_runtime_get_sync(ddev->dev);
0860 if (ret < 0) {
0861 pm_runtime_put_autosuspend(ddev->dev);
0862 return ret;
0863 }
0864
0865 for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
0866 ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
0867 if (ret)
0868 break;
0869 }
0870 if (ret == -ENOENT) {
0871 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
0872 if (size > 0) {
0873 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
0874 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
0875 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
0876 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
0877 size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
0878 }
0879 }
0880
0881 if (size == 0)
0882 size = sysfs_emit(buf, "\n");
0883
0884 pm_runtime_mark_last_busy(ddev->dev);
0885 pm_runtime_put_autosuspend(ddev->dev);
0886
0887 return size;
0888 }
0889
0890
0891
0892
0893
0894
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904
0905
0906 static ssize_t amdgpu_set_pp_features(struct device *dev,
0907 struct device_attribute *attr,
0908 const char *buf,
0909 size_t count)
0910 {
0911 struct drm_device *ddev = dev_get_drvdata(dev);
0912 struct amdgpu_device *adev = drm_to_adev(ddev);
0913 uint64_t featuremask;
0914 int ret;
0915
0916 if (amdgpu_in_reset(adev))
0917 return -EPERM;
0918 if (adev->in_suspend && !adev->in_runpm)
0919 return -EPERM;
0920
0921 ret = kstrtou64(buf, 0, &featuremask);
0922 if (ret)
0923 return -EINVAL;
0924
0925 ret = pm_runtime_get_sync(ddev->dev);
0926 if (ret < 0) {
0927 pm_runtime_put_autosuspend(ddev->dev);
0928 return ret;
0929 }
0930
0931 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
0932
0933 pm_runtime_mark_last_busy(ddev->dev);
0934 pm_runtime_put_autosuspend(ddev->dev);
0935
0936 if (ret)
0937 return -EINVAL;
0938
0939 return count;
0940 }
0941
0942 static ssize_t amdgpu_get_pp_features(struct device *dev,
0943 struct device_attribute *attr,
0944 char *buf)
0945 {
0946 struct drm_device *ddev = dev_get_drvdata(dev);
0947 struct amdgpu_device *adev = drm_to_adev(ddev);
0948 ssize_t size;
0949 int ret;
0950
0951 if (amdgpu_in_reset(adev))
0952 return -EPERM;
0953 if (adev->in_suspend && !adev->in_runpm)
0954 return -EPERM;
0955
0956 ret = pm_runtime_get_sync(ddev->dev);
0957 if (ret < 0) {
0958 pm_runtime_put_autosuspend(ddev->dev);
0959 return ret;
0960 }
0961
0962 size = amdgpu_dpm_get_ppfeature_status(adev, buf);
0963 if (size <= 0)
0964 size = sysfs_emit(buf, "\n");
0965
0966 pm_runtime_mark_last_busy(ddev->dev);
0967 pm_runtime_put_autosuspend(ddev->dev);
0968
0969 return size;
0970 }
0971
0972
0973
0974
0975
0976
0977
0978
0979
0980
0981
0982
0983
0984
0985
0986
0987
0988
0989
0990
0991
0992
0993
0994
0995
0996
0997
0998
0999
1000
1001
1002 static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
1003 enum pp_clock_type type,
1004 char *buf)
1005 {
1006 struct drm_device *ddev = dev_get_drvdata(dev);
1007 struct amdgpu_device *adev = drm_to_adev(ddev);
1008 int size = 0;
1009 int ret = 0;
1010
1011 if (amdgpu_in_reset(adev))
1012 return -EPERM;
1013 if (adev->in_suspend && !adev->in_runpm)
1014 return -EPERM;
1015
1016 ret = pm_runtime_get_sync(ddev->dev);
1017 if (ret < 0) {
1018 pm_runtime_put_autosuspend(ddev->dev);
1019 return ret;
1020 }
1021
1022 ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
1023 if (ret == -ENOENT)
1024 size = amdgpu_dpm_print_clock_levels(adev, type, buf);
1025
1026 if (size == 0)
1027 size = sysfs_emit(buf, "\n");
1028
1029 pm_runtime_mark_last_busy(ddev->dev);
1030 pm_runtime_put_autosuspend(ddev->dev);
1031
1032 return size;
1033 }
1034
1035
1036
1037
1038
1039 #define AMDGPU_MASK_BUF_MAX (32 * 13)
1040
1041 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
1042 {
1043 int ret;
1044 unsigned long level;
1045 char *sub_str = NULL;
1046 char *tmp;
1047 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
1048 const char delimiter[3] = {' ', '\n', '\0'};
1049 size_t bytes;
1050
1051 *mask = 0;
1052
1053 bytes = min(count, sizeof(buf_cpy) - 1);
1054 memcpy(buf_cpy, buf, bytes);
1055 buf_cpy[bytes] = '\0';
1056 tmp = buf_cpy;
1057 while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
1058 if (strlen(sub_str)) {
1059 ret = kstrtoul(sub_str, 0, &level);
1060 if (ret || level > 31)
1061 return -EINVAL;
1062 *mask |= 1 << level;
1063 } else
1064 break;
1065 }
1066
1067 return 0;
1068 }
1069
1070 static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
1071 enum pp_clock_type type,
1072 const char *buf,
1073 size_t count)
1074 {
1075 struct drm_device *ddev = dev_get_drvdata(dev);
1076 struct amdgpu_device *adev = drm_to_adev(ddev);
1077 int ret;
1078 uint32_t mask = 0;
1079
1080 if (amdgpu_in_reset(adev))
1081 return -EPERM;
1082 if (adev->in_suspend && !adev->in_runpm)
1083 return -EPERM;
1084
1085 ret = amdgpu_read_mask(buf, count, &mask);
1086 if (ret)
1087 return ret;
1088
1089 ret = pm_runtime_get_sync(ddev->dev);
1090 if (ret < 0) {
1091 pm_runtime_put_autosuspend(ddev->dev);
1092 return ret;
1093 }
1094
1095 ret = amdgpu_dpm_force_clock_level(adev, type, mask);
1096
1097 pm_runtime_mark_last_busy(ddev->dev);
1098 pm_runtime_put_autosuspend(ddev->dev);
1099
1100 if (ret)
1101 return -EINVAL;
1102
1103 return count;
1104 }
1105
1106 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
1107 struct device_attribute *attr,
1108 char *buf)
1109 {
1110 return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
1111 }
1112
1113 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
1114 struct device_attribute *attr,
1115 const char *buf,
1116 size_t count)
1117 {
1118 return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
1119 }
1120
1121 static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
1122 struct device_attribute *attr,
1123 char *buf)
1124 {
1125 return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
1126 }
1127
1128 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
1129 struct device_attribute *attr,
1130 const char *buf,
1131 size_t count)
1132 {
1133 return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
1134 }
1135
1136 static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
1137 struct device_attribute *attr,
1138 char *buf)
1139 {
1140 return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
1141 }
1142
1143 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
1144 struct device_attribute *attr,
1145 const char *buf,
1146 size_t count)
1147 {
1148 return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
1149 }
1150
1151 static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
1152 struct device_attribute *attr,
1153 char *buf)
1154 {
1155 return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
1156 }
1157
1158 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
1159 struct device_attribute *attr,
1160 const char *buf,
1161 size_t count)
1162 {
1163 return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
1164 }
1165
1166 static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
1167 struct device_attribute *attr,
1168 char *buf)
1169 {
1170 return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
1171 }
1172
1173 static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
1174 struct device_attribute *attr,
1175 const char *buf,
1176 size_t count)
1177 {
1178 return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
1179 }
1180
1181 static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
1182 struct device_attribute *attr,
1183 char *buf)
1184 {
1185 return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
1186 }
1187
1188 static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
1189 struct device_attribute *attr,
1190 const char *buf,
1191 size_t count)
1192 {
1193 return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
1194 }
1195
1196 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
1197 struct device_attribute *attr,
1198 char *buf)
1199 {
1200 return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
1201 }
1202
1203 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
1204 struct device_attribute *attr,
1205 const char *buf,
1206 size_t count)
1207 {
1208 return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
1209 }
1210
1211 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
1212 struct device_attribute *attr,
1213 char *buf)
1214 {
1215 return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
1216 }
1217
1218 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
1219 struct device_attribute *attr,
1220 const char *buf,
1221 size_t count)
1222 {
1223 return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
1224 }
1225
1226 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1227 struct device_attribute *attr,
1228 char *buf)
1229 {
1230 struct drm_device *ddev = dev_get_drvdata(dev);
1231 struct amdgpu_device *adev = drm_to_adev(ddev);
1232 uint32_t value = 0;
1233 int ret;
1234
1235 if (amdgpu_in_reset(adev))
1236 return -EPERM;
1237 if (adev->in_suspend && !adev->in_runpm)
1238 return -EPERM;
1239
1240 ret = pm_runtime_get_sync(ddev->dev);
1241 if (ret < 0) {
1242 pm_runtime_put_autosuspend(ddev->dev);
1243 return ret;
1244 }
1245
1246 value = amdgpu_dpm_get_sclk_od(adev);
1247
1248 pm_runtime_mark_last_busy(ddev->dev);
1249 pm_runtime_put_autosuspend(ddev->dev);
1250
1251 return sysfs_emit(buf, "%d\n", value);
1252 }
1253
1254 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1255 struct device_attribute *attr,
1256 const char *buf,
1257 size_t count)
1258 {
1259 struct drm_device *ddev = dev_get_drvdata(dev);
1260 struct amdgpu_device *adev = drm_to_adev(ddev);
1261 int ret;
1262 long int value;
1263
1264 if (amdgpu_in_reset(adev))
1265 return -EPERM;
1266 if (adev->in_suspend && !adev->in_runpm)
1267 return -EPERM;
1268
1269 ret = kstrtol(buf, 0, &value);
1270
1271 if (ret)
1272 return -EINVAL;
1273
1274 ret = pm_runtime_get_sync(ddev->dev);
1275 if (ret < 0) {
1276 pm_runtime_put_autosuspend(ddev->dev);
1277 return ret;
1278 }
1279
1280 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
1281
1282 pm_runtime_mark_last_busy(ddev->dev);
1283 pm_runtime_put_autosuspend(ddev->dev);
1284
1285 return count;
1286 }
1287
1288 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1289 struct device_attribute *attr,
1290 char *buf)
1291 {
1292 struct drm_device *ddev = dev_get_drvdata(dev);
1293 struct amdgpu_device *adev = drm_to_adev(ddev);
1294 uint32_t value = 0;
1295 int ret;
1296
1297 if (amdgpu_in_reset(adev))
1298 return -EPERM;
1299 if (adev->in_suspend && !adev->in_runpm)
1300 return -EPERM;
1301
1302 ret = pm_runtime_get_sync(ddev->dev);
1303 if (ret < 0) {
1304 pm_runtime_put_autosuspend(ddev->dev);
1305 return ret;
1306 }
1307
1308 value = amdgpu_dpm_get_mclk_od(adev);
1309
1310 pm_runtime_mark_last_busy(ddev->dev);
1311 pm_runtime_put_autosuspend(ddev->dev);
1312
1313 return sysfs_emit(buf, "%d\n", value);
1314 }
1315
1316 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1317 struct device_attribute *attr,
1318 const char *buf,
1319 size_t count)
1320 {
1321 struct drm_device *ddev = dev_get_drvdata(dev);
1322 struct amdgpu_device *adev = drm_to_adev(ddev);
1323 int ret;
1324 long int value;
1325
1326 if (amdgpu_in_reset(adev))
1327 return -EPERM;
1328 if (adev->in_suspend && !adev->in_runpm)
1329 return -EPERM;
1330
1331 ret = kstrtol(buf, 0, &value);
1332
1333 if (ret)
1334 return -EINVAL;
1335
1336 ret = pm_runtime_get_sync(ddev->dev);
1337 if (ret < 0) {
1338 pm_runtime_put_autosuspend(ddev->dev);
1339 return ret;
1340 }
1341
1342 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1343
1344 pm_runtime_mark_last_busy(ddev->dev);
1345 pm_runtime_put_autosuspend(ddev->dev);
1346
1347 return count;
1348 }
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1371 struct device_attribute *attr,
1372 char *buf)
1373 {
1374 struct drm_device *ddev = dev_get_drvdata(dev);
1375 struct amdgpu_device *adev = drm_to_adev(ddev);
1376 ssize_t size;
1377 int ret;
1378
1379 if (amdgpu_in_reset(adev))
1380 return -EPERM;
1381 if (adev->in_suspend && !adev->in_runpm)
1382 return -EPERM;
1383
1384 ret = pm_runtime_get_sync(ddev->dev);
1385 if (ret < 0) {
1386 pm_runtime_put_autosuspend(ddev->dev);
1387 return ret;
1388 }
1389
1390 size = amdgpu_dpm_get_power_profile_mode(adev, buf);
1391 if (size <= 0)
1392 size = sysfs_emit(buf, "\n");
1393
1394 pm_runtime_mark_last_busy(ddev->dev);
1395 pm_runtime_put_autosuspend(ddev->dev);
1396
1397 return size;
1398 }
1399
1400
1401 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1402 struct device_attribute *attr,
1403 const char *buf,
1404 size_t count)
1405 {
1406 int ret;
1407 struct drm_device *ddev = dev_get_drvdata(dev);
1408 struct amdgpu_device *adev = drm_to_adev(ddev);
1409 uint32_t parameter_size = 0;
1410 long parameter[64];
1411 char *sub_str, buf_cpy[128];
1412 char *tmp_str;
1413 uint32_t i = 0;
1414 char tmp[2];
1415 long int profile_mode = 0;
1416 const char delimiter[3] = {' ', '\n', '\0'};
1417
1418 if (amdgpu_in_reset(adev))
1419 return -EPERM;
1420 if (adev->in_suspend && !adev->in_runpm)
1421 return -EPERM;
1422
1423 tmp[0] = *(buf);
1424 tmp[1] = '\0';
1425 ret = kstrtol(tmp, 0, &profile_mode);
1426 if (ret)
1427 return -EINVAL;
1428
1429 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1430 if (count < 2 || count > 127)
1431 return -EINVAL;
1432 while (isspace(*++buf))
1433 i++;
1434 memcpy(buf_cpy, buf, count-i);
1435 tmp_str = buf_cpy;
1436 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
1437 if (strlen(sub_str) == 0)
1438 continue;
1439 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]);
1440 if (ret)
1441 return -EINVAL;
1442 parameter_size++;
1443 while (isspace(*tmp_str))
1444 tmp_str++;
1445 }
1446 }
1447 parameter[parameter_size] = profile_mode;
1448
1449 ret = pm_runtime_get_sync(ddev->dev);
1450 if (ret < 0) {
1451 pm_runtime_put_autosuspend(ddev->dev);
1452 return ret;
1453 }
1454
1455 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1456
1457 pm_runtime_mark_last_busy(ddev->dev);
1458 pm_runtime_put_autosuspend(ddev->dev);
1459
1460 if (!ret)
1461 return count;
1462
1463 return -EINVAL;
1464 }
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474 static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
1475 struct device_attribute *attr,
1476 char *buf)
1477 {
1478 struct drm_device *ddev = dev_get_drvdata(dev);
1479 struct amdgpu_device *adev = drm_to_adev(ddev);
1480 int r, value, size = sizeof(value);
1481
1482 if (amdgpu_in_reset(adev))
1483 return -EPERM;
1484 if (adev->in_suspend && !adev->in_runpm)
1485 return -EPERM;
1486
1487 r = pm_runtime_get_sync(ddev->dev);
1488 if (r < 0) {
1489 pm_runtime_put_autosuspend(ddev->dev);
1490 return r;
1491 }
1492
1493
1494 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
1495 (void *)&value, &size);
1496
1497 pm_runtime_mark_last_busy(ddev->dev);
1498 pm_runtime_put_autosuspend(ddev->dev);
1499
1500 if (r)
1501 return r;
1502
1503 return sysfs_emit(buf, "%d\n", value);
1504 }
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514 static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
1515 struct device_attribute *attr,
1516 char *buf)
1517 {
1518 struct drm_device *ddev = dev_get_drvdata(dev);
1519 struct amdgpu_device *adev = drm_to_adev(ddev);
1520 int r, value, size = sizeof(value);
1521
1522 if (amdgpu_in_reset(adev))
1523 return -EPERM;
1524 if (adev->in_suspend && !adev->in_runpm)
1525 return -EPERM;
1526
1527 r = pm_runtime_get_sync(ddev->dev);
1528 if (r < 0) {
1529 pm_runtime_put_autosuspend(ddev->dev);
1530 return r;
1531 }
1532
1533
1534 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
1535 (void *)&value, &size);
1536
1537 pm_runtime_mark_last_busy(ddev->dev);
1538 pm_runtime_put_autosuspend(ddev->dev);
1539
1540 if (r)
1541 return r;
1542
1543 return sysfs_emit(buf, "%d\n", value);
1544 }
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558 static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1559 struct device_attribute *attr,
1560 char *buf)
1561 {
1562 struct drm_device *ddev = dev_get_drvdata(dev);
1563 struct amdgpu_device *adev = drm_to_adev(ddev);
1564 uint64_t count0 = 0, count1 = 0;
1565 int ret;
1566
1567 if (amdgpu_in_reset(adev))
1568 return -EPERM;
1569 if (adev->in_suspend && !adev->in_runpm)
1570 return -EPERM;
1571
1572 if (adev->flags & AMD_IS_APU)
1573 return -ENODATA;
1574
1575 if (!adev->asic_funcs->get_pcie_usage)
1576 return -ENODATA;
1577
1578 ret = pm_runtime_get_sync(ddev->dev);
1579 if (ret < 0) {
1580 pm_runtime_put_autosuspend(ddev->dev);
1581 return ret;
1582 }
1583
1584 amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1585
1586 pm_runtime_mark_last_busy(ddev->dev);
1587 pm_runtime_put_autosuspend(ddev->dev);
1588
1589 return sysfs_emit(buf, "%llu %llu %i\n",
1590 count0, count1, pcie_get_mps(adev->pdev));
1591 }
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603 static ssize_t amdgpu_get_unique_id(struct device *dev,
1604 struct device_attribute *attr,
1605 char *buf)
1606 {
1607 struct drm_device *ddev = dev_get_drvdata(dev);
1608 struct amdgpu_device *adev = drm_to_adev(ddev);
1609
1610 if (amdgpu_in_reset(adev))
1611 return -EPERM;
1612 if (adev->in_suspend && !adev->in_runpm)
1613 return -EPERM;
1614
1615 if (adev->unique_id)
1616 return sysfs_emit(buf, "%016llx\n", adev->unique_id);
1617
1618 return 0;
1619 }
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636 static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1637 struct device_attribute *attr,
1638 char *buf)
1639 {
1640 struct drm_device *ddev = dev_get_drvdata(dev);
1641 struct amdgpu_device *adev = drm_to_adev(ddev);
1642
1643 return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
1644 adev_to_drm(adev)->unique,
1645 atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
1646 adev->throttling_logging_rs.interval / HZ + 1);
1647 }
1648
1649 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1650 struct device_attribute *attr,
1651 const char *buf,
1652 size_t count)
1653 {
1654 struct drm_device *ddev = dev_get_drvdata(dev);
1655 struct amdgpu_device *adev = drm_to_adev(ddev);
1656 long throttling_logging_interval;
1657 unsigned long flags;
1658 int ret = 0;
1659
1660 ret = kstrtol(buf, 0, &throttling_logging_interval);
1661 if (ret)
1662 return ret;
1663
1664 if (throttling_logging_interval > 3600)
1665 return -EINVAL;
1666
1667 if (throttling_logging_interval > 0) {
1668 raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
1669
1670
1671
1672
1673 adev->throttling_logging_rs.interval =
1674 (throttling_logging_interval - 1) * HZ;
1675 adev->throttling_logging_rs.begin = 0;
1676 adev->throttling_logging_rs.printed = 0;
1677 adev->throttling_logging_rs.missed = 0;
1678 raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
1679
1680 atomic_set(&adev->throttling_logging_enabled, 1);
1681 } else {
1682 atomic_set(&adev->throttling_logging_enabled, 0);
1683 }
1684
1685 return count;
1686 }
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700 static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1701 struct device_attribute *attr,
1702 char *buf)
1703 {
1704 struct drm_device *ddev = dev_get_drvdata(dev);
1705 struct amdgpu_device *adev = drm_to_adev(ddev);
1706 void *gpu_metrics;
1707 ssize_t size = 0;
1708 int ret;
1709
1710 if (amdgpu_in_reset(adev))
1711 return -EPERM;
1712 if (adev->in_suspend && !adev->in_runpm)
1713 return -EPERM;
1714
1715 ret = pm_runtime_get_sync(ddev->dev);
1716 if (ret < 0) {
1717 pm_runtime_put_autosuspend(ddev->dev);
1718 return ret;
1719 }
1720
1721 size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
1722 if (size <= 0)
1723 goto out;
1724
1725 if (size >= PAGE_SIZE)
1726 size = PAGE_SIZE - 1;
1727
1728 memcpy(buf, gpu_metrics, size);
1729
1730 out:
1731 pm_runtime_mark_last_busy(ddev->dev);
1732 pm_runtime_put_autosuspend(ddev->dev);
1733
1734 return size;
1735 }
1736
1737 static int amdgpu_device_read_powershift(struct amdgpu_device *adev,
1738 uint32_t *ss_power, bool dgpu_share)
1739 {
1740 struct drm_device *ddev = adev_to_drm(adev);
1741 uint32_t size;
1742 int r = 0;
1743
1744 if (amdgpu_in_reset(adev))
1745 return -EPERM;
1746 if (adev->in_suspend && !adev->in_runpm)
1747 return -EPERM;
1748
1749 r = pm_runtime_get_sync(ddev->dev);
1750 if (r < 0) {
1751 pm_runtime_put_autosuspend(ddev->dev);
1752 return r;
1753 }
1754
1755 if (dgpu_share)
1756 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1757 (void *)ss_power, &size);
1758 else
1759 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1760 (void *)ss_power, &size);
1761
1762 pm_runtime_mark_last_busy(ddev->dev);
1763 pm_runtime_put_autosuspend(ddev->dev);
1764 return r;
1765 }
1766
1767 static int amdgpu_show_powershift_percent(struct device *dev,
1768 char *buf, bool dgpu_share)
1769 {
1770 struct drm_device *ddev = dev_get_drvdata(dev);
1771 struct amdgpu_device *adev = drm_to_adev(ddev);
1772 uint32_t ss_power;
1773 int r = 0, i;
1774
1775 r = amdgpu_device_read_powershift(adev, &ss_power, dgpu_share);
1776 if (r == -EOPNOTSUPP) {
1777
1778 adev = NULL;
1779 mutex_lock(&mgpu_info.mutex);
1780 for (i = 0; i < mgpu_info.num_gpu; i++) {
1781 if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
1782 adev = mgpu_info.gpu_ins[i].adev;
1783 break;
1784 }
1785 }
1786 mutex_unlock(&mgpu_info.mutex);
1787 if (adev)
1788 r = amdgpu_device_read_powershift(adev, &ss_power, dgpu_share);
1789 }
1790
1791 if (!r)
1792 r = sysfs_emit(buf, "%u%%\n", ss_power);
1793
1794 return r;
1795 }
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806 static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
1807 char *buf)
1808 {
1809 return amdgpu_show_powershift_percent(dev, buf, false);
1810 }
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822 static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
1823 char *buf)
1824 {
1825 return amdgpu_show_powershift_percent(dev, buf, true);
1826 }
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837 static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
1838 struct device_attribute *attr,
1839 char *buf)
1840 {
1841 int r = 0;
1842
1843 r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
1844
1845 return r;
1846 }
1847
1848 static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
1849 struct device_attribute *attr,
1850 const char *buf, size_t count)
1851 {
1852 struct drm_device *ddev = dev_get_drvdata(dev);
1853 struct amdgpu_device *adev = drm_to_adev(ddev);
1854 int r = 0;
1855 int bias = 0;
1856
1857 if (amdgpu_in_reset(adev))
1858 return -EPERM;
1859 if (adev->in_suspend && !adev->in_runpm)
1860 return -EPERM;
1861
1862 r = pm_runtime_get_sync(ddev->dev);
1863 if (r < 0) {
1864 pm_runtime_put_autosuspend(ddev->dev);
1865 return r;
1866 }
1867
1868 r = kstrtoint(buf, 10, &bias);
1869 if (r)
1870 goto out;
1871
1872 if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
1873 bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
1874 else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
1875 bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
1876
1877 amdgpu_smartshift_bias = bias;
1878 r = count;
1879
1880
1881
1882 out:
1883 pm_runtime_mark_last_busy(ddev->dev);
1884 pm_runtime_put_autosuspend(ddev->dev);
1885 return r;
1886 }
1887
1888
1889 static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1890 uint32_t mask, enum amdgpu_device_attr_states *states)
1891 {
1892 if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1893 *states = ATTR_STATE_UNSUPPORTED;
1894
1895 return 0;
1896 }
1897
1898 static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1899 uint32_t mask, enum amdgpu_device_attr_states *states)
1900 {
1901 uint32_t ss_power, size;
1902
1903 if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1904 *states = ATTR_STATE_UNSUPPORTED;
1905 else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1906 (void *)&ss_power, &size))
1907 *states = ATTR_STATE_UNSUPPORTED;
1908 else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1909 (void *)&ss_power, &size))
1910 *states = ATTR_STATE_UNSUPPORTED;
1911
1912 return 0;
1913 }
1914
1915 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
1916 AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1917 AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1918 AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1919 AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1920 AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1921 AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1922 AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1923 AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1924 AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1925 AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1926 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1927 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1928 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1929 AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1930 AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
1931 AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
1932 AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1933 AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
1934 AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1935 AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1936 AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
1937 AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1938 AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1939 AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1940 AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1941 AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
1942 .attr_update = ss_power_attr_update),
1943 AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
1944 .attr_update = ss_power_attr_update),
1945 AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
1946 .attr_update = ss_bias_attr_update),
1947 };
1948
1949 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1950 uint32_t mask, enum amdgpu_device_attr_states *states)
1951 {
1952 struct device_attribute *dev_attr = &attr->dev_attr;
1953 uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
1954 uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
1955 const char *attr_name = dev_attr->attr.name;
1956
1957 if (!(attr->flags & mask)) {
1958 *states = ATTR_STATE_UNSUPPORTED;
1959 return 0;
1960 }
1961
1962 #define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
1963
1964 if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
1965 if (gc_ver < IP_VERSION(9, 0, 0))
1966 *states = ATTR_STATE_UNSUPPORTED;
1967 } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
1968 if (gc_ver < IP_VERSION(9, 0, 0) ||
1969 gc_ver == IP_VERSION(9, 4, 1) ||
1970 gc_ver == IP_VERSION(9, 4, 2))
1971 *states = ATTR_STATE_UNSUPPORTED;
1972 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
1973 if (mp1_ver < IP_VERSION(10, 0, 0))
1974 *states = ATTR_STATE_UNSUPPORTED;
1975 } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
1976 *states = ATTR_STATE_UNSUPPORTED;
1977 if (amdgpu_dpm_is_overdrive_supported(adev))
1978 *states = ATTR_STATE_SUPPORTED;
1979 } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
1980 if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1))
1981 *states = ATTR_STATE_UNSUPPORTED;
1982 } else if (DEVICE_ATTR_IS(pcie_bw)) {
1983
1984 if (adev->flags & AMD_IS_APU)
1985 *states = ATTR_STATE_UNSUPPORTED;
1986 } else if (DEVICE_ATTR_IS(unique_id)) {
1987 switch (gc_ver) {
1988 case IP_VERSION(9, 0, 1):
1989 case IP_VERSION(9, 4, 0):
1990 case IP_VERSION(9, 4, 1):
1991 case IP_VERSION(9, 4, 2):
1992 case IP_VERSION(10, 3, 0):
1993 case IP_VERSION(11, 0, 0):
1994 *states = ATTR_STATE_SUPPORTED;
1995 break;
1996 default:
1997 *states = ATTR_STATE_UNSUPPORTED;
1998 }
1999 } else if (DEVICE_ATTR_IS(pp_features)) {
2000 if (adev->flags & AMD_IS_APU || gc_ver < IP_VERSION(9, 0, 0))
2001 *states = ATTR_STATE_UNSUPPORTED;
2002 } else if (DEVICE_ATTR_IS(gpu_metrics)) {
2003 if (gc_ver < IP_VERSION(9, 1, 0))
2004 *states = ATTR_STATE_UNSUPPORTED;
2005 } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
2006 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2007 gc_ver == IP_VERSION(10, 3, 0) ||
2008 gc_ver == IP_VERSION(10, 1, 2) ||
2009 gc_ver == IP_VERSION(11, 0, 0) ||
2010 gc_ver == IP_VERSION(11, 0, 2)))
2011 *states = ATTR_STATE_UNSUPPORTED;
2012 } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
2013 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2014 gc_ver == IP_VERSION(10, 3, 0) ||
2015 gc_ver == IP_VERSION(10, 1, 2) ||
2016 gc_ver == IP_VERSION(11, 0, 0) ||
2017 gc_ver == IP_VERSION(11, 0, 2)))
2018 *states = ATTR_STATE_UNSUPPORTED;
2019 } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
2020 if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
2021 *states = ATTR_STATE_UNSUPPORTED;
2022 else if (gc_ver == IP_VERSION(10, 3, 0) && amdgpu_sriov_vf(adev))
2023 *states = ATTR_STATE_UNSUPPORTED;
2024 }
2025
2026 switch (gc_ver) {
2027 case IP_VERSION(9, 4, 1):
2028 case IP_VERSION(9, 4, 2):
2029
2030 if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2031 DEVICE_ATTR_IS(pp_dpm_socclk) ||
2032 DEVICE_ATTR_IS(pp_dpm_fclk)) {
2033 dev_attr->attr.mode &= ~S_IWUGO;
2034 dev_attr->store = NULL;
2035 }
2036 break;
2037 case IP_VERSION(10, 3, 0):
2038 if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
2039 amdgpu_sriov_vf(adev)) {
2040 dev_attr->attr.mode &= ~0222;
2041 dev_attr->store = NULL;
2042 }
2043 break;
2044 default:
2045 break;
2046 }
2047
2048 if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
2049
2050 if (gc_ver >= IP_VERSION(10, 0, 0)) {
2051 dev_attr->attr.mode &= ~S_IWUGO;
2052 dev_attr->store = NULL;
2053 }
2054 }
2055
2056
2057 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
2058 dev_attr->attr.mode &= ~S_IWUGO;
2059 dev_attr->store = NULL;
2060 }
2061
2062 #undef DEVICE_ATTR_IS
2063
2064 return 0;
2065 }
2066
2067
2068 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2069 struct amdgpu_device_attr *attr,
2070 uint32_t mask, struct list_head *attr_list)
2071 {
2072 int ret = 0;
2073 struct device_attribute *dev_attr = &attr->dev_attr;
2074 const char *name = dev_attr->attr.name;
2075 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2076 struct amdgpu_device_attr_entry *attr_entry;
2077
2078 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2079 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2080
2081 BUG_ON(!attr);
2082
2083 attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2084
2085 ret = attr_update(adev, attr, mask, &attr_states);
2086 if (ret) {
2087 dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2088 name, ret);
2089 return ret;
2090 }
2091
2092 if (attr_states == ATTR_STATE_UNSUPPORTED)
2093 return 0;
2094
2095 ret = device_create_file(adev->dev, dev_attr);
2096 if (ret) {
2097 dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2098 name, ret);
2099 }
2100
2101 attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2102 if (!attr_entry)
2103 return -ENOMEM;
2104
2105 attr_entry->attr = attr;
2106 INIT_LIST_HEAD(&attr_entry->entry);
2107
2108 list_add_tail(&attr_entry->entry, attr_list);
2109
2110 return ret;
2111 }
2112
2113 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2114 {
2115 struct device_attribute *dev_attr = &attr->dev_attr;
2116
2117 device_remove_file(adev->dev, dev_attr);
2118 }
2119
2120 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2121 struct list_head *attr_list);
2122
2123 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2124 struct amdgpu_device_attr *attrs,
2125 uint32_t counts,
2126 uint32_t mask,
2127 struct list_head *attr_list)
2128 {
2129 int ret = 0;
2130 uint32_t i = 0;
2131
2132 for (i = 0; i < counts; i++) {
2133 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2134 if (ret)
2135 goto failed;
2136 }
2137
2138 return 0;
2139
2140 failed:
2141 amdgpu_device_attr_remove_groups(adev, attr_list);
2142
2143 return ret;
2144 }
2145
2146 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2147 struct list_head *attr_list)
2148 {
2149 struct amdgpu_device_attr_entry *entry, *entry_tmp;
2150
2151 if (list_empty(attr_list))
2152 return ;
2153
2154 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2155 amdgpu_device_attr_remove(adev, entry->attr);
2156 list_del(&entry->entry);
2157 kfree(entry);
2158 }
2159 }
2160
2161 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2162 struct device_attribute *attr,
2163 char *buf)
2164 {
2165 struct amdgpu_device *adev = dev_get_drvdata(dev);
2166 int channel = to_sensor_dev_attr(attr)->index;
2167 int r, temp = 0, size = sizeof(temp);
2168
2169 if (amdgpu_in_reset(adev))
2170 return -EPERM;
2171 if (adev->in_suspend && !adev->in_runpm)
2172 return -EPERM;
2173
2174 if (channel >= PP_TEMP_MAX)
2175 return -EINVAL;
2176
2177 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2178 if (r < 0) {
2179 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2180 return r;
2181 }
2182
2183 switch (channel) {
2184 case PP_TEMP_JUNCTION:
2185
2186 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2187 (void *)&temp, &size);
2188 break;
2189 case PP_TEMP_EDGE:
2190
2191 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2192 (void *)&temp, &size);
2193 break;
2194 case PP_TEMP_MEM:
2195
2196 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2197 (void *)&temp, &size);
2198 break;
2199 default:
2200 r = -EINVAL;
2201 break;
2202 }
2203
2204 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2205 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2206
2207 if (r)
2208 return r;
2209
2210 return sysfs_emit(buf, "%d\n", temp);
2211 }
2212
2213 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2214 struct device_attribute *attr,
2215 char *buf)
2216 {
2217 struct amdgpu_device *adev = dev_get_drvdata(dev);
2218 int hyst = to_sensor_dev_attr(attr)->index;
2219 int temp;
2220
2221 if (hyst)
2222 temp = adev->pm.dpm.thermal.min_temp;
2223 else
2224 temp = adev->pm.dpm.thermal.max_temp;
2225
2226 return sysfs_emit(buf, "%d\n", temp);
2227 }
2228
2229 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2230 struct device_attribute *attr,
2231 char *buf)
2232 {
2233 struct amdgpu_device *adev = dev_get_drvdata(dev);
2234 int hyst = to_sensor_dev_attr(attr)->index;
2235 int temp;
2236
2237 if (hyst)
2238 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2239 else
2240 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2241
2242 return sysfs_emit(buf, "%d\n", temp);
2243 }
2244
2245 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2246 struct device_attribute *attr,
2247 char *buf)
2248 {
2249 struct amdgpu_device *adev = dev_get_drvdata(dev);
2250 int hyst = to_sensor_dev_attr(attr)->index;
2251 int temp;
2252
2253 if (hyst)
2254 temp = adev->pm.dpm.thermal.min_mem_temp;
2255 else
2256 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2257
2258 return sysfs_emit(buf, "%d\n", temp);
2259 }
2260
2261 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2262 struct device_attribute *attr,
2263 char *buf)
2264 {
2265 int channel = to_sensor_dev_attr(attr)->index;
2266
2267 if (channel >= PP_TEMP_MAX)
2268 return -EINVAL;
2269
2270 return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2271 }
2272
2273 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2274 struct device_attribute *attr,
2275 char *buf)
2276 {
2277 struct amdgpu_device *adev = dev_get_drvdata(dev);
2278 int channel = to_sensor_dev_attr(attr)->index;
2279 int temp = 0;
2280
2281 if (channel >= PP_TEMP_MAX)
2282 return -EINVAL;
2283
2284 switch (channel) {
2285 case PP_TEMP_JUNCTION:
2286 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2287 break;
2288 case PP_TEMP_EDGE:
2289 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2290 break;
2291 case PP_TEMP_MEM:
2292 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2293 break;
2294 }
2295
2296 return sysfs_emit(buf, "%d\n", temp);
2297 }
2298
2299 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2300 struct device_attribute *attr,
2301 char *buf)
2302 {
2303 struct amdgpu_device *adev = dev_get_drvdata(dev);
2304 u32 pwm_mode = 0;
2305 int ret;
2306
2307 if (amdgpu_in_reset(adev))
2308 return -EPERM;
2309 if (adev->in_suspend && !adev->in_runpm)
2310 return -EPERM;
2311
2312 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2313 if (ret < 0) {
2314 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2315 return ret;
2316 }
2317
2318 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2319
2320 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2321 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2322
2323 if (ret)
2324 return -EINVAL;
2325
2326 return sysfs_emit(buf, "%u\n", pwm_mode);
2327 }
2328
2329 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2330 struct device_attribute *attr,
2331 const char *buf,
2332 size_t count)
2333 {
2334 struct amdgpu_device *adev = dev_get_drvdata(dev);
2335 int err, ret;
2336 int value;
2337
2338 if (amdgpu_in_reset(adev))
2339 return -EPERM;
2340 if (adev->in_suspend && !adev->in_runpm)
2341 return -EPERM;
2342
2343 err = kstrtoint(buf, 10, &value);
2344 if (err)
2345 return err;
2346
2347 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2348 if (ret < 0) {
2349 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2350 return ret;
2351 }
2352
2353 ret = amdgpu_dpm_set_fan_control_mode(adev, value);
2354
2355 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2356 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2357
2358 if (ret)
2359 return -EINVAL;
2360
2361 return count;
2362 }
2363
2364 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2365 struct device_attribute *attr,
2366 char *buf)
2367 {
2368 return sysfs_emit(buf, "%i\n", 0);
2369 }
2370
2371 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2372 struct device_attribute *attr,
2373 char *buf)
2374 {
2375 return sysfs_emit(buf, "%i\n", 255);
2376 }
2377
2378 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2379 struct device_attribute *attr,
2380 const char *buf, size_t count)
2381 {
2382 struct amdgpu_device *adev = dev_get_drvdata(dev);
2383 int err;
2384 u32 value;
2385 u32 pwm_mode;
2386
2387 if (amdgpu_in_reset(adev))
2388 return -EPERM;
2389 if (adev->in_suspend && !adev->in_runpm)
2390 return -EPERM;
2391
2392 err = kstrtou32(buf, 10, &value);
2393 if (err)
2394 return err;
2395
2396 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2397 if (err < 0) {
2398 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2399 return err;
2400 }
2401
2402 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2403 if (err)
2404 goto out;
2405
2406 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2407 pr_info("manual fan speed control should be enabled first\n");
2408 err = -EINVAL;
2409 goto out;
2410 }
2411
2412 err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
2413
2414 out:
2415 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2416 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2417
2418 if (err)
2419 return err;
2420
2421 return count;
2422 }
2423
2424 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2425 struct device_attribute *attr,
2426 char *buf)
2427 {
2428 struct amdgpu_device *adev = dev_get_drvdata(dev);
2429 int err;
2430 u32 speed = 0;
2431
2432 if (amdgpu_in_reset(adev))
2433 return -EPERM;
2434 if (adev->in_suspend && !adev->in_runpm)
2435 return -EPERM;
2436
2437 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2438 if (err < 0) {
2439 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2440 return err;
2441 }
2442
2443 err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
2444
2445 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2446 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2447
2448 if (err)
2449 return err;
2450
2451 return sysfs_emit(buf, "%i\n", speed);
2452 }
2453
2454 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2455 struct device_attribute *attr,
2456 char *buf)
2457 {
2458 struct amdgpu_device *adev = dev_get_drvdata(dev);
2459 int err;
2460 u32 speed = 0;
2461
2462 if (amdgpu_in_reset(adev))
2463 return -EPERM;
2464 if (adev->in_suspend && !adev->in_runpm)
2465 return -EPERM;
2466
2467 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2468 if (err < 0) {
2469 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2470 return err;
2471 }
2472
2473 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2474
2475 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2476 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2477
2478 if (err)
2479 return err;
2480
2481 return sysfs_emit(buf, "%i\n", speed);
2482 }
2483
2484 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2485 struct device_attribute *attr,
2486 char *buf)
2487 {
2488 struct amdgpu_device *adev = dev_get_drvdata(dev);
2489 u32 min_rpm = 0;
2490 u32 size = sizeof(min_rpm);
2491 int r;
2492
2493 if (amdgpu_in_reset(adev))
2494 return -EPERM;
2495 if (adev->in_suspend && !adev->in_runpm)
2496 return -EPERM;
2497
2498 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2499 if (r < 0) {
2500 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2501 return r;
2502 }
2503
2504 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2505 (void *)&min_rpm, &size);
2506
2507 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2508 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2509
2510 if (r)
2511 return r;
2512
2513 return sysfs_emit(buf, "%d\n", min_rpm);
2514 }
2515
2516 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2517 struct device_attribute *attr,
2518 char *buf)
2519 {
2520 struct amdgpu_device *adev = dev_get_drvdata(dev);
2521 u32 max_rpm = 0;
2522 u32 size = sizeof(max_rpm);
2523 int r;
2524
2525 if (amdgpu_in_reset(adev))
2526 return -EPERM;
2527 if (adev->in_suspend && !adev->in_runpm)
2528 return -EPERM;
2529
2530 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2531 if (r < 0) {
2532 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2533 return r;
2534 }
2535
2536 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2537 (void *)&max_rpm, &size);
2538
2539 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2540 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2541
2542 if (r)
2543 return r;
2544
2545 return sysfs_emit(buf, "%d\n", max_rpm);
2546 }
2547
2548 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2549 struct device_attribute *attr,
2550 char *buf)
2551 {
2552 struct amdgpu_device *adev = dev_get_drvdata(dev);
2553 int err;
2554 u32 rpm = 0;
2555
2556 if (amdgpu_in_reset(adev))
2557 return -EPERM;
2558 if (adev->in_suspend && !adev->in_runpm)
2559 return -EPERM;
2560
2561 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2562 if (err < 0) {
2563 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2564 return err;
2565 }
2566
2567 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2568
2569 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2570 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2571
2572 if (err)
2573 return err;
2574
2575 return sysfs_emit(buf, "%i\n", rpm);
2576 }
2577
2578 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2579 struct device_attribute *attr,
2580 const char *buf, size_t count)
2581 {
2582 struct amdgpu_device *adev = dev_get_drvdata(dev);
2583 int err;
2584 u32 value;
2585 u32 pwm_mode;
2586
2587 if (amdgpu_in_reset(adev))
2588 return -EPERM;
2589 if (adev->in_suspend && !adev->in_runpm)
2590 return -EPERM;
2591
2592 err = kstrtou32(buf, 10, &value);
2593 if (err)
2594 return err;
2595
2596 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2597 if (err < 0) {
2598 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2599 return err;
2600 }
2601
2602 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2603 if (err)
2604 goto out;
2605
2606 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2607 err = -ENODATA;
2608 goto out;
2609 }
2610
2611 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2612
2613 out:
2614 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2615 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2616
2617 if (err)
2618 return err;
2619
2620 return count;
2621 }
2622
2623 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2624 struct device_attribute *attr,
2625 char *buf)
2626 {
2627 struct amdgpu_device *adev = dev_get_drvdata(dev);
2628 u32 pwm_mode = 0;
2629 int ret;
2630
2631 if (amdgpu_in_reset(adev))
2632 return -EPERM;
2633 if (adev->in_suspend && !adev->in_runpm)
2634 return -EPERM;
2635
2636 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2637 if (ret < 0) {
2638 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2639 return ret;
2640 }
2641
2642 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2643
2644 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2645 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2646
2647 if (ret)
2648 return -EINVAL;
2649
2650 return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2651 }
2652
2653 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2654 struct device_attribute *attr,
2655 const char *buf,
2656 size_t count)
2657 {
2658 struct amdgpu_device *adev = dev_get_drvdata(dev);
2659 int err;
2660 int value;
2661 u32 pwm_mode;
2662
2663 if (amdgpu_in_reset(adev))
2664 return -EPERM;
2665 if (adev->in_suspend && !adev->in_runpm)
2666 return -EPERM;
2667
2668 err = kstrtoint(buf, 10, &value);
2669 if (err)
2670 return err;
2671
2672 if (value == 0)
2673 pwm_mode = AMD_FAN_CTRL_AUTO;
2674 else if (value == 1)
2675 pwm_mode = AMD_FAN_CTRL_MANUAL;
2676 else
2677 return -EINVAL;
2678
2679 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2680 if (err < 0) {
2681 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2682 return err;
2683 }
2684
2685 err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2686
2687 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2688 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2689
2690 if (err)
2691 return -EINVAL;
2692
2693 return count;
2694 }
2695
2696 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2697 struct device_attribute *attr,
2698 char *buf)
2699 {
2700 struct amdgpu_device *adev = dev_get_drvdata(dev);
2701 u32 vddgfx;
2702 int r, size = sizeof(vddgfx);
2703
2704 if (amdgpu_in_reset(adev))
2705 return -EPERM;
2706 if (adev->in_suspend && !adev->in_runpm)
2707 return -EPERM;
2708
2709 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2710 if (r < 0) {
2711 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2712 return r;
2713 }
2714
2715
2716 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
2717 (void *)&vddgfx, &size);
2718
2719 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2720 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2721
2722 if (r)
2723 return r;
2724
2725 return sysfs_emit(buf, "%d\n", vddgfx);
2726 }
2727
2728 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2729 struct device_attribute *attr,
2730 char *buf)
2731 {
2732 return sysfs_emit(buf, "vddgfx\n");
2733 }
2734
2735 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2736 struct device_attribute *attr,
2737 char *buf)
2738 {
2739 struct amdgpu_device *adev = dev_get_drvdata(dev);
2740 u32 vddnb;
2741 int r, size = sizeof(vddnb);
2742
2743 if (amdgpu_in_reset(adev))
2744 return -EPERM;
2745 if (adev->in_suspend && !adev->in_runpm)
2746 return -EPERM;
2747
2748
2749 if (!(adev->flags & AMD_IS_APU))
2750 return -EINVAL;
2751
2752 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2753 if (r < 0) {
2754 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2755 return r;
2756 }
2757
2758
2759 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
2760 (void *)&vddnb, &size);
2761
2762 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2763 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2764
2765 if (r)
2766 return r;
2767
2768 return sysfs_emit(buf, "%d\n", vddnb);
2769 }
2770
2771 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2772 struct device_attribute *attr,
2773 char *buf)
2774 {
2775 return sysfs_emit(buf, "vddnb\n");
2776 }
2777
2778 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2779 struct device_attribute *attr,
2780 char *buf)
2781 {
2782 struct amdgpu_device *adev = dev_get_drvdata(dev);
2783 u32 query = 0;
2784 int r, size = sizeof(u32);
2785 unsigned uw;
2786
2787 if (amdgpu_in_reset(adev))
2788 return -EPERM;
2789 if (adev->in_suspend && !adev->in_runpm)
2790 return -EPERM;
2791
2792 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2793 if (r < 0) {
2794 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2795 return r;
2796 }
2797
2798
2799 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
2800 (void *)&query, &size);
2801
2802 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2803 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2804
2805 if (r)
2806 return r;
2807
2808
2809 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2810
2811 return sysfs_emit(buf, "%u\n", uw);
2812 }
2813
2814 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2815 struct device_attribute *attr,
2816 char *buf)
2817 {
2818 return sysfs_emit(buf, "%i\n", 0);
2819 }
2820
2821
2822 static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
2823 struct device_attribute *attr,
2824 char *buf,
2825 enum pp_power_limit_level pp_limit_level)
2826 {
2827 struct amdgpu_device *adev = dev_get_drvdata(dev);
2828 enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
2829 uint32_t limit;
2830 ssize_t size;
2831 int r;
2832
2833 if (amdgpu_in_reset(adev))
2834 return -EPERM;
2835 if (adev->in_suspend && !adev->in_runpm)
2836 return -EPERM;
2837
2838 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2839 if (r < 0) {
2840 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2841 return r;
2842 }
2843
2844 r = amdgpu_dpm_get_power_limit(adev, &limit,
2845 pp_limit_level, power_type);
2846
2847 if (!r)
2848 size = sysfs_emit(buf, "%u\n", limit * 1000000);
2849 else
2850 size = sysfs_emit(buf, "\n");
2851
2852 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2853 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2854
2855 return size;
2856 }
2857
2858
2859 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2860 struct device_attribute *attr,
2861 char *buf)
2862 {
2863 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
2864
2865 }
2866
2867 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2868 struct device_attribute *attr,
2869 char *buf)
2870 {
2871 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
2872
2873 }
2874
2875 static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
2876 struct device_attribute *attr,
2877 char *buf)
2878 {
2879 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
2880
2881 }
2882
2883 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
2884 struct device_attribute *attr,
2885 char *buf)
2886 {
2887 struct amdgpu_device *adev = dev_get_drvdata(dev);
2888 uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
2889
2890 if (gc_ver == IP_VERSION(10, 3, 1))
2891 return sysfs_emit(buf, "%s\n",
2892 to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
2893 "fastPPT" : "slowPPT");
2894 else
2895 return sysfs_emit(buf, "PPT\n");
2896 }
2897
2898 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2899 struct device_attribute *attr,
2900 const char *buf,
2901 size_t count)
2902 {
2903 struct amdgpu_device *adev = dev_get_drvdata(dev);
2904 int limit_type = to_sensor_dev_attr(attr)->index;
2905 int err;
2906 u32 value;
2907
2908 if (amdgpu_in_reset(adev))
2909 return -EPERM;
2910 if (adev->in_suspend && !adev->in_runpm)
2911 return -EPERM;
2912
2913 if (amdgpu_sriov_vf(adev))
2914 return -EINVAL;
2915
2916 err = kstrtou32(buf, 10, &value);
2917 if (err)
2918 return err;
2919
2920 value = value / 1000000;
2921 value |= limit_type << 24;
2922
2923 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2924 if (err < 0) {
2925 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2926 return err;
2927 }
2928
2929 err = amdgpu_dpm_set_power_limit(adev, value);
2930
2931 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2932 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2933
2934 if (err)
2935 return err;
2936
2937 return count;
2938 }
2939
2940 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
2941 struct device_attribute *attr,
2942 char *buf)
2943 {
2944 struct amdgpu_device *adev = dev_get_drvdata(dev);
2945 uint32_t sclk;
2946 int r, size = sizeof(sclk);
2947
2948 if (amdgpu_in_reset(adev))
2949 return -EPERM;
2950 if (adev->in_suspend && !adev->in_runpm)
2951 return -EPERM;
2952
2953 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2954 if (r < 0) {
2955 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2956 return r;
2957 }
2958
2959
2960 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
2961 (void *)&sclk, &size);
2962
2963 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2964 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2965
2966 if (r)
2967 return r;
2968
2969 return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
2970 }
2971
2972 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
2973 struct device_attribute *attr,
2974 char *buf)
2975 {
2976 return sysfs_emit(buf, "sclk\n");
2977 }
2978
2979 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
2980 struct device_attribute *attr,
2981 char *buf)
2982 {
2983 struct amdgpu_device *adev = dev_get_drvdata(dev);
2984 uint32_t mclk;
2985 int r, size = sizeof(mclk);
2986
2987 if (amdgpu_in_reset(adev))
2988 return -EPERM;
2989 if (adev->in_suspend && !adev->in_runpm)
2990 return -EPERM;
2991
2992 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2993 if (r < 0) {
2994 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2995 return r;
2996 }
2997
2998
2999 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3000 (void *)&mclk, &size);
3001
3002 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3003 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3004
3005 if (r)
3006 return r;
3007
3008 return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
3009 }
3010
3011 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3012 struct device_attribute *attr,
3013 char *buf)
3014 {
3015 return sysfs_emit(buf, "mclk\n");
3016 }
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3104 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3105 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3106 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3107 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3108 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3109 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3110 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3111 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3112 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3113 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3114 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3115 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3116 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3117 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3118 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3119 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3120 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3121 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3122 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3123 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3124 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3125 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3126 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3127 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3128 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3129 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3130 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3131 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3132 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3133 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3134 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3135 static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3136 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3137 static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3138 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3139 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3140 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3141 static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3142 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3143 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3144 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3145 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3146 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3147
3148 static struct attribute *hwmon_attributes[] = {
3149 &sensor_dev_attr_temp1_input.dev_attr.attr,
3150 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3151 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3152 &sensor_dev_attr_temp2_input.dev_attr.attr,
3153 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3154 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3155 &sensor_dev_attr_temp3_input.dev_attr.attr,
3156 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3157 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3158 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3159 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3160 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3161 &sensor_dev_attr_temp1_label.dev_attr.attr,
3162 &sensor_dev_attr_temp2_label.dev_attr.attr,
3163 &sensor_dev_attr_temp3_label.dev_attr.attr,
3164 &sensor_dev_attr_pwm1.dev_attr.attr,
3165 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3166 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3167 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3168 &sensor_dev_attr_fan1_input.dev_attr.attr,
3169 &sensor_dev_attr_fan1_min.dev_attr.attr,
3170 &sensor_dev_attr_fan1_max.dev_attr.attr,
3171 &sensor_dev_attr_fan1_target.dev_attr.attr,
3172 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3173 &sensor_dev_attr_in0_input.dev_attr.attr,
3174 &sensor_dev_attr_in0_label.dev_attr.attr,
3175 &sensor_dev_attr_in1_input.dev_attr.attr,
3176 &sensor_dev_attr_in1_label.dev_attr.attr,
3177 &sensor_dev_attr_power1_average.dev_attr.attr,
3178 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3179 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3180 &sensor_dev_attr_power1_cap.dev_attr.attr,
3181 &sensor_dev_attr_power1_cap_default.dev_attr.attr,
3182 &sensor_dev_attr_power1_label.dev_attr.attr,
3183 &sensor_dev_attr_power2_average.dev_attr.attr,
3184 &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3185 &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3186 &sensor_dev_attr_power2_cap.dev_attr.attr,
3187 &sensor_dev_attr_power2_cap_default.dev_attr.attr,
3188 &sensor_dev_attr_power2_label.dev_attr.attr,
3189 &sensor_dev_attr_freq1_input.dev_attr.attr,
3190 &sensor_dev_attr_freq1_label.dev_attr.attr,
3191 &sensor_dev_attr_freq2_input.dev_attr.attr,
3192 &sensor_dev_attr_freq2_label.dev_attr.attr,
3193 NULL
3194 };
3195
3196 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3197 struct attribute *attr, int index)
3198 {
3199 struct device *dev = kobj_to_dev(kobj);
3200 struct amdgpu_device *adev = dev_get_drvdata(dev);
3201 umode_t effective_mode = attr->mode;
3202 uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
3203
3204
3205 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3206 return 0;
3207
3208
3209 if (amdgpu_sriov_is_pp_one_vf(adev))
3210 effective_mode &= ~S_IWUSR;
3211
3212
3213 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3214 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3215 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3216 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3217 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3218 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3219 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3220 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3221 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3222 return 0;
3223
3224
3225 if ((adev->flags & AMD_IS_APU) &&
3226 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3227 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3228 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3229 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3230 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3231 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3232 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3233 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3234 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3235 return 0;
3236
3237
3238 if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
3239 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3240 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3241 return 0;
3242
3243
3244 if (!adev->pm.dpm_enabled &&
3245 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3246 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3247 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3248 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3249 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3250 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3251 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3252 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3253 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3254 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3255 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3256 return 0;
3257
3258
3259 if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3260 attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
3261 ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
3262 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
3263 effective_mode &= ~S_IRUGO;
3264
3265 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3266 attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
3267 ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
3268 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
3269 effective_mode &= ~S_IWUSR;
3270
3271
3272 if (((adev->family == AMDGPU_FAMILY_SI) ||
3273 ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)))) &&
3274 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3275 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3276 attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3277 attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
3278 return 0;
3279
3280
3281 if (((adev->family == AMDGPU_FAMILY_SI) ||
3282 ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
3283 (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3284 return 0;
3285
3286
3287 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3288 (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3289 (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3290 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
3291 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3292 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3293 return 0;
3294
3295 if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3296 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
3297 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3298 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3299 return 0;
3300
3301 if ((adev->family == AMDGPU_FAMILY_SI ||
3302 adev->family == AMDGPU_FAMILY_KV) &&
3303 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3304 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3305 return 0;
3306
3307
3308 if (!(adev->flags & AMD_IS_APU) &&
3309 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3310 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3311 return 0;
3312
3313
3314 if ((adev->flags & AMD_IS_APU) &&
3315 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3316 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3317 return 0;
3318
3319
3320 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3321 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3322 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3323 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
3324 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3325 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3326 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3327 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
3328 attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3329 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3330 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3331 attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
3332 return 0;
3333
3334
3335 if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
3336 (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
3337 attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3338 attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3339 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3340 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3341 attr == &sensor_dev_attr_power2_label.dev_attr.attr))
3342 return 0;
3343
3344 return effective_mode;
3345 }
3346
3347 static const struct attribute_group hwmon_attrgroup = {
3348 .attrs = hwmon_attributes,
3349 .is_visible = hwmon_attributes_visible,
3350 };
3351
3352 static const struct attribute_group *hwmon_groups[] = {
3353 &hwmon_attrgroup,
3354 NULL
3355 };
3356
3357 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3358 {
3359 int ret;
3360 uint32_t mask = 0;
3361
3362 if (adev->pm.sysfs_initialized)
3363 return 0;
3364
3365 if (adev->pm.dpm_enabled == 0)
3366 return 0;
3367
3368 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
3369
3370 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3371 DRIVER_NAME, adev,
3372 hwmon_groups);
3373 if (IS_ERR(adev->pm.int_hwmon_dev)) {
3374 ret = PTR_ERR(adev->pm.int_hwmon_dev);
3375 dev_err(adev->dev,
3376 "Unable to register hwmon device: %d\n", ret);
3377 return ret;
3378 }
3379
3380 switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
3381 case SRIOV_VF_MODE_ONE_VF:
3382 mask = ATTR_FLAG_ONEVF;
3383 break;
3384 case SRIOV_VF_MODE_MULTI_VF:
3385 mask = 0;
3386 break;
3387 case SRIOV_VF_MODE_BARE_METAL:
3388 default:
3389 mask = ATTR_FLAG_MASK_ALL;
3390 break;
3391 }
3392
3393 ret = amdgpu_device_attr_create_groups(adev,
3394 amdgpu_device_attrs,
3395 ARRAY_SIZE(amdgpu_device_attrs),
3396 mask,
3397 &adev->pm.pm_attr_list);
3398 if (ret)
3399 return ret;
3400
3401 adev->pm.sysfs_initialized = true;
3402
3403 return 0;
3404 }
3405
3406 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3407 {
3408 if (adev->pm.dpm_enabled == 0)
3409 return;
3410
3411 if (adev->pm.int_hwmon_dev)
3412 hwmon_device_unregister(adev->pm.int_hwmon_dev);
3413
3414 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
3415 }
3416
3417
3418
3419
3420 #if defined(CONFIG_DEBUG_FS)
3421
3422 static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
3423 struct amdgpu_device *adev) {
3424 uint16_t *p_val;
3425 uint32_t size;
3426 int i;
3427 uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
3428
3429 if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
3430 p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
3431 GFP_KERNEL);
3432
3433 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
3434 (void *)p_val, &size)) {
3435 for (i = 0; i < num_cpu_cores; i++)
3436 seq_printf(m, "\t%u MHz (CPU%d)\n",
3437 *(p_val + i), i);
3438 }
3439
3440 kfree(p_val);
3441 }
3442 }
3443
3444 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3445 {
3446 uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
3447 uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
3448 uint32_t value;
3449 uint64_t value64 = 0;
3450 uint32_t query = 0;
3451 int size;
3452
3453
3454 size = sizeof(value);
3455 seq_printf(m, "GFX Clocks and Power:\n");
3456
3457 amdgpu_debugfs_prints_cpu_info(m, adev);
3458
3459 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3460 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3461 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3462 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3463 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3464 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3465 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3466 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3467 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3468 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3469 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3470 seq_printf(m, "\t%u mV (VDDNB)\n", value);
3471 size = sizeof(uint32_t);
3472 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3473 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3474 size = sizeof(value);
3475 seq_printf(m, "\n");
3476
3477
3478 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3479 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3480
3481
3482 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3483 seq_printf(m, "GPU Load: %u %%\n", value);
3484
3485 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3486 seq_printf(m, "MEM Load: %u %%\n", value);
3487
3488 seq_printf(m, "\n");
3489
3490
3491 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3492 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3493
3494
3495 if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
3496
3497 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3498 if (!value) {
3499 seq_printf(m, "VCN: Disabled\n");
3500 } else {
3501 seq_printf(m, "VCN: Enabled\n");
3502 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3503 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3504 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3505 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3506 }
3507 }
3508 seq_printf(m, "\n");
3509 } else {
3510
3511 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3512 if (!value) {
3513 seq_printf(m, "UVD: Disabled\n");
3514 } else {
3515 seq_printf(m, "UVD: Enabled\n");
3516 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3517 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3518 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3519 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3520 }
3521 }
3522 seq_printf(m, "\n");
3523
3524
3525 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3526 if (!value) {
3527 seq_printf(m, "VCE: Disabled\n");
3528 } else {
3529 seq_printf(m, "VCE: Enabled\n");
3530 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3531 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3532 }
3533 }
3534 }
3535
3536 return 0;
3537 }
3538
3539 static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
3540 {
3541 int i;
3542
3543 for (i = 0; clocks[i].flag; i++)
3544 seq_printf(m, "\t%s: %s\n", clocks[i].name,
3545 (flags & clocks[i].flag) ? "On" : "Off");
3546 }
3547
3548 static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
3549 {
3550 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
3551 struct drm_device *dev = adev_to_drm(adev);
3552 u64 flags = 0;
3553 int r;
3554
3555 if (amdgpu_in_reset(adev))
3556 return -EPERM;
3557 if (adev->in_suspend && !adev->in_runpm)
3558 return -EPERM;
3559
3560 r = pm_runtime_get_sync(dev->dev);
3561 if (r < 0) {
3562 pm_runtime_put_autosuspend(dev->dev);
3563 return r;
3564 }
3565
3566 if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
3567 r = amdgpu_debugfs_pm_info_pp(m, adev);
3568 if (r)
3569 goto out;
3570 }
3571
3572 amdgpu_device_ip_get_clockgating_state(adev, &flags);
3573
3574 seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
3575 amdgpu_parse_cg_state(m, flags);
3576 seq_printf(m, "\n");
3577
3578 out:
3579 pm_runtime_mark_last_busy(dev->dev);
3580 pm_runtime_put_autosuspend(dev->dev);
3581
3582 return r;
3583 }
3584
3585 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
3586
3587
3588
3589
3590
3591
3592 static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
3593 size_t size, loff_t *pos)
3594 {
3595 struct amdgpu_device *adev = file_inode(f)->i_private;
3596 size_t smu_prv_buf_size;
3597 void *smu_prv_buf;
3598 int ret = 0;
3599
3600 if (amdgpu_in_reset(adev))
3601 return -EPERM;
3602 if (adev->in_suspend && !adev->in_runpm)
3603 return -EPERM;
3604
3605 ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
3606 if (ret)
3607 return ret;
3608
3609 if (!smu_prv_buf || !smu_prv_buf_size)
3610 return -EINVAL;
3611
3612 return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
3613 smu_prv_buf_size);
3614 }
3615
3616 static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
3617 .owner = THIS_MODULE,
3618 .open = simple_open,
3619 .read = amdgpu_pm_prv_buffer_read,
3620 .llseek = default_llseek,
3621 };
3622
3623 #endif
3624
3625 void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
3626 {
3627 #if defined(CONFIG_DEBUG_FS)
3628 struct drm_minor *minor = adev_to_drm(adev)->primary;
3629 struct dentry *root = minor->debugfs_root;
3630
3631 if (!adev->pm.dpm_enabled)
3632 return;
3633
3634 debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
3635 &amdgpu_debugfs_pm_info_fops);
3636
3637 if (adev->pm.smu_prv_buffer_size > 0)
3638 debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
3639 adev,
3640 &amdgpu_debugfs_pm_prv_buffer_fops,
3641 adev->pm.smu_prv_buffer_size);
3642
3643 amdgpu_dpm_stb_debug_fs_init(adev);
3644 #endif
3645 }