0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026 #include <linux/string_helpers.h>
0027 #include <linux/uaccess.h>
0028
0029 #include "dc.h"
0030 #include "amdgpu.h"
0031 #include "amdgpu_dm.h"
0032 #include "amdgpu_dm_debugfs.h"
0033 #include "dm_helpers.h"
0034 #include "dmub/dmub_srv.h"
0035 #include "resource.h"
0036 #include "dsc.h"
0037 #include "dc_link_dp.h"
0038 #include "link_hwss.h"
0039 #include "dc/dc_dmub_srv.h"
0040
0041 struct dmub_debugfs_trace_header {
0042 uint32_t entry_count;
0043 uint32_t reserved[3];
0044 };
0045
0046 struct dmub_debugfs_trace_entry {
0047 uint32_t trace_code;
0048 uint32_t tick_count;
0049 uint32_t param0;
0050 uint32_t param1;
0051 };
0052
0053 static const char *const mst_progress_status[] = {
0054 "probe",
0055 "remote_edid",
0056 "allocate_new_payload",
0057 "clear_allocated_payload",
0058 };
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068 static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
0069 long *param, const char __user *buf,
0070 int max_param_num,
0071 uint8_t *param_nums)
0072 {
0073 char *wr_buf_ptr = NULL;
0074 uint32_t wr_buf_count = 0;
0075 int r;
0076 char *sub_str = NULL;
0077 const char delimiter[3] = {' ', '\n', '\0'};
0078 uint8_t param_index = 0;
0079
0080 *param_nums = 0;
0081
0082 wr_buf_ptr = wr_buf;
0083
0084
0085 if (copy_from_user(wr_buf_ptr, buf, wr_buf_size)) {
0086 DRM_DEBUG_DRIVER("user data could not be read successfully\n");
0087 return -EFAULT;
0088 }
0089
0090
0091 while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
0092
0093 while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
0094 wr_buf_ptr++;
0095 wr_buf_count++;
0096 }
0097
0098 if (wr_buf_count == wr_buf_size)
0099 break;
0100
0101
0102 while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
0103 wr_buf_ptr++;
0104 wr_buf_count++;
0105 }
0106
0107 (*param_nums)++;
0108
0109 if (wr_buf_count == wr_buf_size)
0110 break;
0111 }
0112
0113 if (*param_nums > max_param_num)
0114 *param_nums = max_param_num;
0115
0116 wr_buf_ptr = wr_buf;
0117 wr_buf_count = 0;
0118
0119 while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
0120 wr_buf_ptr++;
0121 wr_buf_count++;
0122 }
0123
0124 while (param_index < *param_nums) {
0125
0126 sub_str = strsep(&wr_buf_ptr, delimiter);
0127
0128 r = kstrtol(sub_str, 16, &(param[param_index]));
0129
0130 if (r)
0131 DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
0132
0133 param_index++;
0134 }
0135
0136 return 0;
0137 }
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180 static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
0181 size_t size, loff_t *pos)
0182 {
0183 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
0184 struct dc_link *link = connector->dc_link;
0185 char *rd_buf = NULL;
0186 char *rd_buf_ptr = NULL;
0187 const uint32_t rd_buf_size = 100;
0188 uint32_t result = 0;
0189 uint8_t str_len = 0;
0190 int r;
0191
0192 if (*pos & 3 || size & 3)
0193 return -EINVAL;
0194
0195 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
0196 if (!rd_buf)
0197 return 0;
0198
0199 rd_buf_ptr = rd_buf;
0200
0201 str_len = strlen("Current: %d 0x%x %d ");
0202 snprintf(rd_buf_ptr, str_len, "Current: %d 0x%x %d ",
0203 link->cur_link_settings.lane_count,
0204 link->cur_link_settings.link_rate,
0205 link->cur_link_settings.link_spread);
0206 rd_buf_ptr += str_len;
0207
0208 str_len = strlen("Verified: %d 0x%x %d ");
0209 snprintf(rd_buf_ptr, str_len, "Verified: %d 0x%x %d ",
0210 link->verified_link_cap.lane_count,
0211 link->verified_link_cap.link_rate,
0212 link->verified_link_cap.link_spread);
0213 rd_buf_ptr += str_len;
0214
0215 str_len = strlen("Reported: %d 0x%x %d ");
0216 snprintf(rd_buf_ptr, str_len, "Reported: %d 0x%x %d ",
0217 link->reported_link_cap.lane_count,
0218 link->reported_link_cap.link_rate,
0219 link->reported_link_cap.link_spread);
0220 rd_buf_ptr += str_len;
0221
0222 str_len = strlen("Preferred: %d 0x%x %d ");
0223 snprintf(rd_buf_ptr, str_len, "Preferred: %d 0x%x %d\n",
0224 link->preferred_link_setting.lane_count,
0225 link->preferred_link_setting.link_rate,
0226 link->preferred_link_setting.link_spread);
0227
0228 while (size) {
0229 if (*pos >= rd_buf_size)
0230 break;
0231
0232 r = put_user(*(rd_buf + result), buf);
0233 if (r) {
0234 kfree(rd_buf);
0235 return r;
0236 }
0237
0238 buf += 1;
0239 size -= 1;
0240 *pos += 1;
0241 result += 1;
0242 }
0243
0244 kfree(rd_buf);
0245 return result;
0246 }
0247
0248 static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
0249 size_t size, loff_t *pos)
0250 {
0251 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
0252 struct dc_link *link = connector->dc_link;
0253 struct amdgpu_device *adev = drm_to_adev(connector->base.dev);
0254 struct dc *dc = (struct dc *)link->dc;
0255 struct dc_link_settings prefer_link_settings;
0256 char *wr_buf = NULL;
0257 const uint32_t wr_buf_size = 40;
0258
0259 int max_param_num = 2;
0260 uint8_t param_nums = 0;
0261 long param[2];
0262 bool valid_input = true;
0263
0264 if (size == 0)
0265 return -EINVAL;
0266
0267 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
0268 if (!wr_buf)
0269 return -ENOSPC;
0270
0271 if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
0272 (long *)param, buf,
0273 max_param_num,
0274 ¶m_nums)) {
0275 kfree(wr_buf);
0276 return -EINVAL;
0277 }
0278
0279 if (param_nums <= 0) {
0280 kfree(wr_buf);
0281 DRM_DEBUG_DRIVER("user data not be read\n");
0282 return -EINVAL;
0283 }
0284
0285 switch (param[0]) {
0286 case LANE_COUNT_ONE:
0287 case LANE_COUNT_TWO:
0288 case LANE_COUNT_FOUR:
0289 break;
0290 default:
0291 valid_input = false;
0292 break;
0293 }
0294
0295 switch (param[1]) {
0296 case LINK_RATE_LOW:
0297 case LINK_RATE_HIGH:
0298 case LINK_RATE_RBR2:
0299 case LINK_RATE_HIGH2:
0300 case LINK_RATE_HIGH3:
0301 case LINK_RATE_UHBR10:
0302 break;
0303 default:
0304 valid_input = false;
0305 break;
0306 }
0307
0308 if (!valid_input) {
0309 kfree(wr_buf);
0310 DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n");
0311 mutex_lock(&adev->dm.dc_lock);
0312 dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false);
0313 mutex_unlock(&adev->dm.dc_lock);
0314 return size;
0315 }
0316
0317
0318
0319
0320 prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
0321 prefer_link_settings.use_link_rate_set = false;
0322 prefer_link_settings.lane_count = param[0];
0323 prefer_link_settings.link_rate = param[1];
0324
0325 mutex_lock(&adev->dm.dc_lock);
0326 dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, false);
0327 mutex_unlock(&adev->dm.dc_lock);
0328
0329 kfree(wr_buf);
0330 return size;
0331 }
0332
0333
0334
0335
0336
0337
0338
0339
0340
0341
0342
0343
0344
0345
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358
0359
0360
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
0373
0374 static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
0375 size_t size, loff_t *pos)
0376 {
0377 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
0378 struct dc_link *link = connector->dc_link;
0379 char *rd_buf = NULL;
0380 const uint32_t rd_buf_size = 20;
0381 uint32_t result = 0;
0382 int r;
0383
0384 if (*pos & 3 || size & 3)
0385 return -EINVAL;
0386
0387 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
0388 if (!rd_buf)
0389 return -EINVAL;
0390
0391 snprintf(rd_buf, rd_buf_size, " %d %d %d\n",
0392 link->cur_lane_setting[0].VOLTAGE_SWING,
0393 link->cur_lane_setting[0].PRE_EMPHASIS,
0394 link->cur_lane_setting[0].POST_CURSOR2);
0395
0396 while (size) {
0397 if (*pos >= rd_buf_size)
0398 break;
0399
0400 r = put_user((*(rd_buf + result)), buf);
0401 if (r) {
0402 kfree(rd_buf);
0403 return r;
0404 }
0405
0406 buf += 1;
0407 size -= 1;
0408 *pos += 1;
0409 result += 1;
0410 }
0411
0412 kfree(rd_buf);
0413 return result;
0414 }
0415
0416 static int dp_lttpr_status_show(struct seq_file *m, void *d)
0417 {
0418 char *data;
0419 struct amdgpu_dm_connector *connector = file_inode(m->file)->i_private;
0420 struct dc_link *link = connector->dc_link;
0421 uint32_t read_size = 1;
0422 uint8_t repeater_count = 0;
0423
0424 data = kzalloc(read_size, GFP_KERNEL);
0425 if (!data)
0426 return 0;
0427
0428 dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0002, data, read_size);
0429
0430 switch ((uint8_t)*data) {
0431 case 0x80:
0432 repeater_count = 1;
0433 break;
0434 case 0x40:
0435 repeater_count = 2;
0436 break;
0437 case 0x20:
0438 repeater_count = 3;
0439 break;
0440 case 0x10:
0441 repeater_count = 4;
0442 break;
0443 case 0x8:
0444 repeater_count = 5;
0445 break;
0446 case 0x4:
0447 repeater_count = 6;
0448 break;
0449 case 0x2:
0450 repeater_count = 7;
0451 break;
0452 case 0x1:
0453 repeater_count = 8;
0454 break;
0455 case 0x0:
0456 repeater_count = 0;
0457 break;
0458 default:
0459 repeater_count = (uint8_t)*data;
0460 break;
0461 }
0462
0463 seq_printf(m, "phy repeater count: %d\n", repeater_count);
0464
0465 dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0003, data, read_size);
0466
0467 if ((uint8_t)*data == 0x55)
0468 seq_printf(m, "phy repeater mode: transparent\n");
0469 else if ((uint8_t)*data == 0xAA)
0470 seq_printf(m, "phy repeater mode: non-transparent\n");
0471 else if ((uint8_t)*data == 0x00)
0472 seq_printf(m, "phy repeater mode: non lttpr\n");
0473 else
0474 seq_printf(m, "phy repeater mode: read error\n");
0475
0476 kfree(data);
0477 return 0;
0478 }
0479
0480 static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
0481 size_t size, loff_t *pos)
0482 {
0483 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
0484 struct dc_link *link = connector->dc_link;
0485 struct dc *dc = (struct dc *)link->dc;
0486 char *wr_buf = NULL;
0487 uint32_t wr_buf_size = 40;
0488 long param[3];
0489 bool use_prefer_link_setting;
0490 struct link_training_settings link_lane_settings;
0491 int max_param_num = 3;
0492 uint8_t param_nums = 0;
0493 int r = 0;
0494
0495
0496 if (size == 0)
0497 return -EINVAL;
0498
0499 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
0500 if (!wr_buf)
0501 return -ENOSPC;
0502
0503 if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
0504 (long *)param, buf,
0505 max_param_num,
0506 ¶m_nums)) {
0507 kfree(wr_buf);
0508 return -EINVAL;
0509 }
0510
0511 if (param_nums <= 0) {
0512 kfree(wr_buf);
0513 DRM_DEBUG_DRIVER("user data not be read\n");
0514 return -EINVAL;
0515 }
0516
0517 if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) ||
0518 (param[1] > PRE_EMPHASIS_MAX_LEVEL) ||
0519 (param[2] > POST_CURSOR2_MAX_LEVEL)) {
0520 kfree(wr_buf);
0521 DRM_DEBUG_DRIVER("Invalid Input No HW will be programmed\n");
0522 return size;
0523 }
0524
0525
0526 use_prefer_link_setting =
0527 ((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) &&
0528 (link->test_pattern_enabled));
0529
0530 memset(&link_lane_settings, 0, sizeof(link_lane_settings));
0531
0532 if (use_prefer_link_setting) {
0533 link_lane_settings.link_settings.lane_count =
0534 link->preferred_link_setting.lane_count;
0535 link_lane_settings.link_settings.link_rate =
0536 link->preferred_link_setting.link_rate;
0537 link_lane_settings.link_settings.link_spread =
0538 link->preferred_link_setting.link_spread;
0539 } else {
0540 link_lane_settings.link_settings.lane_count =
0541 link->cur_link_settings.lane_count;
0542 link_lane_settings.link_settings.link_rate =
0543 link->cur_link_settings.link_rate;
0544 link_lane_settings.link_settings.link_spread =
0545 link->cur_link_settings.link_spread;
0546 }
0547
0548
0549 for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
0550 link_lane_settings.hw_lane_settings[r].VOLTAGE_SWING =
0551 (enum dc_voltage_swing) (param[0]);
0552 link_lane_settings.hw_lane_settings[r].PRE_EMPHASIS =
0553 (enum dc_pre_emphasis) (param[1]);
0554 link_lane_settings.hw_lane_settings[r].POST_CURSOR2 =
0555 (enum dc_post_cursor2) (param[2]);
0556 }
0557
0558
0559 dc_link_set_drive_settings(dc, &link_lane_settings, link);
0560
0561 kfree(wr_buf);
0562 return size;
0563 }
0564
0565
0566
0567
0568
0569
0570
0571
0572
0573
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584
0585
0586
0587
0588
0589
0590
0591
0592
0593
0594
0595
0596
0597
0598
0599
0600
0601
0602
0603
0604
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
0616
0617
0618
0619
0620
0621
0622
0623 static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
0624 size_t size, loff_t *pos)
0625 {
0626 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
0627 struct dc_link *link = connector->dc_link;
0628 char *wr_buf = NULL;
0629 uint32_t wr_buf_size = 100;
0630 long param[11] = {0x0};
0631 int max_param_num = 11;
0632 enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
0633 bool disable_hpd = false;
0634 bool valid_test_pattern = false;
0635 uint8_t param_nums = 0;
0636
0637 uint8_t custom_pattern[10] = {
0638 0x1f, 0x7c, 0xf0, 0xc1, 0x07,
0639 0x1f, 0x7c, 0xf0, 0xc1, 0x07
0640 };
0641 struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
0642 LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
0643 struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
0644 LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
0645 struct link_training_settings link_training_settings;
0646 int i;
0647
0648 if (size == 0)
0649 return -EINVAL;
0650
0651 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
0652 if (!wr_buf)
0653 return -ENOSPC;
0654
0655 if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
0656 (long *)param, buf,
0657 max_param_num,
0658 ¶m_nums)) {
0659 kfree(wr_buf);
0660 return -EINVAL;
0661 }
0662
0663 if (param_nums <= 0) {
0664 kfree(wr_buf);
0665 DRM_DEBUG_DRIVER("user data not be read\n");
0666 return -EINVAL;
0667 }
0668
0669
0670 test_pattern = param[0];
0671
0672 switch (test_pattern) {
0673 case DP_TEST_PATTERN_VIDEO_MODE:
0674 case DP_TEST_PATTERN_COLOR_SQUARES:
0675 case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
0676 case DP_TEST_PATTERN_VERTICAL_BARS:
0677 case DP_TEST_PATTERN_HORIZONTAL_BARS:
0678 case DP_TEST_PATTERN_COLOR_RAMP:
0679 valid_test_pattern = true;
0680 break;
0681
0682 case DP_TEST_PATTERN_D102:
0683 case DP_TEST_PATTERN_SYMBOL_ERROR:
0684 case DP_TEST_PATTERN_PRBS7:
0685 case DP_TEST_PATTERN_80BIT_CUSTOM:
0686 case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
0687 case DP_TEST_PATTERN_TRAINING_PATTERN4:
0688 disable_hpd = true;
0689 valid_test_pattern = true;
0690 break;
0691
0692 default:
0693 valid_test_pattern = false;
0694 test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
0695 break;
0696 }
0697
0698 if (!valid_test_pattern) {
0699 kfree(wr_buf);
0700 DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
0701 return size;
0702 }
0703
0704 if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
0705 for (i = 0; i < 10; i++) {
0706 if ((uint8_t) param[i + 1] != 0x0)
0707 break;
0708 }
0709
0710 if (i < 10) {
0711
0712 for (i = 0; i < 10; i++)
0713 custom_pattern[i] = (uint8_t) param[i + 1];
0714 }
0715 }
0716
0717
0718
0719
0720
0721
0722
0723
0724
0725 if (!disable_hpd)
0726 dc_link_enable_hpd(link);
0727
0728 prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
0729 prefer_link_settings.link_rate = link->verified_link_cap.link_rate;
0730 prefer_link_settings.link_spread = link->verified_link_cap.link_spread;
0731
0732 cur_link_settings.lane_count = link->cur_link_settings.lane_count;
0733 cur_link_settings.link_rate = link->cur_link_settings.link_rate;
0734 cur_link_settings.link_spread = link->cur_link_settings.link_spread;
0735
0736 link_training_settings.link_settings = cur_link_settings;
0737
0738
0739 if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
0740 if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN &&
0741 prefer_link_settings.link_rate != LINK_RATE_UNKNOWN &&
0742 (prefer_link_settings.lane_count != cur_link_settings.lane_count ||
0743 prefer_link_settings.link_rate != cur_link_settings.link_rate))
0744 link_training_settings.link_settings = prefer_link_settings;
0745 }
0746
0747 for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
0748 link_training_settings.hw_lane_settings[i] = link->cur_lane_setting[i];
0749
0750 dc_link_set_test_pattern(
0751 link,
0752 test_pattern,
0753 DP_TEST_PATTERN_COLOR_SPACE_RGB,
0754 &link_training_settings,
0755 custom_pattern,
0756 10);
0757
0758
0759
0760
0761
0762
0763 if (valid_test_pattern && disable_hpd)
0764 dc_link_disable_hpd(link);
0765
0766 kfree(wr_buf);
0767
0768 return size;
0769 }
0770
0771
0772
0773
0774
0775 static int dmub_tracebuffer_show(struct seq_file *m, void *data)
0776 {
0777 struct amdgpu_device *adev = m->private;
0778 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
0779 struct dmub_debugfs_trace_entry *entries;
0780 uint8_t *tbuf_base;
0781 uint32_t tbuf_size, max_entries, num_entries, i;
0782
0783 if (!fb_info)
0784 return 0;
0785
0786 tbuf_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr;
0787 if (!tbuf_base)
0788 return 0;
0789
0790 tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size;
0791 max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
0792 sizeof(struct dmub_debugfs_trace_entry);
0793
0794 num_entries =
0795 ((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count;
0796
0797 num_entries = min(num_entries, max_entries);
0798
0799 entries = (struct dmub_debugfs_trace_entry
0800 *)(tbuf_base +
0801 sizeof(struct dmub_debugfs_trace_header));
0802
0803 for (i = 0; i < num_entries; ++i) {
0804 struct dmub_debugfs_trace_entry *entry = &entries[i];
0805
0806 seq_printf(m,
0807 "trace_code=%u tick_count=%u param0=%u param1=%u\n",
0808 entry->trace_code, entry->tick_count, entry->param0,
0809 entry->param1);
0810 }
0811
0812 return 0;
0813 }
0814
0815
0816
0817
0818
0819 static int dmub_fw_state_show(struct seq_file *m, void *data)
0820 {
0821 struct amdgpu_device *adev = m->private;
0822 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
0823 uint8_t *state_base;
0824 uint32_t state_size;
0825
0826 if (!fb_info)
0827 return 0;
0828
0829 state_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr;
0830 if (!state_base)
0831 return 0;
0832
0833 state_size = fb_info->fb[DMUB_WINDOW_6_FW_STATE].size;
0834
0835 return seq_write(m, state_base, state_size);
0836 }
0837
0838
0839
0840
0841
0842
0843
0844
0845
0846
0847
0848
0849
0850
0851
0852 static int psr_capability_show(struct seq_file *m, void *data)
0853 {
0854 struct drm_connector *connector = m->private;
0855 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
0856 struct dc_link *link = aconnector->dc_link;
0857
0858 if (!link)
0859 return -ENODEV;
0860
0861 if (link->type == dc_connection_none)
0862 return -ENODEV;
0863
0864 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
0865 return -ENODEV;
0866
0867 seq_printf(m, "Sink support: %s", str_yes_no(link->dpcd_caps.psr_info.psr_version != 0));
0868 if (link->dpcd_caps.psr_info.psr_version)
0869 seq_printf(m, " [0x%02x]", link->dpcd_caps.psr_info.psr_version);
0870 seq_puts(m, "\n");
0871
0872 seq_printf(m, "Driver support: %s", str_yes_no(link->psr_settings.psr_feature_enabled));
0873 if (link->psr_settings.psr_version)
0874 seq_printf(m, " [0x%02x]", link->psr_settings.psr_version);
0875 seq_puts(m, "\n");
0876
0877 return 0;
0878 }
0879
0880
0881
0882
0883
0884 static int amdgpu_current_bpc_show(struct seq_file *m, void *data)
0885 {
0886 struct drm_crtc *crtc = m->private;
0887 struct drm_device *dev = crtc->dev;
0888 struct dm_crtc_state *dm_crtc_state = NULL;
0889 int res = -ENODEV;
0890 unsigned int bpc;
0891
0892 mutex_lock(&dev->mode_config.mutex);
0893 drm_modeset_lock(&crtc->mutex, NULL);
0894 if (crtc->state == NULL)
0895 goto unlock;
0896
0897 dm_crtc_state = to_dm_crtc_state(crtc->state);
0898 if (dm_crtc_state->stream == NULL)
0899 goto unlock;
0900
0901 switch (dm_crtc_state->stream->timing.display_color_depth) {
0902 case COLOR_DEPTH_666:
0903 bpc = 6;
0904 break;
0905 case COLOR_DEPTH_888:
0906 bpc = 8;
0907 break;
0908 case COLOR_DEPTH_101010:
0909 bpc = 10;
0910 break;
0911 case COLOR_DEPTH_121212:
0912 bpc = 12;
0913 break;
0914 case COLOR_DEPTH_161616:
0915 bpc = 16;
0916 break;
0917 default:
0918 goto unlock;
0919 }
0920
0921 seq_printf(m, "Current: %u\n", bpc);
0922 res = 0;
0923
0924 unlock:
0925 drm_modeset_unlock(&crtc->mutex);
0926 mutex_unlock(&dev->mode_config.mutex);
0927
0928 return res;
0929 }
0930 DEFINE_SHOW_ATTRIBUTE(amdgpu_current_bpc);
0931
0932
0933
0934
0935
0936
0937
0938
0939 static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf,
0940 size_t size, loff_t *pos)
0941 {
0942 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
0943 char *wr_buf = NULL;
0944 uint32_t wr_buf_size = 42;
0945 int max_param_num = 1;
0946 long param;
0947 uint8_t param_nums = 0;
0948
0949 if (size == 0)
0950 return -EINVAL;
0951
0952 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
0953
0954 if (!wr_buf) {
0955 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
0956 return -ENOSPC;
0957 }
0958
0959 if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
0960 ¶m, buf,
0961 max_param_num,
0962 ¶m_nums)) {
0963 kfree(wr_buf);
0964 return -EINVAL;
0965 }
0966
0967 aconnector->dsc_settings.dsc_force_disable_passthrough = param;
0968
0969 kfree(wr_buf);
0970 return 0;
0971 }
0972
0973 #ifdef CONFIG_DRM_AMD_DC_HDCP
0974
0975
0976
0977
0978
0979
0980
0981
0982
0983 static int hdcp_sink_capability_show(struct seq_file *m, void *data)
0984 {
0985 struct drm_connector *connector = m->private;
0986 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
0987 bool hdcp_cap, hdcp2_cap;
0988
0989 if (connector->status != connector_status_connected)
0990 return -ENODEV;
0991
0992 seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id);
0993
0994 hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link, aconnector->dc_sink->sink_signal);
0995 hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link, aconnector->dc_sink->sink_signal);
0996
0997
0998 if (hdcp_cap)
0999 seq_printf(m, "%s ", "HDCP1.4");
1000 if (hdcp2_cap)
1001 seq_printf(m, "%s ", "HDCP2.2");
1002
1003 if (!hdcp_cap && !hdcp2_cap)
1004 seq_printf(m, "%s ", "None");
1005
1006 seq_puts(m, "\n");
1007
1008 return 0;
1009 }
1010 #endif
1011
1012
1013
1014
1015
1016 static int internal_display_show(struct seq_file *m, void *data)
1017 {
1018 struct drm_connector *connector = m->private;
1019 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1020 struct dc_link *link = aconnector->dc_link;
1021
1022 seq_printf(m, "Internal: %u\n", link->is_internal_display);
1023
1024 return 0;
1025 }
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041 static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *buf,
1042 size_t size, loff_t *pos)
1043 {
1044 int r;
1045 uint8_t data[36];
1046 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
1047 struct dm_crtc_state *acrtc_state;
1048 uint32_t write_size = 36;
1049
1050 if (connector->base.status != connector_status_connected)
1051 return -ENODEV;
1052
1053 if (size == 0)
1054 return 0;
1055
1056 acrtc_state = to_dm_crtc_state(connector->base.state->crtc->state);
1057
1058 r = copy_from_user(data, buf, write_size);
1059
1060 write_size -= r;
1061
1062 dc_stream_send_dp_sdp(acrtc_state->stream, data, write_size);
1063
1064 return write_size;
1065 }
1066
1067 static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf,
1068 size_t size, loff_t *pos)
1069 {
1070 int r;
1071 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
1072
1073 if (size < sizeof(connector->debugfs_dpcd_address))
1074 return -EINVAL;
1075
1076 r = copy_from_user(&connector->debugfs_dpcd_address,
1077 buf, sizeof(connector->debugfs_dpcd_address));
1078
1079 return size - r;
1080 }
1081
1082 static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf,
1083 size_t size, loff_t *pos)
1084 {
1085 int r;
1086 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
1087
1088 if (size < sizeof(connector->debugfs_dpcd_size))
1089 return -EINVAL;
1090
1091 r = copy_from_user(&connector->debugfs_dpcd_size,
1092 buf, sizeof(connector->debugfs_dpcd_size));
1093
1094 if (connector->debugfs_dpcd_size > 256)
1095 connector->debugfs_dpcd_size = 0;
1096
1097 return size - r;
1098 }
1099
1100 static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf,
1101 size_t size, loff_t *pos)
1102 {
1103 int r;
1104 char *data;
1105 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
1106 struct dc_link *link = connector->dc_link;
1107 uint32_t write_size = connector->debugfs_dpcd_size;
1108
1109 if (!write_size || size < write_size)
1110 return -EINVAL;
1111
1112 data = kzalloc(write_size, GFP_KERNEL);
1113 if (!data)
1114 return 0;
1115
1116 r = copy_from_user(data, buf, write_size);
1117
1118 dm_helpers_dp_write_dpcd(link->ctx, link,
1119 connector->debugfs_dpcd_address, data, write_size - r);
1120 kfree(data);
1121 return write_size - r;
1122 }
1123
1124 static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
1125 size_t size, loff_t *pos)
1126 {
1127 int r;
1128 char *data;
1129 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
1130 struct dc_link *link = connector->dc_link;
1131 uint32_t read_size = connector->debugfs_dpcd_size;
1132
1133 if (!read_size || size < read_size)
1134 return 0;
1135
1136 data = kzalloc(read_size, GFP_KERNEL);
1137 if (!data)
1138 return 0;
1139
1140 dm_helpers_dp_read_dpcd(link->ctx, link,
1141 connector->debugfs_dpcd_address, data, read_size);
1142
1143 r = copy_to_user(buf, data, read_size);
1144
1145 kfree(data);
1146 return read_size - r;
1147 }
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158 static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
1159 {
1160 struct drm_connector *connector = m->private;
1161 struct drm_modeset_acquire_ctx ctx;
1162 struct drm_device *dev = connector->dev;
1163 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1164 int ret = 0;
1165 bool try_again = false;
1166 bool is_fec_supported = false;
1167 bool is_dsc_supported = false;
1168 struct dpcd_caps dpcd_caps;
1169
1170 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1171 do {
1172 try_again = false;
1173 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
1174 if (ret) {
1175 if (ret == -EDEADLK) {
1176 ret = drm_modeset_backoff(&ctx);
1177 if (!ret) {
1178 try_again = true;
1179 continue;
1180 }
1181 }
1182 break;
1183 }
1184 if (connector->status != connector_status_connected) {
1185 ret = -ENODEV;
1186 break;
1187 }
1188 dpcd_caps = aconnector->dc_link->dpcd_caps;
1189 if (aconnector->port) {
1190
1191
1192
1193
1194
1195 if (aconnector->dsc_aux) {
1196 is_fec_supported = true;
1197 is_dsc_supported = true;
1198 }
1199 } else {
1200 is_fec_supported = dpcd_caps.fec_cap.raw & 0x1;
1201 is_dsc_supported = dpcd_caps.dsc_caps.dsc_basic_caps.raw[0] & 0x1;
1202 }
1203 } while (try_again);
1204
1205 drm_modeset_drop_locks(&ctx);
1206 drm_modeset_acquire_fini(&ctx);
1207
1208 seq_printf(m, "FEC_Sink_Support: %s\n", str_yes_no(is_fec_supported));
1209 seq_printf(m, "DSC_Sink_Support: %s\n", str_yes_no(is_dsc_supported));
1210
1211 return ret;
1212 }
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229 static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
1230 size_t size, loff_t *pos)
1231 {
1232 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1233 struct drm_connector *connector = &aconnector->base;
1234 struct dc_link *link = NULL;
1235 struct drm_device *dev = connector->dev;
1236 struct amdgpu_device *adev = drm_to_adev(dev);
1237 enum dc_connection_type new_connection_type = dc_connection_none;
1238 char *wr_buf = NULL;
1239 uint32_t wr_buf_size = 42;
1240 int max_param_num = 1;
1241 long param[1] = {0};
1242 uint8_t param_nums = 0;
1243 bool ret = false;
1244
1245 if (!aconnector || !aconnector->dc_link)
1246 return -EINVAL;
1247
1248 if (size == 0)
1249 return -EINVAL;
1250
1251 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1252
1253 if (!wr_buf) {
1254 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1255 return -ENOSPC;
1256 }
1257
1258 if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
1259 (long *)param, buf,
1260 max_param_num,
1261 ¶m_nums)) {
1262 kfree(wr_buf);
1263 return -EINVAL;
1264 }
1265
1266 kfree(wr_buf);
1267
1268 if (param_nums <= 0) {
1269 DRM_DEBUG_DRIVER("user data not be read\n");
1270 return -EINVAL;
1271 }
1272
1273 mutex_lock(&aconnector->hpd_lock);
1274
1275
1276 if (aconnector->mst_port) {
1277 mutex_unlock(&aconnector->hpd_lock);
1278 return -EINVAL;
1279 }
1280
1281 if (param[0] == 1) {
1282
1283 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type) &&
1284 new_connection_type != dc_connection_none)
1285 goto unlock;
1286
1287 mutex_lock(&adev->dm.dc_lock);
1288 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1289 mutex_unlock(&adev->dm.dc_lock);
1290
1291 if (!ret)
1292 goto unlock;
1293
1294 amdgpu_dm_update_connector_after_detect(aconnector);
1295
1296 drm_modeset_lock_all(dev);
1297 dm_restore_drm_connector_state(dev, connector);
1298 drm_modeset_unlock_all(dev);
1299
1300 drm_kms_helper_connector_hotplug_event(connector);
1301 } else if (param[0] == 0) {
1302 if (!aconnector->dc_link)
1303 goto unlock;
1304
1305 link = aconnector->dc_link;
1306
1307 if (link->local_sink) {
1308 dc_sink_release(link->local_sink);
1309 link->local_sink = NULL;
1310 }
1311
1312 link->dpcd_sink_count = 0;
1313 link->type = dc_connection_none;
1314 link->dongle_max_pix_clk = 0;
1315
1316 amdgpu_dm_update_connector_after_detect(aconnector);
1317
1318
1319 if (aconnector->mst_mgr.mst_state == true)
1320 reset_cur_dp_mst_topology(link);
1321
1322 drm_modeset_lock_all(dev);
1323 dm_restore_drm_connector_state(dev, connector);
1324 drm_modeset_unlock_all(dev);
1325
1326 drm_kms_helper_connector_hotplug_event(connector);
1327 }
1328
1329 unlock:
1330 mutex_unlock(&aconnector->hpd_lock);
1331
1332 return size;
1333 }
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350 static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
1351 size_t size, loff_t *pos)
1352 {
1353 char *rd_buf = NULL;
1354 char *rd_buf_ptr = NULL;
1355 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1356 struct display_stream_compressor *dsc;
1357 struct dcn_dsc_state dsc_state = {0};
1358 const uint32_t rd_buf_size = 10;
1359 struct pipe_ctx *pipe_ctx;
1360 ssize_t result = 0;
1361 int i, r, str_len = 30;
1362
1363 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1364
1365 if (!rd_buf)
1366 return -ENOMEM;
1367
1368 rd_buf_ptr = rd_buf;
1369
1370 for (i = 0; i < MAX_PIPES; i++) {
1371 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1372 if (pipe_ctx && pipe_ctx->stream &&
1373 pipe_ctx->stream->link == aconnector->dc_link)
1374 break;
1375 }
1376
1377 if (!pipe_ctx) {
1378 kfree(rd_buf);
1379 return -ENXIO;
1380 }
1381
1382 dsc = pipe_ctx->stream_res.dsc;
1383 if (dsc)
1384 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1385
1386 snprintf(rd_buf_ptr, str_len,
1387 "%d\n",
1388 dsc_state.dsc_clock_en);
1389 rd_buf_ptr += str_len;
1390
1391 while (size) {
1392 if (*pos >= rd_buf_size)
1393 break;
1394
1395 r = put_user(*(rd_buf + result), buf);
1396 if (r) {
1397 kfree(rd_buf);
1398 return r;
1399 }
1400
1401 buf += 1;
1402 size -= 1;
1403 *pos += 1;
1404 result += 1;
1405 }
1406
1407 kfree(rd_buf);
1408 return result;
1409 }
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436 static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
1437 size_t size, loff_t *pos)
1438 {
1439 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1440 struct drm_connector *connector = &aconnector->base;
1441 struct drm_device *dev = connector->dev;
1442 struct drm_crtc *crtc = NULL;
1443 struct dm_crtc_state *dm_crtc_state = NULL;
1444 struct pipe_ctx *pipe_ctx;
1445 int i;
1446 char *wr_buf = NULL;
1447 uint32_t wr_buf_size = 42;
1448 int max_param_num = 1;
1449 long param[1] = {0};
1450 uint8_t param_nums = 0;
1451
1452 if (size == 0)
1453 return -EINVAL;
1454
1455 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1456
1457 if (!wr_buf) {
1458 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1459 return -ENOSPC;
1460 }
1461
1462 if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
1463 (long *)param, buf,
1464 max_param_num,
1465 ¶m_nums)) {
1466 kfree(wr_buf);
1467 return -EINVAL;
1468 }
1469
1470 if (param_nums <= 0) {
1471 DRM_DEBUG_DRIVER("user data not be read\n");
1472 kfree(wr_buf);
1473 return -EINVAL;
1474 }
1475
1476 for (i = 0; i < MAX_PIPES; i++) {
1477 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1478 if (pipe_ctx && pipe_ctx->stream &&
1479 pipe_ctx->stream->link == aconnector->dc_link)
1480 break;
1481 }
1482
1483 if (!pipe_ctx || !pipe_ctx->stream)
1484 goto done;
1485
1486
1487 mutex_lock(&dev->mode_config.mutex);
1488 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1489
1490 if (connector->state == NULL)
1491 goto unlock;
1492
1493 crtc = connector->state->crtc;
1494 if (crtc == NULL)
1495 goto unlock;
1496
1497 drm_modeset_lock(&crtc->mutex, NULL);
1498 if (crtc->state == NULL)
1499 goto unlock;
1500
1501 dm_crtc_state = to_dm_crtc_state(crtc->state);
1502 if (dm_crtc_state->stream == NULL)
1503 goto unlock;
1504
1505 if (param[0] == 1)
1506 aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_ENABLE;
1507 else if (param[0] == 2)
1508 aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DISABLE;
1509 else
1510 aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DEFAULT;
1511
1512 dm_crtc_state->dsc_force_changed = true;
1513
1514 unlock:
1515 if (crtc)
1516 drm_modeset_unlock(&crtc->mutex);
1517 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1518 mutex_unlock(&dev->mode_config.mutex);
1519
1520 done:
1521 kfree(wr_buf);
1522 return size;
1523 }
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541 static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
1542 size_t size, loff_t *pos)
1543 {
1544 char *rd_buf = NULL;
1545 char *rd_buf_ptr = NULL;
1546 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1547 struct display_stream_compressor *dsc;
1548 struct dcn_dsc_state dsc_state = {0};
1549 const uint32_t rd_buf_size = 100;
1550 struct pipe_ctx *pipe_ctx;
1551 ssize_t result = 0;
1552 int i, r, str_len = 30;
1553
1554 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1555
1556 if (!rd_buf)
1557 return -ENOMEM;
1558
1559 rd_buf_ptr = rd_buf;
1560
1561 for (i = 0; i < MAX_PIPES; i++) {
1562 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1563 if (pipe_ctx && pipe_ctx->stream &&
1564 pipe_ctx->stream->link == aconnector->dc_link)
1565 break;
1566 }
1567
1568 if (!pipe_ctx) {
1569 kfree(rd_buf);
1570 return -ENXIO;
1571 }
1572
1573 dsc = pipe_ctx->stream_res.dsc;
1574 if (dsc)
1575 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1576
1577 snprintf(rd_buf_ptr, str_len,
1578 "%d\n",
1579 dsc_state.dsc_slice_width);
1580 rd_buf_ptr += str_len;
1581
1582 while (size) {
1583 if (*pos >= rd_buf_size)
1584 break;
1585
1586 r = put_user(*(rd_buf + result), buf);
1587 if (r) {
1588 kfree(rd_buf);
1589 return r;
1590 }
1591
1592 buf += 1;
1593 size -= 1;
1594 *pos += 1;
1595 result += 1;
1596 }
1597
1598 kfree(rd_buf);
1599 return result;
1600 }
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625 static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
1626 size_t size, loff_t *pos)
1627 {
1628 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1629 struct pipe_ctx *pipe_ctx;
1630 struct drm_connector *connector = &aconnector->base;
1631 struct drm_device *dev = connector->dev;
1632 struct drm_crtc *crtc = NULL;
1633 struct dm_crtc_state *dm_crtc_state = NULL;
1634 int i;
1635 char *wr_buf = NULL;
1636 uint32_t wr_buf_size = 42;
1637 int max_param_num = 1;
1638 long param[1] = {0};
1639 uint8_t param_nums = 0;
1640
1641 if (size == 0)
1642 return -EINVAL;
1643
1644 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1645
1646 if (!wr_buf) {
1647 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1648 return -ENOSPC;
1649 }
1650
1651 if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
1652 (long *)param, buf,
1653 max_param_num,
1654 ¶m_nums)) {
1655 kfree(wr_buf);
1656 return -EINVAL;
1657 }
1658
1659 if (param_nums <= 0) {
1660 DRM_DEBUG_DRIVER("user data not be read\n");
1661 kfree(wr_buf);
1662 return -EINVAL;
1663 }
1664
1665 for (i = 0; i < MAX_PIPES; i++) {
1666 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1667 if (pipe_ctx && pipe_ctx->stream &&
1668 pipe_ctx->stream->link == aconnector->dc_link)
1669 break;
1670 }
1671
1672 if (!pipe_ctx || !pipe_ctx->stream)
1673 goto done;
1674
1675
1676 mutex_lock(&dev->mode_config.mutex);
1677 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1678
1679 if (connector->state == NULL)
1680 goto unlock;
1681
1682 crtc = connector->state->crtc;
1683 if (crtc == NULL)
1684 goto unlock;
1685
1686 drm_modeset_lock(&crtc->mutex, NULL);
1687 if (crtc->state == NULL)
1688 goto unlock;
1689
1690 dm_crtc_state = to_dm_crtc_state(crtc->state);
1691 if (dm_crtc_state->stream == NULL)
1692 goto unlock;
1693
1694 if (param[0] > 0)
1695 aconnector->dsc_settings.dsc_num_slices_h = DIV_ROUND_UP(
1696 pipe_ctx->stream->timing.h_addressable,
1697 param[0]);
1698 else
1699 aconnector->dsc_settings.dsc_num_slices_h = 0;
1700
1701 dm_crtc_state->dsc_force_changed = true;
1702
1703 unlock:
1704 if (crtc)
1705 drm_modeset_unlock(&crtc->mutex);
1706 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1707 mutex_unlock(&dev->mode_config.mutex);
1708
1709 done:
1710 kfree(wr_buf);
1711 return size;
1712 }
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730 static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
1731 size_t size, loff_t *pos)
1732 {
1733 char *rd_buf = NULL;
1734 char *rd_buf_ptr = NULL;
1735 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1736 struct display_stream_compressor *dsc;
1737 struct dcn_dsc_state dsc_state = {0};
1738 const uint32_t rd_buf_size = 100;
1739 struct pipe_ctx *pipe_ctx;
1740 ssize_t result = 0;
1741 int i, r, str_len = 30;
1742
1743 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1744
1745 if (!rd_buf)
1746 return -ENOMEM;
1747
1748 rd_buf_ptr = rd_buf;
1749
1750 for (i = 0; i < MAX_PIPES; i++) {
1751 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1752 if (pipe_ctx && pipe_ctx->stream &&
1753 pipe_ctx->stream->link == aconnector->dc_link)
1754 break;
1755 }
1756
1757 if (!pipe_ctx) {
1758 kfree(rd_buf);
1759 return -ENXIO;
1760 }
1761
1762 dsc = pipe_ctx->stream_res.dsc;
1763 if (dsc)
1764 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1765
1766 snprintf(rd_buf_ptr, str_len,
1767 "%d\n",
1768 dsc_state.dsc_slice_height);
1769 rd_buf_ptr += str_len;
1770
1771 while (size) {
1772 if (*pos >= rd_buf_size)
1773 break;
1774
1775 r = put_user(*(rd_buf + result), buf);
1776 if (r) {
1777 kfree(rd_buf);
1778 return r;
1779 }
1780
1781 buf += 1;
1782 size -= 1;
1783 *pos += 1;
1784 result += 1;
1785 }
1786
1787 kfree(rd_buf);
1788 return result;
1789 }
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814 static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
1815 size_t size, loff_t *pos)
1816 {
1817 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1818 struct drm_connector *connector = &aconnector->base;
1819 struct drm_device *dev = connector->dev;
1820 struct drm_crtc *crtc = NULL;
1821 struct dm_crtc_state *dm_crtc_state = NULL;
1822 struct pipe_ctx *pipe_ctx;
1823 int i;
1824 char *wr_buf = NULL;
1825 uint32_t wr_buf_size = 42;
1826 int max_param_num = 1;
1827 uint8_t param_nums = 0;
1828 long param[1] = {0};
1829
1830 if (size == 0)
1831 return -EINVAL;
1832
1833 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1834
1835 if (!wr_buf) {
1836 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1837 return -ENOSPC;
1838 }
1839
1840 if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
1841 (long *)param, buf,
1842 max_param_num,
1843 ¶m_nums)) {
1844 kfree(wr_buf);
1845 return -EINVAL;
1846 }
1847
1848 if (param_nums <= 0) {
1849 DRM_DEBUG_DRIVER("user data not be read\n");
1850 kfree(wr_buf);
1851 return -EINVAL;
1852 }
1853
1854 for (i = 0; i < MAX_PIPES; i++) {
1855 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1856 if (pipe_ctx && pipe_ctx->stream &&
1857 pipe_ctx->stream->link == aconnector->dc_link)
1858 break;
1859 }
1860
1861 if (!pipe_ctx || !pipe_ctx->stream)
1862 goto done;
1863
1864
1865 mutex_lock(&dev->mode_config.mutex);
1866 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1867
1868 if (connector->state == NULL)
1869 goto unlock;
1870
1871 crtc = connector->state->crtc;
1872 if (crtc == NULL)
1873 goto unlock;
1874
1875 drm_modeset_lock(&crtc->mutex, NULL);
1876 if (crtc->state == NULL)
1877 goto unlock;
1878
1879 dm_crtc_state = to_dm_crtc_state(crtc->state);
1880 if (dm_crtc_state->stream == NULL)
1881 goto unlock;
1882
1883 if (param[0] > 0)
1884 aconnector->dsc_settings.dsc_num_slices_v = DIV_ROUND_UP(
1885 pipe_ctx->stream->timing.v_addressable,
1886 param[0]);
1887 else
1888 aconnector->dsc_settings.dsc_num_slices_v = 0;
1889
1890 dm_crtc_state->dsc_force_changed = true;
1891
1892 unlock:
1893 if (crtc)
1894 drm_modeset_unlock(&crtc->mutex);
1895 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1896 mutex_unlock(&dev->mode_config.mutex);
1897
1898 done:
1899 kfree(wr_buf);
1900 return size;
1901 }
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915 static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
1916 size_t size, loff_t *pos)
1917 {
1918 char *rd_buf = NULL;
1919 char *rd_buf_ptr = NULL;
1920 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1921 struct display_stream_compressor *dsc;
1922 struct dcn_dsc_state dsc_state = {0};
1923 const uint32_t rd_buf_size = 100;
1924 struct pipe_ctx *pipe_ctx;
1925 ssize_t result = 0;
1926 int i, r, str_len = 30;
1927
1928 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1929
1930 if (!rd_buf)
1931 return -ENOMEM;
1932
1933 rd_buf_ptr = rd_buf;
1934
1935 for (i = 0; i < MAX_PIPES; i++) {
1936 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1937 if (pipe_ctx && pipe_ctx->stream &&
1938 pipe_ctx->stream->link == aconnector->dc_link)
1939 break;
1940 }
1941
1942 if (!pipe_ctx) {
1943 kfree(rd_buf);
1944 return -ENXIO;
1945 }
1946
1947 dsc = pipe_ctx->stream_res.dsc;
1948 if (dsc)
1949 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1950
1951 snprintf(rd_buf_ptr, str_len,
1952 "%d\n",
1953 dsc_state.dsc_bits_per_pixel);
1954 rd_buf_ptr += str_len;
1955
1956 while (size) {
1957 if (*pos >= rd_buf_size)
1958 break;
1959
1960 r = put_user(*(rd_buf + result), buf);
1961 if (r) {
1962 kfree(rd_buf);
1963 return r;
1964 }
1965
1966 buf += 1;
1967 size -= 1;
1968 *pos += 1;
1969 result += 1;
1970 }
1971
1972 kfree(rd_buf);
1973 return result;
1974 }
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996 static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *buf,
1997 size_t size, loff_t *pos)
1998 {
1999 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
2000 struct drm_connector *connector = &aconnector->base;
2001 struct drm_device *dev = connector->dev;
2002 struct drm_crtc *crtc = NULL;
2003 struct dm_crtc_state *dm_crtc_state = NULL;
2004 struct pipe_ctx *pipe_ctx;
2005 int i;
2006 char *wr_buf = NULL;
2007 uint32_t wr_buf_size = 42;
2008 int max_param_num = 1;
2009 uint8_t param_nums = 0;
2010 long param[1] = {0};
2011
2012 if (size == 0)
2013 return -EINVAL;
2014
2015 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
2016
2017 if (!wr_buf) {
2018 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
2019 return -ENOSPC;
2020 }
2021
2022 if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
2023 (long *)param, buf,
2024 max_param_num,
2025 ¶m_nums)) {
2026 kfree(wr_buf);
2027 return -EINVAL;
2028 }
2029
2030 if (param_nums <= 0) {
2031 DRM_DEBUG_DRIVER("user data not be read\n");
2032 kfree(wr_buf);
2033 return -EINVAL;
2034 }
2035
2036 for (i = 0; i < MAX_PIPES; i++) {
2037 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
2038 if (pipe_ctx && pipe_ctx->stream &&
2039 pipe_ctx->stream->link == aconnector->dc_link)
2040 break;
2041 }
2042
2043 if (!pipe_ctx || !pipe_ctx->stream)
2044 goto done;
2045
2046
2047 mutex_lock(&dev->mode_config.mutex);
2048 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
2049
2050 if (connector->state == NULL)
2051 goto unlock;
2052
2053 crtc = connector->state->crtc;
2054 if (crtc == NULL)
2055 goto unlock;
2056
2057 drm_modeset_lock(&crtc->mutex, NULL);
2058 if (crtc->state == NULL)
2059 goto unlock;
2060
2061 dm_crtc_state = to_dm_crtc_state(crtc->state);
2062 if (dm_crtc_state->stream == NULL)
2063 goto unlock;
2064
2065 aconnector->dsc_settings.dsc_bits_per_pixel = param[0];
2066
2067 dm_crtc_state->dsc_force_changed = true;
2068
2069 unlock:
2070 if (crtc)
2071 drm_modeset_unlock(&crtc->mutex);
2072 drm_modeset_unlock(&dev->mode_config.connection_mutex);
2073 mutex_unlock(&dev->mode_config.mutex);
2074
2075 done:
2076 kfree(wr_buf);
2077 return size;
2078 }
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095 static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
2096 size_t size, loff_t *pos)
2097 {
2098 char *rd_buf = NULL;
2099 char *rd_buf_ptr = NULL;
2100 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
2101 struct display_stream_compressor *dsc;
2102 struct dcn_dsc_state dsc_state = {0};
2103 const uint32_t rd_buf_size = 100;
2104 struct pipe_ctx *pipe_ctx;
2105 ssize_t result = 0;
2106 int i, r, str_len = 30;
2107
2108 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
2109
2110 if (!rd_buf)
2111 return -ENOMEM;
2112
2113 rd_buf_ptr = rd_buf;
2114
2115 for (i = 0; i < MAX_PIPES; i++) {
2116 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
2117 if (pipe_ctx && pipe_ctx->stream &&
2118 pipe_ctx->stream->link == aconnector->dc_link)
2119 break;
2120 }
2121
2122 if (!pipe_ctx) {
2123 kfree(rd_buf);
2124 return -ENXIO;
2125 }
2126
2127 dsc = pipe_ctx->stream_res.dsc;
2128 if (dsc)
2129 dsc->funcs->dsc_read_state(dsc, &dsc_state);
2130
2131 snprintf(rd_buf_ptr, str_len,
2132 "%d\n",
2133 dsc_state.dsc_pic_width);
2134 rd_buf_ptr += str_len;
2135
2136 while (size) {
2137 if (*pos >= rd_buf_size)
2138 break;
2139
2140 r = put_user(*(rd_buf + result), buf);
2141 if (r) {
2142 kfree(rd_buf);
2143 return r;
2144 }
2145
2146 buf += 1;
2147 size -= 1;
2148 *pos += 1;
2149 result += 1;
2150 }
2151
2152 kfree(rd_buf);
2153 return result;
2154 }
2155
2156 static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
2157 size_t size, loff_t *pos)
2158 {
2159 char *rd_buf = NULL;
2160 char *rd_buf_ptr = NULL;
2161 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
2162 struct display_stream_compressor *dsc;
2163 struct dcn_dsc_state dsc_state = {0};
2164 const uint32_t rd_buf_size = 100;
2165 struct pipe_ctx *pipe_ctx;
2166 ssize_t result = 0;
2167 int i, r, str_len = 30;
2168
2169 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
2170
2171 if (!rd_buf)
2172 return -ENOMEM;
2173
2174 rd_buf_ptr = rd_buf;
2175
2176 for (i = 0; i < MAX_PIPES; i++) {
2177 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
2178 if (pipe_ctx && pipe_ctx->stream &&
2179 pipe_ctx->stream->link == aconnector->dc_link)
2180 break;
2181 }
2182
2183 if (!pipe_ctx) {
2184 kfree(rd_buf);
2185 return -ENXIO;
2186 }
2187
2188 dsc = pipe_ctx->stream_res.dsc;
2189 if (dsc)
2190 dsc->funcs->dsc_read_state(dsc, &dsc_state);
2191
2192 snprintf(rd_buf_ptr, str_len,
2193 "%d\n",
2194 dsc_state.dsc_pic_height);
2195 rd_buf_ptr += str_len;
2196
2197 while (size) {
2198 if (*pos >= rd_buf_size)
2199 break;
2200
2201 r = put_user(*(rd_buf + result), buf);
2202 if (r) {
2203 kfree(rd_buf);
2204 return r;
2205 }
2206
2207 buf += 1;
2208 size -= 1;
2209 *pos += 1;
2210 result += 1;
2211 }
2212
2213 kfree(rd_buf);
2214 return result;
2215 }
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232 static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
2233 size_t size, loff_t *pos)
2234 {
2235 char *rd_buf = NULL;
2236 char *rd_buf_ptr = NULL;
2237 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
2238 struct display_stream_compressor *dsc;
2239 struct dcn_dsc_state dsc_state = {0};
2240 const uint32_t rd_buf_size = 100;
2241 struct pipe_ctx *pipe_ctx;
2242 ssize_t result = 0;
2243 int i, r, str_len = 30;
2244
2245 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
2246
2247 if (!rd_buf)
2248 return -ENOMEM;
2249
2250 rd_buf_ptr = rd_buf;
2251
2252 for (i = 0; i < MAX_PIPES; i++) {
2253 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
2254 if (pipe_ctx && pipe_ctx->stream &&
2255 pipe_ctx->stream->link == aconnector->dc_link)
2256 break;
2257 }
2258
2259 if (!pipe_ctx) {
2260 kfree(rd_buf);
2261 return -ENXIO;
2262 }
2263
2264 dsc = pipe_ctx->stream_res.dsc;
2265 if (dsc)
2266 dsc->funcs->dsc_read_state(dsc, &dsc_state);
2267
2268 snprintf(rd_buf_ptr, str_len,
2269 "%d\n",
2270 dsc_state.dsc_chunk_size);
2271 rd_buf_ptr += str_len;
2272
2273 while (size) {
2274 if (*pos >= rd_buf_size)
2275 break;
2276
2277 r = put_user(*(rd_buf + result), buf);
2278 if (r) {
2279 kfree(rd_buf);
2280 return r;
2281 }
2282
2283 buf += 1;
2284 size -= 1;
2285 *pos += 1;
2286 result += 1;
2287 }
2288
2289 kfree(rd_buf);
2290 return result;
2291 }
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308 static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
2309 size_t size, loff_t *pos)
2310 {
2311 char *rd_buf = NULL;
2312 char *rd_buf_ptr = NULL;
2313 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
2314 struct display_stream_compressor *dsc;
2315 struct dcn_dsc_state dsc_state = {0};
2316 const uint32_t rd_buf_size = 100;
2317 struct pipe_ctx *pipe_ctx;
2318 ssize_t result = 0;
2319 int i, r, str_len = 30;
2320
2321 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
2322
2323 if (!rd_buf)
2324 return -ENOMEM;
2325
2326 rd_buf_ptr = rd_buf;
2327
2328 for (i = 0; i < MAX_PIPES; i++) {
2329 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
2330 if (pipe_ctx && pipe_ctx->stream &&
2331 pipe_ctx->stream->link == aconnector->dc_link)
2332 break;
2333 }
2334
2335 if (!pipe_ctx) {
2336 kfree(rd_buf);
2337 return -ENXIO;
2338 }
2339
2340 dsc = pipe_ctx->stream_res.dsc;
2341 if (dsc)
2342 dsc->funcs->dsc_read_state(dsc, &dsc_state);
2343
2344 snprintf(rd_buf_ptr, str_len,
2345 "%d\n",
2346 dsc_state.dsc_slice_bpg_offset);
2347 rd_buf_ptr += str_len;
2348
2349 while (size) {
2350 if (*pos >= rd_buf_size)
2351 break;
2352
2353 r = put_user(*(rd_buf + result), buf);
2354 if (r) {
2355 kfree(rd_buf);
2356 return r;
2357 }
2358
2359 buf += 1;
2360 size -= 1;
2361 *pos += 1;
2362 result += 1;
2363 }
2364
2365 kfree(rd_buf);
2366 return result;
2367 }
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378 static ssize_t dp_max_bpc_read(struct file *f, char __user *buf,
2379 size_t size, loff_t *pos)
2380 {
2381 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
2382 struct drm_connector *connector = &aconnector->base;
2383 struct drm_device *dev = connector->dev;
2384 struct dm_connector_state *state;
2385 ssize_t result = 0;
2386 char *rd_buf = NULL;
2387 char *rd_buf_ptr = NULL;
2388 const uint32_t rd_buf_size = 10;
2389 int r;
2390
2391 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
2392
2393 if (!rd_buf)
2394 return -ENOMEM;
2395
2396 mutex_lock(&dev->mode_config.mutex);
2397 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
2398
2399 if (connector->state == NULL)
2400 goto unlock;
2401
2402 state = to_dm_connector_state(connector->state);
2403
2404 rd_buf_ptr = rd_buf;
2405 snprintf(rd_buf_ptr, rd_buf_size,
2406 "%u\n",
2407 state->base.max_requested_bpc);
2408
2409 while (size) {
2410 if (*pos >= rd_buf_size)
2411 break;
2412
2413 r = put_user(*(rd_buf + result), buf);
2414 if (r) {
2415 result = r;
2416 goto unlock;
2417 }
2418 buf += 1;
2419 size -= 1;
2420 *pos += 1;
2421 result += 1;
2422 }
2423 unlock:
2424 drm_modeset_unlock(&dev->mode_config.connection_mutex);
2425 mutex_unlock(&dev->mode_config.mutex);
2426 kfree(rd_buf);
2427 return result;
2428 }
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453 static ssize_t dp_max_bpc_write(struct file *f, const char __user *buf,
2454 size_t size, loff_t *pos)
2455 {
2456 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
2457 struct drm_connector *connector = &aconnector->base;
2458 struct dm_connector_state *state;
2459 struct drm_device *dev = connector->dev;
2460 char *wr_buf = NULL;
2461 uint32_t wr_buf_size = 42;
2462 int max_param_num = 1;
2463 long param[1] = {0};
2464 uint8_t param_nums = 0;
2465
2466 if (size == 0)
2467 return -EINVAL;
2468
2469 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
2470
2471 if (!wr_buf) {
2472 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
2473 return -ENOSPC;
2474 }
2475
2476 if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
2477 (long *)param, buf,
2478 max_param_num,
2479 ¶m_nums)) {
2480 kfree(wr_buf);
2481 return -EINVAL;
2482 }
2483
2484 if (param_nums <= 0) {
2485 DRM_DEBUG_DRIVER("user data not be read\n");
2486 kfree(wr_buf);
2487 return -EINVAL;
2488 }
2489
2490 if (param[0] < 6 || param[0] > 16) {
2491 DRM_DEBUG_DRIVER("bad max_bpc value\n");
2492 kfree(wr_buf);
2493 return -EINVAL;
2494 }
2495
2496 mutex_lock(&dev->mode_config.mutex);
2497 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
2498
2499 if (connector->state == NULL)
2500 goto unlock;
2501
2502 state = to_dm_connector_state(connector->state);
2503 state->base.max_requested_bpc = param[0];
2504 unlock:
2505 drm_modeset_unlock(&dev->mode_config.connection_mutex);
2506 mutex_unlock(&dev->mode_config.mutex);
2507
2508 kfree(wr_buf);
2509 return size;
2510 }
2511
2512
2513
2514
2515
2516
2517
2518
2519 static int current_backlight_show(struct seq_file *m, void *unused)
2520 {
2521 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private);
2522 struct dc_link *link = aconnector->dc_link;
2523 unsigned int backlight;
2524
2525 backlight = dc_link_get_backlight_level(link);
2526 seq_printf(m, "0x%x\n", backlight);
2527
2528 return 0;
2529 }
2530
2531
2532
2533
2534
2535
2536
2537
2538 static int target_backlight_show(struct seq_file *m, void *unused)
2539 {
2540 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private);
2541 struct dc_link *link = aconnector->dc_link;
2542 unsigned int backlight;
2543
2544 backlight = dc_link_get_target_backlight_pwm(link);
2545 seq_printf(m, "0x%x\n", backlight);
2546
2547 return 0;
2548 }
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563 static int dp_is_mst_connector_show(struct seq_file *m, void *unused)
2564 {
2565 struct drm_connector *connector = m->private;
2566 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2567 struct drm_dp_mst_topology_mgr *mgr = NULL;
2568 struct drm_dp_mst_port *port = NULL;
2569 char *role = NULL;
2570
2571 mutex_lock(&aconnector->hpd_lock);
2572
2573 if (aconnector->mst_mgr.mst_state) {
2574 role = "root";
2575 } else if (aconnector->mst_port &&
2576 aconnector->mst_port->mst_mgr.mst_state) {
2577
2578 role = "end";
2579
2580 mgr = &aconnector->mst_port->mst_mgr;
2581 port = aconnector->port;
2582
2583 drm_modeset_lock(&mgr->base.lock, NULL);
2584 if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2585 port->mcs)
2586 role = "branch";
2587 drm_modeset_unlock(&mgr->base.lock);
2588
2589 } else {
2590 role = "no";
2591 }
2592
2593 seq_printf(m, "%s\n", role);
2594
2595 mutex_unlock(&aconnector->hpd_lock);
2596
2597 return 0;
2598 }
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
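/*
 * Reports MST probing progress for the connector. Prints "disabled" when no
 * MST status has been recorded, otherwise prints one "done"/"not_done" line
 * per stage in mst_progress_status[] (probe, remote_edid,
 * allocate_new_payload, clear_allocated_payload).
 */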
2611 static int dp_mst_progress_status_show(struct seq_file *m, void *unused)
2612 {
2613 struct drm_connector *connector = m->private;
2614 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2615 struct amdgpu_device *adev = drm_to_adev(connector->dev);
2616 int i;
2617
2618 mutex_lock(&aconnector->hpd_lock);
2619 mutex_lock(&adev->dm.dc_lock);
2620
2621 if (aconnector->mst_status == MST_STATUS_DEFAULT) {
2622 seq_puts(m, "disabled\n");
2623 } else {
2624 for (i = 0; i < sizeof(mst_progress_status)/sizeof(char *); i++)
2625 seq_printf(m, "%s:%s\n",
2626 mst_progress_status[i],
2627 aconnector->mst_status & BIT(i) ? "done" : "not_done");
2628 }
2629
2630 mutex_unlock(&adev->dm.dc_lock);
2631 mutex_unlock(&aconnector->hpd_lock);
2632
2633 return 0;
2634 }
2635
2636 DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
2637 DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
2638 DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
2639 DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
2640 #ifdef CONFIG_DRM_AMD_DC_HDCP
2641 DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
2642 #endif
2643 DEFINE_SHOW_ATTRIBUTE(internal_display);
2644 DEFINE_SHOW_ATTRIBUTE(psr_capability);
2645 DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector);
2646 DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status);
2647
2648 static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
2649 .owner = THIS_MODULE,
2650 .read = dp_dsc_clock_en_read,
2651 .write = dp_dsc_clock_en_write,
2652 .llseek = default_llseek
2653 };
2654
2655 static const struct file_operations dp_dsc_slice_width_debugfs_fops = {
2656 .owner = THIS_MODULE,
2657 .read = dp_dsc_slice_width_read,
2658 .write = dp_dsc_slice_width_write,
2659 .llseek = default_llseek
2660 };
2661
2662 static const struct file_operations dp_dsc_slice_height_debugfs_fops = {
2663 .owner = THIS_MODULE,
2664 .read = dp_dsc_slice_height_read,
2665 .write = dp_dsc_slice_height_write,
2666 .llseek = default_llseek
2667 };
2668
2669 static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = {
2670 .owner = THIS_MODULE,
2671 .read = dp_dsc_bits_per_pixel_read,
2672 .write = dp_dsc_bits_per_pixel_write,
2673 .llseek = default_llseek
2674 };
2675
2676 static const struct file_operations dp_dsc_pic_width_debugfs_fops = {
2677 .owner = THIS_MODULE,
2678 .read = dp_dsc_pic_width_read,
2679 .llseek = default_llseek
2680 };
2681
2682 static const struct file_operations dp_dsc_pic_height_debugfs_fops = {
2683 .owner = THIS_MODULE,
2684 .read = dp_dsc_pic_height_read,
2685 .llseek = default_llseek
2686 };
2687
2688 static const struct file_operations dp_dsc_chunk_size_debugfs_fops = {
2689 .owner = THIS_MODULE,
2690 .read = dp_dsc_chunk_size_read,
2691 .llseek = default_llseek
2692 };
2693
2694 static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = {
2695 .owner = THIS_MODULE,
2696 .read = dp_dsc_slice_bpg_offset_read,
2697 .llseek = default_llseek
2698 };
2699
2700 static const struct file_operations trigger_hotplug_debugfs_fops = {
2701 .owner = THIS_MODULE,
2702 .write = trigger_hotplug,
2703 .llseek = default_llseek
2704 };
2705
2706 static const struct file_operations dp_link_settings_debugfs_fops = {
2707 .owner = THIS_MODULE,
2708 .read = dp_link_settings_read,
2709 .write = dp_link_settings_write,
2710 .llseek = default_llseek
2711 };
2712
2713 static const struct file_operations dp_phy_settings_debugfs_fop = {
2714 .owner = THIS_MODULE,
2715 .read = dp_phy_settings_read,
2716 .write = dp_phy_settings_write,
2717 .llseek = default_llseek
2718 };
2719
2720 static const struct file_operations dp_phy_test_pattern_fops = {
2721 .owner = THIS_MODULE,
2722 .write = dp_phy_test_pattern_debugfs_write,
2723 .llseek = default_llseek
2724 };
2725
2726 static const struct file_operations sdp_message_fops = {
2727 .owner = THIS_MODULE,
2728 .write = dp_sdp_message_debugfs_write,
2729 .llseek = default_llseek
2730 };
2731
2732 static const struct file_operations dp_dpcd_address_debugfs_fops = {
2733 .owner = THIS_MODULE,
2734 .write = dp_dpcd_address_write,
2735 .llseek = default_llseek
2736 };
2737
2738 static const struct file_operations dp_dpcd_size_debugfs_fops = {
2739 .owner = THIS_MODULE,
2740 .write = dp_dpcd_size_write,
2741 .llseek = default_llseek
2742 };
2743
2744 static const struct file_operations dp_dpcd_data_debugfs_fops = {
2745 .owner = THIS_MODULE,
2746 .read = dp_dpcd_data_read,
2747 .write = dp_dpcd_data_write,
2748 .llseek = default_llseek
2749 };
2750
2751 static const struct file_operations dp_max_bpc_debugfs_fops = {
2752 .owner = THIS_MODULE,
2753 .read = dp_max_bpc_read,
2754 .write = dp_max_bpc_write,
2755 .llseek = default_llseek
2756 };
2757
2758 static const struct file_operations dp_dsc_disable_passthrough_debugfs_fops = {
2759 .owner = THIS_MODULE,
2760 .write = dp_dsc_passthrough_set,
2761 .llseek = default_llseek
2762 };
2763
2764 static const struct {
2765 char *name;
2766 const struct file_operations *fops;
2767 } dp_debugfs_entries[] = {
2768 {"link_settings", &dp_link_settings_debugfs_fops},
2769 {"phy_settings", &dp_phy_settings_debugfs_fop},
2770 {"lttpr_status", &dp_lttpr_status_fops},
2771 {"test_pattern", &dp_phy_test_pattern_fops},
2772 #ifdef CONFIG_DRM_AMD_DC_HDCP
2773 {"hdcp_sink_capability", &hdcp_sink_capability_fops},
2774 #endif
2775 {"sdp_message", &sdp_message_fops},
2776 {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
2777 {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
2778 {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops},
2779 {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops},
2780 {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops},
2781 {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops},
2782 {"dsc_bits_per_pixel", &dp_dsc_bits_per_pixel_debugfs_fops},
2783 {"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops},
2784 {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops},
2785 {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},
2786 {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops},
2787 {"dp_dsc_fec_support", &dp_dsc_fec_support_fops},
2788 {"max_bpc", &dp_max_bpc_debugfs_fops},
2789 {"dsc_disable_passthrough", &dp_dsc_disable_passthrough_debugfs_fops},
2790 {"is_mst_connector", &dp_is_mst_connector_fops},
2791 {"mst_progress_status", &dp_mst_progress_status_fops}
2792 };
2793
2794 #ifdef CONFIG_DRM_AMD_DC_HDCP
2795 static const struct {
2796 char *name;
2797 const struct file_operations *fops;
2798 } hdmi_debugfs_entries[] = {
2799 {"hdcp_sink_capability", &hdcp_sink_capability_fops}
2800 };
2801 #endif
2802
2803
2804
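/*
 * Debugfs toggle that forces the connector to output YUV420 regardless of
 * what the mode would normally select. Any non-zero write enables the
 * override; zero disables it.
 */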
2805 static int force_yuv420_output_set(void *data, u64 val)
2806 {
2807 struct amdgpu_dm_connector *connector = data;
2808
2809 connector->force_yuv420_output = (bool)val;
2810
2811 return 0;
2812 }
2813
2814
2815
2816
2817 static int force_yuv420_output_get(void *data, u64 *val)
2818 {
2819 struct amdgpu_dm_connector *connector = data;
2820
2821 *val = connector->force_yuv420_output;
2822
2823 return 0;
2824 }
2825
2826 DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get,
2827 force_yuv420_output_set, "%llu\n");
2828
2829
2830
2831
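/*
 * Reads the current PSR (Panel Self Refresh) state for the link as reported
 * by dc_link_get_psr_state(). Exposed read-only through the "psr_state"
 * entry.
 */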
2832 static int psr_get(void *data, u64 *val)
2833 {
2834 struct amdgpu_dm_connector *connector = data;
2835 struct dc_link *link = connector->dc_link;
2836 enum dc_psr_state state = PSR_STATE0;
2837
2838 dc_link_get_psr_state(link, &state);
2839
2840 *val = state;
2841
2842 return 0;
2843 }
2844
2845
2846
2847
2848
2849
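/*
 * Enables (1) or disables (0) DMCUB trace event collection. Any other value
 * is silently ignored.
 */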
2850 static int dmcub_trace_event_state_set(void *data, u64 val)
2851 {
2852 struct amdgpu_device *adev = data;
2853
2854 if (val == 1 || val == 0) {
2855 dc_dmub_trace_event_control(adev->dm.dc, val);
2856 adev->dm.dmcub_trace_event_en = (bool)val;
2857 } else
2858 return 0;
2859
2860 return 0;
2861 }
2862
2863
2864
2865
2866
2867
2868 static int dmcub_trace_event_state_get(void *data, u64 *val)
2869 {
2870 struct amdgpu_device *adev = data;
2871
2872 *val = adev->dm.dmcub_trace_event_en;
2873 return 0;
2874 }
2875
2876 DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_get,
2877 dmcub_trace_event_state_set, "%llu\n");
2878
2879 DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
2880
2881 DEFINE_SHOW_ATTRIBUTE(current_backlight);
2882 DEFINE_SHOW_ATTRIBUTE(target_backlight);
2883
2884 static const struct {
2885 char *name;
2886 const struct file_operations *fops;
2887 } connector_debugfs_entries[] = {
2888 {"force_yuv420_output", &force_yuv420_output_fops},
2889 {"trigger_hotplug", &trigger_hotplug_debugfs_fops},
2890 {"internal_display", &internal_display_fops}
2891 };
2892
2893
2894
2895
2896
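/*
 * Lists the Intermediate Link Rates (ILR) advertised by an eDP 1.4+ panel in
 * DP_SUPPORTED_LINK_RATES, one "[index] <rate> kHz" line per entry. Prints a
 * message instead when the panel does not support ILR.
 */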
2897 static int edp_ilr_show(struct seq_file *m, void *unused)
2898 {
2899 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private);
2900 struct dc_link *link = aconnector->dc_link;
2901 uint8_t supported_link_rates[16];
2902 uint32_t link_rate_in_khz;
2903 uint32_t entry = 0;
2904 uint8_t dpcd_rev;
2905
2906 memset(supported_link_rates, 0, sizeof(supported_link_rates));
2907 dm_helpers_dp_read_dpcd(link->ctx, link, DP_SUPPORTED_LINK_RATES,
2908 supported_link_rates, sizeof(supported_link_rates));
2909
2910 dpcd_rev = link->dpcd_caps.dpcd_rev.raw;
2911
2912 if (dpcd_rev >= DP_DPCD_REV_13 &&
2913 (supported_link_rates[entry+1] != 0 || supported_link_rates[entry] != 0)) {
2914
2915 for (entry = 0; entry < 16; entry += 2) {
2916 link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
2917 supported_link_rates[entry]) * 200;
2918 seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz);
2919 }
2920 } else {
2921 seq_printf(m, "ILR is not supported by this eDP panel.\n");
2922 }
2923
2924 return 0;
2925 }
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
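/*
 * Applies a preferred eDP link configuration built from an ILR table index.
 * Expects two hexadecimal values: a lane count (1, 2 or 4) and an index into
 * the panel's supported link rate table. An invalid pair clears any
 * previously forced setting. Illustrative usage (debugfs path depends on the
 * system):
 *
 *   echo 4 0x2 > /sys/kernel/debug/dri/0/eDP-1/ilr_setting
 */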
2936 static ssize_t edp_ilr_write(struct file *f, const char __user *buf,
2937 size_t size, loff_t *pos)
2938 {
2939 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
2940 struct dc_link *link = connector->dc_link;
2941 struct amdgpu_device *adev = drm_to_adev(connector->base.dev);
2942 struct dc *dc = (struct dc *)link->dc;
2943 struct dc_link_settings prefer_link_settings;
2944 char *wr_buf = NULL;
2945 const uint32_t wr_buf_size = 40;
2946
2947 int max_param_num = 2;
2948 uint8_t param_nums = 0;
2949 	long param[2] = {0};
2950 bool valid_input = true;
2951
2952 if (size == 0)
2953 return -EINVAL;
2954
2955 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
2956 if (!wr_buf)
2957 return -ENOMEM;
2958
2959 if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
2960 (long *)param, buf,
2961 max_param_num,
2962 					   &param_nums)) {
2963 kfree(wr_buf);
2964 return -EINVAL;
2965 }
2966
2967 if (param_nums <= 0) {
2968 kfree(wr_buf);
2969 return -EINVAL;
2970 }
2971
2972 switch (param[0]) {
2973 case LANE_COUNT_ONE:
2974 case LANE_COUNT_TWO:
2975 case LANE_COUNT_FOUR:
2976 break;
2977 default:
2978 valid_input = false;
2979 break;
2980 }
2981
2982 if (param[1] >= link->dpcd_caps.edp_supported_link_rates_count)
2983 valid_input = false;
2984
2985 if (!valid_input) {
2986 kfree(wr_buf);
2987 DRM_DEBUG_DRIVER("Invalid Input value. No HW will be programmed\n");
2988 prefer_link_settings.use_link_rate_set = false;
2989 mutex_lock(&adev->dm.dc_lock);
2990 dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false);
2991 mutex_unlock(&adev->dm.dc_lock);
2992 return size;
2993 }
2994
2995
2996
2997
2998 prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
2999 prefer_link_settings.lane_count = param[0];
3000 prefer_link_settings.use_link_rate_set = true;
3001 prefer_link_settings.link_rate_set = param[1];
3002 prefer_link_settings.link_rate = link->dpcd_caps.edp_supported_link_rates[param[1]];
3003
3004 mutex_lock(&adev->dm.dc_lock);
3005 dc_link_set_preferred_training_settings(dc, &prefer_link_settings,
3006 NULL, link, false);
3007 mutex_unlock(&adev->dm.dc_lock);
3008
3009 kfree(wr_buf);
3010 return size;
3011 }
3012
3013 static int edp_ilr_open(struct inode *inode, struct file *file)
3014 {
3015 return single_open(file, edp_ilr_show, inode->i_private);
3016 }
3017
3018 static const struct file_operations edp_ilr_debugfs_fops = {
3019 .owner = THIS_MODULE,
3020 .open = edp_ilr_open,
3021 .read = seq_read,
3022 .llseek = seq_lseek,
3023 .release = single_release,
3024 .write = edp_ilr_write
3025 };
3026
3027 void connector_debugfs_init(struct amdgpu_dm_connector *connector)
3028 {
3029 int i;
3030 struct dentry *dir = connector->base.debugfs_entry;
3031
3032 if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
3033 connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
3034 for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
3035 debugfs_create_file(dp_debugfs_entries[i].name,
3036 0644, dir, connector,
3037 dp_debugfs_entries[i].fops);
3038 }
3039 }
3040 if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
3041 debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
3042 debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
3043 debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector,
3044 				    &current_backlight_fops);
3045 debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector,
3046 &target_backlight_fops);
3047 debugfs_create_file("ilr_setting", 0644, dir, connector,
3048 &edp_ilr_debugfs_fops);
3049 }
3050
3051 for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) {
3052 debugfs_create_file(connector_debugfs_entries[i].name,
3053 0644, dir, connector,
3054 connector_debugfs_entries[i].fops);
3055 }
3056
3057 connector->debugfs_dpcd_address = 0;
3058 connector->debugfs_dpcd_size = 0;
3059
3060 #ifdef CONFIG_DRM_AMD_DC_HDCP
3061 if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) {
3062 for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) {
3063 debugfs_create_file(hdmi_debugfs_entries[i].name,
3064 0644, dir, connector,
3065 hdmi_debugfs_entries[i].fops);
3066 }
3067 }
3068 #endif
3069 }
3070
3071 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
3072
3073
3074
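/*
 * The crc_win_* attributes below define a rectangular CRC capture window for
 * secure display. Each setter stores the new coordinate in the CRTC's IRQ
 * parameters and clears update_win, so the window only takes effect once
 * crc_win_update is written.
 */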
3075 static int crc_win_x_start_set(void *data, u64 val)
3076 {
3077 struct drm_crtc *crtc = data;
3078 struct drm_device *drm_dev = crtc->dev;
3079 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3080
3081 spin_lock_irq(&drm_dev->event_lock);
3082 acrtc->dm_irq_params.crc_window.x_start = (uint16_t) val;
3083 acrtc->dm_irq_params.crc_window.update_win = false;
3084 spin_unlock_irq(&drm_dev->event_lock);
3085
3086 return 0;
3087 }
3088
3089
3090
3091
3092 static int crc_win_x_start_get(void *data, u64 *val)
3093 {
3094 struct drm_crtc *crtc = data;
3095 struct drm_device *drm_dev = crtc->dev;
3096 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3097
3098 spin_lock_irq(&drm_dev->event_lock);
3099 *val = acrtc->dm_irq_params.crc_window.x_start;
3100 spin_unlock_irq(&drm_dev->event_lock);
3101
3102 return 0;
3103 }
3104
3105 DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_start_fops, crc_win_x_start_get,
3106 crc_win_x_start_set, "%llu\n");
3107
3108
3109
3110
3111
3112 static int crc_win_y_start_set(void *data, u64 val)
3113 {
3114 struct drm_crtc *crtc = data;
3115 struct drm_device *drm_dev = crtc->dev;
3116 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3117
3118 spin_lock_irq(&drm_dev->event_lock);
3119 acrtc->dm_irq_params.crc_window.y_start = (uint16_t) val;
3120 acrtc->dm_irq_params.crc_window.update_win = false;
3121 spin_unlock_irq(&drm_dev->event_lock);
3122
3123 return 0;
3124 }
3125
3126
3127
3128
3129 static int crc_win_y_start_get(void *data, u64 *val)
3130 {
3131 struct drm_crtc *crtc = data;
3132 struct drm_device *drm_dev = crtc->dev;
3133 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3134
3135 spin_lock_irq(&drm_dev->event_lock);
3136 *val = acrtc->dm_irq_params.crc_window.y_start;
3137 spin_unlock_irq(&drm_dev->event_lock);
3138
3139 return 0;
3140 }
3141
3142 DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_start_fops, crc_win_y_start_get,
3143 crc_win_y_start_set, "%llu\n");
3144
3145
3146
3147
3148 static int crc_win_x_end_set(void *data, u64 val)
3149 {
3150 struct drm_crtc *crtc = data;
3151 struct drm_device *drm_dev = crtc->dev;
3152 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3153
3154 spin_lock_irq(&drm_dev->event_lock);
3155 acrtc->dm_irq_params.crc_window.x_end = (uint16_t) val;
3156 acrtc->dm_irq_params.crc_window.update_win = false;
3157 spin_unlock_irq(&drm_dev->event_lock);
3158
3159 return 0;
3160 }
3161
3162
3163
3164
3165 static int crc_win_x_end_get(void *data, u64 *val)
3166 {
3167 struct drm_crtc *crtc = data;
3168 struct drm_device *drm_dev = crtc->dev;
3169 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3170
3171 spin_lock_irq(&drm_dev->event_lock);
3172 *val = acrtc->dm_irq_params.crc_window.x_end;
3173 spin_unlock_irq(&drm_dev->event_lock);
3174
3175 return 0;
3176 }
3177
3178 DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_end_fops, crc_win_x_end_get,
3179 crc_win_x_end_set, "%llu\n");
3180
3181
3182
3183
3184 static int crc_win_y_end_set(void *data, u64 val)
3185 {
3186 struct drm_crtc *crtc = data;
3187 struct drm_device *drm_dev = crtc->dev;
3188 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3189
3190 spin_lock_irq(&drm_dev->event_lock);
3191 acrtc->dm_irq_params.crc_window.y_end = (uint16_t) val;
3192 acrtc->dm_irq_params.crc_window.update_win = false;
3193 spin_unlock_irq(&drm_dev->event_lock);
3194
3195 return 0;
3196 }
3197
3198
3199
3200
3201 static int crc_win_y_end_get(void *data, u64 *val)
3202 {
3203 struct drm_crtc *crtc = data;
3204 struct drm_device *drm_dev = crtc->dev;
3205 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3206
3207 spin_lock_irq(&drm_dev->event_lock);
3208 *val = acrtc->dm_irq_params.crc_window.y_end;
3209 spin_unlock_irq(&drm_dev->event_lock);
3210
3211 return 0;
3212 }
3213
3214 DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get,
3215 crc_win_y_end_set, "%llu\n");
3216
3217
3218
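/*
 * Writing a non-zero value arms CRC window capture on this CRTC. If another
 * CRTC previously owned the CRC read work, its window is deactivated before
 * the new CRTC takes over.
 */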
3219 static int crc_win_update_set(void *data, u64 val)
3220 {
3221 struct drm_crtc *new_crtc = data;
3222 struct drm_crtc *old_crtc = NULL;
3223 struct amdgpu_crtc *new_acrtc, *old_acrtc;
3224 struct amdgpu_device *adev = drm_to_adev(new_crtc->dev);
3225 struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk;
3226
3227 if (!crc_rd_wrk)
3228 return 0;
3229
3230 if (val) {
3231 spin_lock_irq(&adev_to_drm(adev)->event_lock);
3232 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
3233 if (crc_rd_wrk->crtc) {
3234 old_crtc = crc_rd_wrk->crtc;
3235 old_acrtc = to_amdgpu_crtc(old_crtc);
3236 }
3237 new_acrtc = to_amdgpu_crtc(new_crtc);
3238
3239 if (old_crtc && old_crtc != new_crtc) {
3240 old_acrtc->dm_irq_params.crc_window.activated = false;
3241 old_acrtc->dm_irq_params.crc_window.update_win = false;
3242 old_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
3243
3244 new_acrtc->dm_irq_params.crc_window.activated = true;
3245 new_acrtc->dm_irq_params.crc_window.update_win = true;
3246 new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
3247 crc_rd_wrk->crtc = new_crtc;
3248 } else {
3249 new_acrtc->dm_irq_params.crc_window.activated = true;
3250 new_acrtc->dm_irq_params.crc_window.update_win = true;
3251 new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
3252 crc_rd_wrk->crtc = new_crtc;
3253 }
3254 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
3255 spin_unlock_irq(&adev_to_drm(adev)->event_lock);
3256 }
3257
3258 return 0;
3259 }
3260
3261
3262
3263
3264 static int crc_win_update_get(void *data, u64 *val)
3265 {
3266 *val = 0;
3267 return 0;
3268 }
3269
3270 DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get,
3271 crc_win_update_set, "%llu\n");
3272 #endif
3273 void crtc_debugfs_init(struct drm_crtc *crtc)
3274 {
3275 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
3276 struct dentry *dir = debugfs_lookup("crc", crtc->debugfs_entry);
3277
3278 if (!dir)
3279 return;
3280
3281 debugfs_create_file_unsafe("crc_win_x_start", 0644, dir, crtc,
3282 &crc_win_x_start_fops);
3283 debugfs_create_file_unsafe("crc_win_y_start", 0644, dir, crtc,
3284 &crc_win_y_start_fops);
3285 debugfs_create_file_unsafe("crc_win_x_end", 0644, dir, crtc,
3286 &crc_win_x_end_fops);
3287 debugfs_create_file_unsafe("crc_win_y_end", 0644, dir, crtc,
3288 &crc_win_y_end_fops);
3289 debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
3290 &crc_win_update_fops);
3291 dput(dir);
3292 #endif
3293 debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry,
3294 crtc, &amdgpu_current_bpc_fops);
3295 }
3296
3297
3298
3299
3300
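/*
 * Returns the DC hardware state log produced by hwss.log_hw_state(). The log
 * is regenerated on every read, so reads are not cheap.
 */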
3301 static ssize_t dtn_log_read(
3302 struct file *f,
3303 char __user *buf,
3304 size_t size,
3305 loff_t *pos)
3306 {
3307 struct amdgpu_device *adev = file_inode(f)->i_private;
3308 struct dc *dc = adev->dm.dc;
3309 struct dc_log_buffer_ctx log_ctx = { 0 };
3310 ssize_t result = 0;
3311
3312 if (!buf || !size)
3313 return -EINVAL;
3314
3315 if (!dc->hwss.log_hw_state)
3316 return 0;
3317
3318 dc->hwss.log_hw_state(dc, &log_ctx);
3319
3320 if (*pos < log_ctx.pos) {
3321 size_t to_copy = log_ctx.pos - *pos;
3322
3323 to_copy = min(to_copy, size);
3324
3325 if (!copy_to_user(buf, log_ctx.buf + *pos, to_copy)) {
3326 *pos += to_copy;
3327 result = to_copy;
3328 }
3329 }
3330
3331 kfree(log_ctx.buf);
3332
3333 return result;
3334 }
3335
3336
3337
3338
3339
3340 static ssize_t dtn_log_write(
3341 struct file *f,
3342 const char __user *buf,
3343 size_t size,
3344 loff_t *pos)
3345 {
3346 struct amdgpu_device *adev = file_inode(f)->i_private;
3347 struct dc *dc = adev->dm.dc;
3348
3349
3350 if (size == 0)
3351 return 0;
3352
3353 if (dc->hwss.log_hw_state)
3354 dc->hwss.log_hw_state(dc, NULL);
3355
3356 return size;
3357 }
3358
3359 static int mst_topo_show(struct seq_file *m, void *unused)
3360 {
3361 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
3362 struct drm_device *dev = adev_to_drm(adev);
3363 struct drm_connector *connector;
3364 struct drm_connector_list_iter conn_iter;
3365 struct amdgpu_dm_connector *aconnector;
3366
3367 drm_connector_list_iter_begin(dev, &conn_iter);
3368 drm_for_each_connector_iter(connector, &conn_iter) {
3369 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3370 continue;
3371
3372 aconnector = to_amdgpu_dm_connector(connector);
3373
3374
3375 if (!aconnector->mst_mgr.mst_state)
3376 continue;
3377
3378 seq_printf(m, "\nMST topology for connector %d\n", aconnector->connector_id);
3379 drm_dp_mst_dump_topology(m, &aconnector->mst_mgr);
3380 }
3381 drm_connector_list_iter_end(&conn_iter);
3382
3383 return 0;
3384 }
3385
3386
3387
3388
3389
3390
3391
3392
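/*
 * Manually toggles MST topology management for test purposes. Writing 1
 * re-detects MST branch links and enables their topology managers; writing 0
 * tears the topologies down and clears the stream allocation tables. In both
 * cases a hotplug event is sent to userspace afterwards.
 */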
3393 static int trigger_hpd_mst_set(void *data, u64 val)
3394 {
3395 struct amdgpu_device *adev = data;
3396 struct drm_device *dev = adev_to_drm(adev);
3397 struct drm_connector_list_iter iter;
3398 struct amdgpu_dm_connector *aconnector;
3399 struct drm_connector *connector;
3400 struct dc_link *link = NULL;
3401
3402 if (val == 1) {
3403 drm_connector_list_iter_begin(dev, &iter);
3404 drm_for_each_connector_iter(connector, &iter) {
3405 aconnector = to_amdgpu_dm_connector(connector);
3406 if (aconnector->dc_link->type == dc_connection_mst_branch &&
3407 aconnector->mst_mgr.aux) {
3408 mutex_lock(&adev->dm.dc_lock);
3409 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3410 mutex_unlock(&adev->dm.dc_lock);
3411
3412 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
3413 }
3414 }
3415 } else if (val == 0) {
3416 drm_connector_list_iter_begin(dev, &iter);
3417 drm_for_each_connector_iter(connector, &iter) {
3418 aconnector = to_amdgpu_dm_connector(connector);
3419 if (!aconnector->dc_link)
3420 continue;
3421
3422 if (!aconnector->mst_port)
3423 continue;
3424
3425 link = aconnector->dc_link;
3426 dp_receiver_power_ctrl(link, false);
3427 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_port->mst_mgr, false);
3428 link->mst_stream_alloc_table.stream_count = 0;
3429 memset(link->mst_stream_alloc_table.stream_allocations, 0,
3430 sizeof(link->mst_stream_alloc_table.stream_allocations));
3431 }
3432 } else {
3433 return 0;
3434 }
3435 drm_kms_helper_hotplug_event(dev);
3436
3437 return 0;
3438 }
3439
3440
3441
3442
3443
3444
3445 static int trigger_hpd_mst_get(void *data, u64 *val)
3446 {
3447 *val = 0;
3448 return 0;
3449 }
3450
3451 DEFINE_DEBUGFS_ATTRIBUTE(trigger_hpd_mst_ops, trigger_hpd_mst_get,
3452 trigger_hpd_mst_set, "%llu\n");
3453
3454
3455
3456
3457
3458
3459
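/*
 * Forces timing synchronization across displays. Writing a non-zero value
 * sets dm.force_timing_sync and immediately re-triggers timing sync on the
 * active streams.
 */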
3460 static int force_timing_sync_set(void *data, u64 val)
3461 {
3462 struct amdgpu_device *adev = data;
3463
3464 adev->dm.force_timing_sync = (bool)val;
3465
3466 amdgpu_dm_trigger_timing_sync(adev_to_drm(adev));
3467
3468 return 0;
3469 }
3470
3471
3472
3473
3474
3475 static int force_timing_sync_get(void *data, u64 *val)
3476 {
3477 struct amdgpu_device *adev = data;
3478
3479 *val = adev->dm.force_timing_sync;
3480
3481 return 0;
3482 }
3483
3484 DEFINE_DEBUGFS_ATTRIBUTE(force_timing_sync_ops, force_timing_sync_get,
3485 force_timing_sync_set, "%llu\n");
3486
3487
3488
3489
3490
3491
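/*
 * Writing a non-zero value makes the driver ignore HPD interrupts
 * (dm.disable_hpd_irq); writing 0 restores normal hotplug handling.
 */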
3492 static int disable_hpd_set(void *data, u64 val)
3493 {
3494 struct amdgpu_device *adev = data;
3495
3496 adev->dm.disable_hpd_irq = (bool)val;
3497
3498 return 0;
3499 }
3500
3501
3502
3503
3504
3505
3506 static int disable_hpd_get(void *data, u64 *val)
3507 {
3508 struct amdgpu_device *adev = data;
3509
3510 *val = adev->dm.disable_hpd_irq;
3511
3512 return 0;
3513 }
3514
3515 DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get,
3516 disable_hpd_set, "%llu\n");
3517
3518
3519
3520
3521
3522 static int dp_force_sst_set(void *data, u64 val)
3523 {
3524 struct amdgpu_device *adev = data;
3525
3526 adev->dm.dc->debug.set_mst_en_for_sst = val;
3527
3528 return 0;
3529 }
3530
3531 static int dp_force_sst_get(void *data, u64 *val)
3532 {
3533 struct amdgpu_device *adev = data;
3534
3535 *val = adev->dm.dc->debug.set_mst_en_for_sst;
3536
3537 return 0;
3538 }
3539 DEFINE_DEBUGFS_ATTRIBUTE(dp_set_mst_en_for_sst_ops, dp_force_sst_get,
3540 dp_force_sst_set, "%llu\n");
3541
3542
3543
3544
3545
3546 static int dp_ignore_cable_id_set(void *data, u64 val)
3547 {
3548 struct amdgpu_device *adev = data;
3549
3550 adev->dm.dc->debug.ignore_cable_id = val;
3551
3552 return 0;
3553 }
3554
3555 static int dp_ignore_cable_id_get(void *data, u64 *val)
3556 {
3557 struct amdgpu_device *adev = data;
3558
3559 *val = adev->dm.dc->debug.ignore_cable_id;
3560
3561 return 0;
3562 }
3563 DEFINE_DEBUGFS_ATTRIBUTE(dp_ignore_cable_id_ops, dp_ignore_cable_id_get,
3564 dp_ignore_cable_id_set, "%llu\n");
3565
3566
3567
3568
3569
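/*
 * Selects the DC visual confirm mode. The raw value is passed through to
 * dc->debug.visual_confirm, so it must match the driver's
 * enum visual_confirm values; 0 typically disables the overlay.
 */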
3570 static int visual_confirm_set(void *data, u64 val)
3571 {
3572 struct amdgpu_device *adev = data;
3573
3574 adev->dm.dc->debug.visual_confirm = (enum visual_confirm)val;
3575
3576 return 0;
3577 }
3578
3579
3580
3581
3582
3583 static int visual_confirm_get(void *data, u64 *val)
3584 {
3585 struct amdgpu_device *adev = data;
3586
3587 *val = adev->dm.dc->debug.visual_confirm;
3588
3589 return 0;
3590 }
3591
3592 DEFINE_SHOW_ATTRIBUTE(mst_topo);
3593 DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
3594 visual_confirm_set, "%llu\n");
3595
3596
3597
3598
3599
3600
3601 static int skip_detection_link_training_set(void *data, u64 val)
3602 {
3603 struct amdgpu_device *adev = data;
3604
3605 if (val == 0)
3606 adev->dm.dc->debug.skip_detection_link_training = false;
3607 else
3608 adev->dm.dc->debug.skip_detection_link_training = true;
3609
3610 return 0;
3611 }
3612
3613
3614
3615
3616
3617 static int skip_detection_link_training_get(void *data, u64 *val)
3618 {
3619 struct amdgpu_device *adev = data;
3620
3621 *val = adev->dm.dc->debug.skip_detection_link_training;
3622
3623 return 0;
3624 }
3625
3626 DEFINE_DEBUGFS_ATTRIBUTE(skip_detection_link_training_fops,
3627 skip_detection_link_training_get,
3628 skip_detection_link_training_set, "%llu\n");
3629
3630
3631
3632
3633
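/*
 * Prints one DCC (Delta Color Compression) enable flag per pipe, as reported
 * by hwss.get_dcc_en_bits(). Returns an empty read when the hook is not
 * implemented for the ASIC.
 */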
3634 static ssize_t dcc_en_bits_read(
3635 struct file *f,
3636 char __user *buf,
3637 size_t size,
3638 loff_t *pos)
3639 {
3640 struct amdgpu_device *adev = file_inode(f)->i_private;
3641 struct dc *dc = adev->dm.dc;
3642 char *rd_buf = NULL;
3643 const uint32_t rd_buf_size = 32;
3644 uint32_t result = 0;
3645 int offset = 0;
3646 int num_pipes = dc->res_pool->pipe_count;
3647 int *dcc_en_bits;
3648 int i, r;
3649
3650 dcc_en_bits = kcalloc(num_pipes, sizeof(int), GFP_KERNEL);
3651 if (!dcc_en_bits)
3652 return -ENOMEM;
3653
3654 if (!dc->hwss.get_dcc_en_bits) {
3655 kfree(dcc_en_bits);
3656 return 0;
3657 }
3658
3659 dc->hwss.get_dcc_en_bits(dc, dcc_en_bits);
3660
3661 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
3662 if (!rd_buf) {
3663 kfree(dcc_en_bits);
3664 return -ENOMEM;
3665 }
3666
3667 for (i = 0; i < num_pipes; i++)
3668 offset += snprintf(rd_buf + offset, rd_buf_size - offset,
3669 "%d ", dcc_en_bits[i]);
3670 rd_buf[strlen(rd_buf)] = '\n';
3671
3672 kfree(dcc_en_bits);
3673
3674 while (size) {
3675 if (*pos >= rd_buf_size)
3676 break;
3677 r = put_user(*(rd_buf + result), buf);
3678 if (r) {
3679 kfree(rd_buf);
3680 return r;
3681 }
3682 buf += 1;
3683 size -= 1;
3684 *pos += 1;
3685 result += 1;
3686 }
3687
3688 kfree(rd_buf);
3689 return result;
3690 }
3691
3692 void dtn_debugfs_init(struct amdgpu_device *adev)
3693 {
3694 static const struct file_operations dtn_log_fops = {
3695 .owner = THIS_MODULE,
3696 .read = dtn_log_read,
3697 .write = dtn_log_write,
3698 .llseek = default_llseek
3699 };
3700 static const struct file_operations dcc_en_bits_fops = {
3701 .owner = THIS_MODULE,
3702 .read = dcc_en_bits_read,
3703 .llseek = default_llseek
3704 };
3705
3706 struct drm_minor *minor = adev_to_drm(adev)->primary;
3707 struct dentry *root = minor->debugfs_root;
3708
3709 debugfs_create_file("amdgpu_mst_topology", 0444, root,
3710 adev, &mst_topo_fops);
3711 debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev,
3712 &dtn_log_fops);
3713 debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev,
3714 &dp_set_mst_en_for_sst_ops);
3715 debugfs_create_file("amdgpu_dm_dp_ignore_cable_id", 0644, root, adev,
3716 &dp_ignore_cable_id_ops);
3717
3718 debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
3719 &visual_confirm_fops);
3720
3721 debugfs_create_file_unsafe("amdgpu_dm_skip_detection_link_training", 0644, root, adev,
3722 &skip_detection_link_training_fops);
3723
3724 debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root,
3725 adev, &dmub_tracebuffer_fops);
3726
3727 debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root,
3728 adev, &dmub_fw_state_fops);
3729
3730 debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root,
3731 adev, &force_timing_sync_ops);
3732
3733 debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root,
3734 adev, &dmcub_trace_event_state_fops);
3735
3736 debugfs_create_file_unsafe("amdgpu_dm_trigger_hpd_mst", 0644, root,
3737 adev, &trigger_hpd_mst_ops);
3738
3739 debugfs_create_file_unsafe("amdgpu_dm_dcc_en", 0644, root, adev,
3740 &dcc_en_bits_fops);
3741
3742 debugfs_create_file_unsafe("amdgpu_dm_disable_hpd", 0644, root, adev,
3743 &disable_hpd_ops);
3744
3745 }