0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026 #include <linux/string.h>
0027 #include <linux/acpi.h>
0028 #include <linux/i2c.h>
0029
0030 #include <drm/drm_probe_helper.h>
0031 #include <drm/amdgpu_drm.h>
0032 #include <drm/drm_edid.h>
0033
0034 #include "dm_services.h"
0035 #include "amdgpu.h"
0036 #include "dc.h"
0037 #include "amdgpu_dm.h"
0038 #include "amdgpu_dm_irq.h"
0039 #include "amdgpu_dm_mst_types.h"
0040
0041 #include "dm_helpers.h"
0042 #include "ddc_service_types.h"
0043
/* One EDID quirk entry: when a parsed EDID's (manufacturer_id, product_id)
 * pair matches, patch_func is invoked with patch_param to adjust the caps.
 */
struct monitor_patch_info {
	unsigned int manufacturer_id;
	unsigned int product_id;
	void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param);
	unsigned int patch_param;
};
static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param);

/* Monitors (matched by EDID manufacturer/product id) that need their DSC
 * target bpp capped at 15. */
static const struct monitor_patch_info monitor_patch_table[] = {
{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15},
{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15},
};
0056
0057 static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param)
0058 {
0059 if (edid_caps)
0060 edid_caps->panel_patch.max_dsc_target_bpp_limit = param;
0061 }
0062
0063 static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
0064 {
0065 int i, ret = 0;
0066
0067 for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++)
0068 if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id)
0069 && (edid_caps->product_id == monitor_patch_table[i].product_id)) {
0070 monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param);
0071 ret++;
0072 }
0073
0074 return ret;
0075 }
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086 enum dc_edid_status dm_helpers_parse_edid_caps(
0087 struct dc_link *link,
0088 const struct dc_edid *edid,
0089 struct dc_edid_caps *edid_caps)
0090 {
0091 struct amdgpu_dm_connector *aconnector = link->priv;
0092 struct drm_connector *connector = &aconnector->base;
0093 struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
0094 struct cea_sad *sads;
0095 int sad_count = -1;
0096 int sadb_count = -1;
0097 int i = 0;
0098 uint8_t *sadb = NULL;
0099
0100 enum dc_edid_status result = EDID_OK;
0101
0102 if (!edid_caps || !edid)
0103 return EDID_BAD_INPUT;
0104
0105 if (!drm_edid_is_valid(edid_buf))
0106 result = EDID_BAD_CHECKSUM;
0107
0108 edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
0109 ((uint16_t) edid_buf->mfg_id[1])<<8;
0110 edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
0111 ((uint16_t) edid_buf->prod_code[1])<<8;
0112 edid_caps->serial_number = edid_buf->serial;
0113 edid_caps->manufacture_week = edid_buf->mfg_week;
0114 edid_caps->manufacture_year = edid_buf->mfg_year;
0115
0116 drm_edid_get_monitor_name(edid_buf,
0117 edid_caps->display_name,
0118 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
0119
0120 edid_caps->edid_hdmi = connector->display_info.is_hdmi;
0121
0122 sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
0123 if (sad_count <= 0)
0124 return result;
0125
0126 edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
0127 for (i = 0; i < edid_caps->audio_mode_count; ++i) {
0128 struct cea_sad *sad = &sads[i];
0129
0130 edid_caps->audio_modes[i].format_code = sad->format;
0131 edid_caps->audio_modes[i].channel_count = sad->channels + 1;
0132 edid_caps->audio_modes[i].sample_rate = sad->freq;
0133 edid_caps->audio_modes[i].sample_size = sad->byte2;
0134 }
0135
0136 sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
0137
0138 if (sadb_count < 0) {
0139 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
0140 sadb_count = 0;
0141 }
0142
0143 if (sadb_count)
0144 edid_caps->speaker_flags = sadb[0];
0145 else
0146 edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
0147
0148 kfree(sads);
0149 kfree(sadb);
0150
0151 amdgpu_dm_patch_edid_caps(edid_caps);
0152
0153 return result;
0154 }
0155
/*
 * Snapshot the topology manager's active MST payload table into DC's
 * dp_mst_stream_allocation_table (vcp id + slot count per stream).
 * Takes mst_mgr->payload_lock while walking the payload array.
 */
static void get_payload_table(
		struct amdgpu_dm_connector *aconnector,
		struct dp_mst_stream_allocation_table *proposed_table)
{
	int i;
	struct drm_dp_mst_topology_mgr *mst_mgr =
			&aconnector->mst_port->mst_mgr;

	mutex_lock(&mst_mgr->payload_lock);

	proposed_table->stream_count = 0;

	/* num_slots == 0 marks the end of the populated payload entries */
	for (i = 0; i < mst_mgr->max_payloads; i++) {
		if (mst_mgr->payloads[i].num_slots == 0)
			break;

		ASSERT(mst_mgr->payloads[i].payload_state !=
				DP_PAYLOAD_DELETE_LOCAL);

		/* only LOCAL and REMOTE payloads are copied out */
		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
			mst_mgr->payloads[i].payload_state ==
					DP_PAYLOAD_REMOTE) {

			struct dp_mst_stream_allocation *sa =
					&proposed_table->stream_allocations[
						proposed_table->stream_count];

			sa->slot_count = mst_mgr->payloads[i].num_slots;
			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
			proposed_table->stream_count++;
		}
	}

	mutex_unlock(&mst_mgr->payload_lock);
}
0192
/* Intentional no-op: DM does not track DP branch device info. */
void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
0197
0198
0199
0200
/*
 * Allocate (or release) the VCPI for an MST stream and push payload table
 * part 1 to the branch device, then copy the resulting payload table back
 * into @proposed_table for DC.
 *
 * Returns false when the stream has no MST connector/port or MST is not
 * active on the manager; true otherwise.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		struct dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	bool ret;
	u8 link_coding_cap = DP_8b_10b_ENCODING;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_port)
		return false;

	dm_conn_state = to_dm_connector_state(aconnector->base.state);

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	mst_port = aconnector->port;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
#endif

	if (enable) {
		/* pbn/vcpi_slots were computed earlier into the connector state */
		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
					       dm_conn_state->pbn,
					       dm_conn_state->vcpi_slots);
		if (!ret)
			return false;

	} else {
		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
	}

	/* second argument selects the VC start slot: 0 for 128b/132b, 1 for 8b/10b */
	drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0:1);

	/* mst_mgr->->payloads are VC payload notify element to DPCD,
	 * mirror the manager's view back into DC's table */
	get_payload_table(aconnector, proposed_table);

	return true;
}
0260
0261
0262
0263
/* Intentional no-op: down-reply polling is handled by the DRM MST core. */
void dm_helpers_dp_mst_poll_pending_down_reply(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
0268
0269
0270
0271
/* Intentional no-op: payload table cleanup is handled by the DRM MST core. */
void dm_helpers_dp_mst_clear_payload_allocation_table(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
0276
0277
0278
0279
0280
0281 enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
0282 struct dc_context *ctx,
0283 const struct dc_stream_state *stream)
0284 {
0285 struct amdgpu_dm_connector *aconnector;
0286 struct drm_dp_mst_topology_mgr *mst_mgr;
0287 int ret;
0288
0289 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
0290
0291 if (!aconnector || !aconnector->mst_port)
0292 return ACT_FAILED;
0293
0294 mst_mgr = &aconnector->mst_port->mst_mgr;
0295
0296 if (!mst_mgr->mst_state)
0297 return ACT_FAILED;
0298
0299 ret = drm_dp_check_act_status(mst_mgr);
0300
0301 if (ret)
0302 return ACT_FAILED;
0303
0304 return ACT_SUCCESS;
0305 }
0306
/*
 * Push payload table part 2 to the MST branch and record progress in the
 * connector's mst_status bitmask. On disable, the VCPI is deallocated after
 * the part-2 update. Returns false only when the stream has no MST
 * connector/port or MST is inactive; DPCD failure is reflected in
 * mst_status, not the return value.
 */
bool dm_helpers_dp_mst_send_payload_allocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
	enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_port)
		return false;

	mst_port = aconnector->port;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	/* when disabling, the flag pair is swapped: we are clearing, not allocating */
	if (!enable) {
		set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
		clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
	}

	/* non-zero return from part2 means the DPCD update failed */
	if (drm_dp_update_payload_part2(mst_mgr)) {
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
			set_flag, false);
	} else {
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
			set_flag, true);
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
			clr_flag, false);
	}

	if (!enable)
		drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);

	return true;
}
0350
/* Emit the DTN log header, either into the log buffer or to dmesg. */
void dm_dtn_log_begin(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	if (log_ctx)
		dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
	else
		pr_info("%s", msg);
}
0363
0364 __printf(3, 4)
0365 void dm_dtn_log_append_v(struct dc_context *ctx,
0366 struct dc_log_buffer_ctx *log_ctx,
0367 const char *msg, ...)
0368 {
0369 va_list args;
0370 size_t total;
0371 int n;
0372
0373 if (!log_ctx) {
0374
0375 struct va_format vaf;
0376
0377 vaf.fmt = msg;
0378 vaf.va = &args;
0379
0380 va_start(args, msg);
0381 pr_info("%pV", &vaf);
0382 va_end(args);
0383
0384 return;
0385 }
0386
0387
0388 va_start(args, msg);
0389 n = vsnprintf(NULL, 0, msg, args);
0390 va_end(args);
0391
0392 if (n <= 0)
0393 return;
0394
0395
0396 total = log_ctx->pos + n + 1;
0397
0398 if (total > log_ctx->size) {
0399 char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL);
0400
0401 if (buf) {
0402 memcpy(buf, log_ctx->buf, log_ctx->pos);
0403 kfree(log_ctx->buf);
0404
0405 log_ctx->buf = buf;
0406 log_ctx->size = total;
0407 }
0408 }
0409
0410 if (!log_ctx->buf)
0411 return;
0412
0413
0414 va_start(args, msg);
0415 n = vscnprintf(
0416 log_ctx->buf + log_ctx->pos,
0417 log_ctx->size - log_ctx->pos,
0418 msg,
0419 args);
0420 va_end(args);
0421
0422 if (n > 0)
0423 log_ctx->pos += n;
0424 }
0425
/* Emit the DTN log trailer, either into the log buffer or to dmesg. */
void dm_dtn_log_end(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	if (log_ctx)
		dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
	else
		pr_info("%s", msg);
}
0438
0439 bool dm_helpers_dp_mst_start_top_mgr(
0440 struct dc_context *ctx,
0441 const struct dc_link *link,
0442 bool boot)
0443 {
0444 struct amdgpu_dm_connector *aconnector = link->priv;
0445
0446 if (!aconnector) {
0447 DRM_ERROR("Failed to find connector for link!");
0448 return false;
0449 }
0450
0451 if (boot) {
0452 DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n",
0453 aconnector, aconnector->base.base.id);
0454 return true;
0455 }
0456
0457 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
0458 aconnector, aconnector->base.base.id);
0459
0460 return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
0461 }
0462
0463 bool dm_helpers_dp_mst_stop_top_mgr(
0464 struct dc_context *ctx,
0465 struct dc_link *link)
0466 {
0467 struct amdgpu_dm_connector *aconnector = link->priv;
0468
0469 if (!aconnector) {
0470 DRM_ERROR("Failed to find connector for link!");
0471 return false;
0472 }
0473
0474 DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
0475 aconnector, aconnector->base.base.id);
0476
0477 if (aconnector->mst_mgr.mst_state == true) {
0478 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
0479 link->cur_link_settings.lane_count = 0;
0480 }
0481
0482 return false;
0483 }
0484
0485 bool dm_helpers_dp_read_dpcd(
0486 struct dc_context *ctx,
0487 const struct dc_link *link,
0488 uint32_t address,
0489 uint8_t *data,
0490 uint32_t size)
0491 {
0492
0493 struct amdgpu_dm_connector *aconnector = link->priv;
0494
0495 if (!aconnector) {
0496 DC_LOG_DC("Failed to find connector for link!\n");
0497 return false;
0498 }
0499
0500 return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
0501 data, size) > 0;
0502 }
0503
0504 bool dm_helpers_dp_write_dpcd(
0505 struct dc_context *ctx,
0506 const struct dc_link *link,
0507 uint32_t address,
0508 const uint8_t *data,
0509 uint32_t size)
0510 {
0511 struct amdgpu_dm_connector *aconnector = link->priv;
0512
0513 if (!aconnector) {
0514 DRM_ERROR("Failed to find connector for link!");
0515 return false;
0516 }
0517
0518 return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
0519 address, (uint8_t *)data, size) > 0;
0520 }
0521
0522 bool dm_helpers_submit_i2c(
0523 struct dc_context *ctx,
0524 const struct dc_link *link,
0525 struct i2c_command *cmd)
0526 {
0527 struct amdgpu_dm_connector *aconnector = link->priv;
0528 struct i2c_msg *msgs;
0529 int i = 0;
0530 int num = cmd->number_of_payloads;
0531 bool result;
0532
0533 if (!aconnector) {
0534 DRM_ERROR("Failed to find connector for link!");
0535 return false;
0536 }
0537
0538 msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
0539
0540 if (!msgs)
0541 return false;
0542
0543 for (i = 0; i < num; i++) {
0544 msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
0545 msgs[i].addr = cmd->payloads[i].address;
0546 msgs[i].len = cmd->payloads[i].length;
0547 msgs[i].buf = cmd->payloads[i].data;
0548 }
0549
0550 result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
0551
0552 kfree(msgs);
0553
0554 return result;
0555 }
0556
0557 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * Issue one Synaptics remote-command (RC) transaction over DPCD:
 * optionally stage write data, program offset and length, kick the command
 * (bit 7 set), poll up to ~100 ms for completion, then read the result
 * register and, for reads, the returned data.
 *
 * Returns true when the RC result register reads back 0.
 * NOTE(review): only the final drm_dp_dpcd_write() return value is checked;
 * earlier writes overwrite @ret without inspection — presumably intentional
 * best-effort, but verify against the device's RC protocol.
 */
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
		bool is_write_cmd,
		unsigned char cmd,
		unsigned int length,
		unsigned int offset,
		unsigned char *data)
{
	bool success = false;
	unsigned char rc_data[16] = {0};
	unsigned char rc_offset[4] = {0};
	unsigned char rc_length[2] = {0};
	unsigned char rc_cmd = 0;
	unsigned char rc_result = 0xFF;
	unsigned char i = 0;
	int ret;

	if (is_write_cmd) {
		/* write data into the RC data registers first */
		memmove(rc_data, data, length);
		ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data));
	}

	/* write rc offset, little-endian */
	rc_offset[0] = (unsigned char) offset & 0xFF;
	rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF;
	rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF;
	rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset));

	/* write rc length, little-endian */
	rc_length[0] = (unsigned char) length & 0xFF;
	rc_length[1] = (unsigned char) (length >> 8) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length));

	/* write rc cmd: bit 7 triggers execution */
	rc_cmd = cmd | 0x80;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));

	if (ret < 0) {
		DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret);
		return false;
	}

	/* poll until command byte reads back without the trigger bit */
	for (i = 0; i < 10; i++) {
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
		if (rc_cmd == cmd)
			/* ensure the command was cleared by the device */
			break;
		msleep(10);
	}

	/* read rc result: 0 indicates success */
	drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result));
	success = (rc_result == 0);

	if (success && !is_write_cmd) {
		/* read back the data produced by a read command */
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
	}

	DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success);

	return success;
}
0623
0624 static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
0625 {
0626 unsigned char data[16] = {0};
0627
0628 DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n");
0629
0630
0631 data[0] = 'P';
0632 data[1] = 'R';
0633 data[2] = 'I';
0634 data[3] = 'U';
0635 data[4] = 'S';
0636
0637 if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data))
0638 return;
0639
0640
0641 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
0642 return;
0643
0644 data[0] &= (~(1 << 1));
0645 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
0646 return;
0647
0648 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
0649 return;
0650
0651 data[0] &= (~(1 << 1));
0652 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
0653 return;
0654
0655 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
0656 return;
0657
0658 data[0] &= (~(1 << 1));
0659 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
0660 return;
0661
0662
0663 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
0664 return;
0665
0666 data[0] |= (1 << 1);
0667 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
0668 return;
0669
0670 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
0671 return;
0672
0673 data[0] |= (1 << 1);
0674 return;
0675
0676 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
0677 return;
0678
0679 data[0] |= (1 << 1);
0680 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
0681 return;
0682
0683
0684 if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL))
0685 return;
0686
0687 DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n");
0688 }
0689
/*
 * Enable/disable DSC on a Synaptics MST hub that exposes a non-virtual
 * DPCD. On enable, the FIFO-reset workaround is applied first when the
 * link is down and the branch device identifies as Synaptics. On disable,
 * the DPCD write is only issued while the link is inactive.
 *
 * Returns the drm_dp_dpcd_write() result (bytes written) or 0 when no
 * write was attempted.
 */
static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
		struct drm_dp_aux *aux,
		const struct dc_stream_state *stream,
		bool enable)
{
	uint8_t ret = 0;

	DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n");

	if (enable) {
		/* apply the FIFO-reset workaround before enabling DSC,
		 * but only while the link is not yet active */
		if (!stream->link->link_status.link_active &&
			memcmp(stream->link->dpcd_caps.branch_dev_name,
				(int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0)
			apply_synaptics_fifo_reset_wa(aux);

		ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
		DRM_INFO("Send DSC enable to synaptics\n");

	} else {
		/* disable is only written while the link is inactive */
		if (!stream->link->link_status.link_active) {
			ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
			DRM_INFO("Send DSC disable to synaptics\n");
		}
	}

	return ret;
}
0725 #endif
0726
/*
 * Write DP_DSC_ENABLE to the appropriate aux channel for the stream's
 * signal type: the connector's dsc_aux for MST (with a Synaptics
 * non-virtual-DPCD special case), or the link's DPCD for SST/eDP
 * (including DP-to-HDMI PCON dongles).
 *
 * Returns true when at least one byte was written.
 */
bool dm_helpers_dp_write_dsc_enable(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	uint8_t enable_dsc = enable ? 1 : 0;
	struct amdgpu_dm_connector *aconnector;
	uint8_t ret = 0;

	if (!stream)
		return false;

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector->dsc_aux)
			return false;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Synaptics hubs with a non-virtual DPCD downstream port need
		 * the dedicated write path (see helper above). */
		if (needs_dsc_aux_workaround(aconnector->dc_link) &&
		    (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3)
			return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
				aconnector->dsc_aux, stream, enable_dsc);
#endif

		ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
		DC_LOG_DC("Send DSC %s to MST RX\n", enable_dsc ? "enable" : "disable");
	}

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
#endif
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable");
#if defined(CONFIG_DRM_AMD_DC_DCN)
		} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable");
		}
#endif
	}

	return (ret > 0);
}
0773
0774 bool dm_helpers_is_dp_sink_present(struct dc_link *link)
0775 {
0776 bool dp_sink_present;
0777 struct amdgpu_dm_connector *aconnector = link->priv;
0778
0779 if (!aconnector) {
0780 BUG_ON("Failed to find connector for link!");
0781 return true;
0782 }
0783
0784 mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
0785 dp_sink_present = dc_link_is_dp_sink_present(link);
0786 mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
0787 return dp_sink_present;
0788 }
0789
/*
 * Read the sink's EDID over AUX (DP) or I2C (analog/HDMI path), retrying up
 * to three times on checksum failures, and parse it into sink->edid_caps.
 * On AUX links the real EDID checksum is reported back to the sink for DP
 * compliance test support.
 */
enum dc_edid_status dm_helpers_read_local_edid(
		struct dc_context *ctx,
		struct dc_link *link,
		struct dc_sink *sink)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct i2c_adapter *ddc;
	int retry = 3;
	enum dc_edid_status edid_status;
	struct edid *edid;

	/* pick the DDC channel matching the link's physical transport */
	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	/* some dongles read edid incorrectly the first time,
	 * do check sum and retry to make sure read correct edid.
	 */
	do {

		edid = drm_get_edid(&aconnector->base, ddc);

		/* DP Compliance Test 4.2.2.6: report the checksum even when
		 * the read failed but corruption was detected */
		if (link->aux_mode && connector->edid_corrupt)
			drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);

		if (!edid && connector->edid_corrupt) {
			connector->edid_corrupt = false;
			return EDID_BAD_CHECKSUM;
		}

		if (!edid)
			return EDID_NO_RESPONSE;

		/* NOTE(review): length is EDID_LENGTH * (extensions + 1);
		 * assumes dc_edid.raw_edid can hold the maximum extension
		 * count — verify against DC_MAX_EDID_BUFFER_SIZE. */
		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);

		/* We don't need the original edid anymore */
		kfree(edid);

		edid_status = dm_helpers_parse_edid_caps(
						link,
						&sink->dc_edid,
						&sink->edid_caps);

	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);

	if (edid_status != EDID_OK)
		DRM_ERROR("EDID err: %d, on connector: %s",
				edid_status,
				aconnector->base.name);

	/* DP Compliance Test 4.2.2.3 */
	if (link->aux_mode)
		drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);

	return edid_status;
}
/* Thin wrapper: synchronous AUX transfer routed through the DMUB firmware
 * (first argument true selects the AUX path in the shared handler). */
int dm_helper_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx,
			link->link_index, (void *)payload,
			(void *)operation_result);
}
0860
/* Thin wrapper: synchronous SET_CONFIG request routed through the DMUB
 * firmware (first argument false selects the set-config path). */
int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
		const struct dc_link *link,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx,
			link->link_index, (void *)payload,
			(void *)operation_result);
}
0870
void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* Intentional no-op: DCN clock programming is handled elsewhere. */
}
0875
void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us)
{
	/* Intentional no-op: SMU timeout handling is not implemented in DM. */
}
0881
/*
 * Allocate a pinned GPU buffer (GTT or VRAM depending on @type) with a CPU
 * mapping, record it on adev->dm.da_list for later lookup by
 * dm_helpers_free_gpu_mem(), and return the CPU pointer.
 *
 * @addr receives the GPU address. Returns NULL on allocation failure.
 */
void *dm_helpers_allocate_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dal_allocation *da;
	u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
		AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
	int ret;

	da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
	if (!da)
		return NULL;

	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				      domain, &da->bo,
				      &da->gpu_addr, &da->cpu_ptr);

	/* NOTE(review): *addr is written even on failure (da is zeroed, so it
	 * becomes 0); callers must not consume *addr when NULL is returned. */
	*addr = da->gpu_addr;

	if (ret) {
		kfree(da);
		return NULL;
	}

	/* add da to list in dm */
	list_add(&da->list, &adev->dm.da_list);

	return da->cpu_ptr;
}
0914
0915 void dm_helpers_free_gpu_mem(
0916 struct dc_context *ctx,
0917 enum dc_gpu_mem_alloc_type type,
0918 void *pvMem)
0919 {
0920 struct amdgpu_device *adev = ctx->driver_context;
0921 struct dal_allocation *da;
0922
0923
0924 list_for_each_entry(da, &adev->dm.da_list, list) {
0925 if (pvMem == da->cpu_ptr) {
0926 amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
0927 list_del(&da->list);
0928 kfree(da);
0929 break;
0930 }
0931 }
0932 }
0933
0934 bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
0935 {
0936 enum dc_irq_source irq_source;
0937 bool ret;
0938
0939 irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;
0940
0941 ret = dc_interrupt_set(ctx->dc, irq_source, enable);
0942
0943 DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
0944 enable ? "en" : "dis", ret);
0945 return ret;
0946 }
0947
/*
 * Sync the sink's DP_DOWNSPREAD_CTRL.IGNORE_MSA_TIMING_PARAM bit with the
 * stream's ignore_msa_timing_param setting, via a read-modify-write that
 * only issues the DPCD write when the value actually changes. Skipped
 * entirely when AUX access is disabled on the link.
 */
void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
{
	/* TODO: virtual DPCD */
	struct dc_link *link = stream->link;
	union down_spread_ctrl old_downspread;
	union down_spread_ctrl new_downspread;

	if (link->aux_access_disabled)
		return;

	if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
			&old_downspread.raw,
			sizeof(old_downspread)))
		return;

	new_downspread.raw = old_downspread.raw;
	new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
			(stream->ignore_msa_timing_param) ? 1 : 0;

	/* avoid a redundant AUX write when nothing changed */
	if (new_downspread.raw != old_downspread.raw)
		dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
				&new_downspread.raw,
				sizeof(new_downspread));
}
0972
void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
	/* Intentional no-op: phyd32clk programming is not implemented in DM. */
}
0977
void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
	/* Intentional no-op: periodic detection control is not implemented in DM. */
}