0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025 #include "dm_services.h"
0026
0027 #include "dc.h"
0028
0029 #include "core_status.h"
0030 #include "core_types.h"
0031 #include "hw_sequencer.h"
0032 #include "dce/dce_hwseq.h"
0033
0034 #include "resource.h"
0035
0036 #include "clk_mgr.h"
0037 #include "clock_source.h"
0038 #include "dc_bios_types.h"
0039
0040 #include "bios_parser_interface.h"
0041 #include "bios/bios_parser_helper.h"
0042 #include "include/irq_service_interface.h"
0043 #include "transform.h"
0044 #include "dmcu.h"
0045 #include "dpp.h"
0046 #include "timing_generator.h"
0047 #include "abm.h"
0048 #include "virtual/virtual_link_encoder.h"
0049 #include "hubp.h"
0050
0051 #include "link_hwss.h"
0052 #include "link_encoder.h"
0053 #include "link_enc_cfg.h"
0054
0055 #include "dc_link.h"
0056 #include "dc_link_ddc.h"
0057 #include "dm_helpers.h"
0058 #include "mem_input.h"
0059
0060 #include "dc_link_dp.h"
0061 #include "dc_dmub_srv.h"
0062
0063 #include "dsc.h"
0064
0065 #include "vm_helper.h"
0066
0067 #include "dce/dce_i2c.h"
0068
0069 #include "dmub/dmub_srv.h"
0070
0071 #include "i2caux_interface.h"
0072
0073 #include "dce/dmub_psr.h"
0074
0075 #include "dce/dmub_hw_lock_mgr.h"
0076
0077 #include "dc_trace.h"
0078
0079 #include "dce/dmub_outbox.h"
0080
0081 #define CTX \
0082 dc->ctx
0083
0084 #define DC_LOGGER \
0085 dc->ctx->logger
0086
0087 static const char DC_BUILD_ID[] = "production-build";
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
0143 {
0144 if (new > *original)
0145 *original = new;
0146 }
0147
0148 static void destroy_links(struct dc *dc)
0149 {
0150 uint32_t i;
0151
0152 for (i = 0; i < dc->link_count; i++) {
0153 if (NULL != dc->links[i])
0154 link_destroy(&dc->links[i]);
0155 }
0156 }
0157
0158 static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
0159 {
0160 int i;
0161 uint32_t count = 0;
0162
0163 for (i = 0; i < num_links; i++) {
0164 if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
0165 links[i]->is_internal_display)
0166 count++;
0167 }
0168
0169 return count;
0170 }
0171
0172 static int get_seamless_boot_stream_count(struct dc_state *ctx)
0173 {
0174 uint8_t i;
0175 uint8_t seamless_boot_stream_count = 0;
0176
0177 for (i = 0; i < ctx->stream_count; i++)
0178 if (ctx->streams[i]->apply_seamless_boot_optimization)
0179 seamless_boot_stream_count++;
0180
0181 return seamless_boot_stream_count;
0182 }
0183
0184 static bool create_links(
0185 struct dc *dc,
0186 uint32_t num_virtual_links)
0187 {
0188 int i;
0189 int connectors_num;
0190 struct dc_bios *bios = dc->ctx->dc_bios;
0191
0192 dc->link_count = 0;
0193
0194 connectors_num = bios->funcs->get_connectors_number(bios);
0195
0196 DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
0197
0198 if (connectors_num > ENUM_ID_COUNT) {
0199 dm_error(
0200 "DC: Number of connectors %d exceeds maximum of %d!\n",
0201 connectors_num,
0202 ENUM_ID_COUNT);
0203 return false;
0204 }
0205
0206 dm_output_to_console(
0207 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
0208 __func__,
0209 connectors_num,
0210 num_virtual_links);
0211
0212 for (i = 0; i < connectors_num; i++) {
0213 struct link_init_data link_init_params = {0};
0214 struct dc_link *link;
0215
0216 DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
0217
0218 link_init_params.ctx = dc->ctx;
0219
0220 link_init_params.connector_index = i;
0221 link_init_params.link_index = dc->link_count;
0222 link_init_params.dc = dc;
0223 link = link_create(&link_init_params);
0224
0225 if (link) {
0226 dc->links[dc->link_count] = link;
0227 link->dc = dc;
0228 ++dc->link_count;
0229 }
0230 }
0231
0232 DC_LOG_DC("BIOS object table - end");
0233
0234
0235 for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
0236 struct link_init_data link_init_params = {0};
0237 struct dc_link *link;
0238
0239 link_init_params.ctx = dc->ctx;
0240 link_init_params.connector_index = i;
0241 link_init_params.link_index = dc->link_count;
0242 link_init_params.dc = dc;
0243 link_init_params.is_dpia_link = true;
0244
0245 link = link_create(&link_init_params);
0246 if (link) {
0247 dc->links[dc->link_count] = link;
0248 link->dc = dc;
0249 ++dc->link_count;
0250 }
0251 }
0252
0253 for (i = 0; i < num_virtual_links; i++) {
0254 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
0255 struct encoder_init_data enc_init = {0};
0256
0257 if (link == NULL) {
0258 BREAK_TO_DEBUGGER();
0259 goto failed_alloc;
0260 }
0261
0262 link->link_index = dc->link_count;
0263 dc->links[dc->link_count] = link;
0264 dc->link_count++;
0265
0266 link->ctx = dc->ctx;
0267 link->dc = dc;
0268 link->connector_signal = SIGNAL_TYPE_VIRTUAL;
0269 link->link_id.type = OBJECT_TYPE_CONNECTOR;
0270 link->link_id.id = CONNECTOR_ID_VIRTUAL;
0271 link->link_id.enum_id = ENUM_ID_1;
0272 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
0273
0274 if (!link->link_enc) {
0275 BREAK_TO_DEBUGGER();
0276 goto failed_alloc;
0277 }
0278
0279 link->link_status.dpcd_caps = &link->dpcd_caps;
0280
0281 enc_init.ctx = dc->ctx;
0282 enc_init.channel = CHANNEL_ID_UNKNOWN;
0283 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
0284 enc_init.transmitter = TRANSMITTER_UNKNOWN;
0285 enc_init.connector = link->link_id;
0286 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
0287 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
0288 enc_init.encoder.enum_id = ENUM_ID_1;
0289 virtual_link_encoder_construct(link->link_enc, &enc_init);
0290 }
0291
0292 dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);
0293
0294 return true;
0295
0296 failed_alloc:
0297 return false;
0298 }
0299
0300
0301
0302
0303
0304 static bool create_link_encoders(struct dc *dc)
0305 {
0306 bool res = true;
0307 unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
0308 unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
0309 int i;
0310
0311
0312
0313
0314
0315 if (num_usb4_dpia == 0)
0316 return res;
0317
0318
0319
0320
0321 if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
0322 for (i = 0; i < num_dig_link_enc; i++) {
0323 struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
0324
0325 if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
0326 link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
0327 (enum engine_id)(ENGINE_ID_DIGA + i));
0328 if (link_enc) {
0329 dc->res_pool->link_encoders[i] = link_enc;
0330 dc->res_pool->dig_link_enc_count++;
0331 } else {
0332 res = false;
0333 }
0334 }
0335 }
0336 }
0337
0338 return res;
0339 }
0340
0341
0342
0343
0344
0345 static void destroy_link_encoders(struct dc *dc)
0346 {
0347 unsigned int num_usb4_dpia;
0348 unsigned int num_dig_link_enc;
0349 int i;
0350
0351 if (!dc->res_pool)
0352 return;
0353
0354 num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
0355 num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
0356
0357
0358
0359
0360
0361 if (num_usb4_dpia == 0)
0362 return;
0363
0364 for (i = 0; i < num_dig_link_enc; i++) {
0365 struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
0366
0367 if (link_enc) {
0368 link_enc->funcs->destroy(&link_enc);
0369 dc->res_pool->link_encoders[i] = NULL;
0370 dc->res_pool->dig_link_enc_count--;
0371 }
0372 }
0373 }
0374
0375 static struct dc_perf_trace *dc_perf_trace_create(void)
0376 {
0377 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
0378 }
0379
0380 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
0381 {
0382 kfree(*perf_trace);
0383 *perf_trace = NULL;
0384 }
0385
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
0399 struct dc_stream_state *stream,
0400 struct dc_crtc_timing_adjust *adjust)
0401 {
0402 int i;
0403
0404 stream->adjust.v_total_max = adjust->v_total_max;
0405 stream->adjust.v_total_mid = adjust->v_total_mid;
0406 stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
0407 stream->adjust.v_total_min = adjust->v_total_min;
0408
0409 for (i = 0; i < MAX_PIPES; i++) {
0410 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
0411
0412 if (pipe->stream == stream && pipe->stream_res.tg) {
0413 dc->hwss.set_drr(&pipe,
0414 1,
0415 *adjust);
0416
0417 return true;
0418 }
0419 }
0420 return false;
0421 }
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437 bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
0438 struct dc_stream_state *stream,
0439 uint32_t *refresh_rate)
0440 {
0441 bool status = false;
0442
0443 int i = 0;
0444
0445 for (i = 0; i < MAX_PIPES; i++) {
0446 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
0447
0448 if (pipe->stream == stream && pipe->stream_res.tg) {
0449
0450
0451
0452 if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
0453 pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);
0454
0455 status = true;
0456
0457 break;
0458 }
0459 }
0460 }
0461
0462 return status;
0463 }
0464
0465 bool dc_stream_get_crtc_position(struct dc *dc,
0466 struct dc_stream_state **streams, int num_streams,
0467 unsigned int *v_pos, unsigned int *nom_v_pos)
0468 {
0469
0470 const struct dc_stream_state *stream = streams[0];
0471 int i;
0472 bool ret = false;
0473 struct crtc_position position;
0474
0475 for (i = 0; i < MAX_PIPES; i++) {
0476 struct pipe_ctx *pipe =
0477 &dc->current_state->res_ctx.pipe_ctx[i];
0478
0479 if (pipe->stream == stream && pipe->stream_res.stream_enc) {
0480 dc->hwss.get_position(&pipe, 1, &position);
0481
0482 *v_pos = position.vertical_count;
0483 *nom_v_pos = position.nominal_vcount;
0484 ret = true;
0485 }
0486 }
0487 return ret;
0488 }
0489
0490 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
0491 bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
0492 struct crc_params *crc_window)
0493 {
0494 int i;
0495 struct dmcu *dmcu = dc->res_pool->dmcu;
0496 struct pipe_ctx *pipe;
0497 struct crc_region tmp_win, *crc_win;
0498 struct otg_phy_mux mapping_tmp, *mux_mapping;
0499
0500
0501 if (!crc_window)
0502 return false;
0503
0504 if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
0505 crc_win = &tmp_win;
0506 mux_mapping = &mapping_tmp;
0507
0508 tmp_win.x_start = crc_window->windowa_x_start;
0509 tmp_win.y_start = crc_window->windowa_y_start;
0510 tmp_win.x_end = crc_window->windowa_x_end;
0511 tmp_win.y_end = crc_window->windowa_y_end;
0512
0513 for (i = 0; i < MAX_PIPES; i++) {
0514 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
0515 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
0516 break;
0517 }
0518
0519
0520 if (i == MAX_PIPES)
0521 return false;
0522
0523
0524
0525 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
0526 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
0527
0528 dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
0529 } else {
0530 DC_LOG_DC("dmcu is not initialized");
0531 return false;
0532 }
0533
0534 return true;
0535 }
0536
0537 bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
0538 {
0539 int i;
0540 struct dmcu *dmcu = dc->res_pool->dmcu;
0541 struct pipe_ctx *pipe;
0542 struct otg_phy_mux mapping_tmp, *mux_mapping;
0543
0544 if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
0545 mux_mapping = &mapping_tmp;
0546
0547 for (i = 0; i < MAX_PIPES; i++) {
0548 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
0549 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
0550 break;
0551 }
0552
0553
0554 if (i == MAX_PIPES)
0555 return false;
0556
0557
0558
0559 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
0560 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
0561
0562 dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
0563 } else {
0564 DC_LOG_DC("dmcu is not initialized");
0565 return false;
0566 }
0567
0568 return true;
0569 }
0570 #endif
0571
0572
0573
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
0585 struct crc_params *crc_window, bool enable, bool continuous)
0586 {
0587 int i;
0588 struct pipe_ctx *pipe;
0589 struct crc_params param;
0590 struct timing_generator *tg;
0591
0592 for (i = 0; i < MAX_PIPES; i++) {
0593 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
0594 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
0595 break;
0596 }
0597
0598 if (i == MAX_PIPES)
0599 return false;
0600
0601
0602 param.windowa_x_start = 0;
0603 param.windowa_y_start = 0;
0604 param.windowa_x_end = pipe->stream->timing.h_addressable;
0605 param.windowa_y_end = pipe->stream->timing.v_addressable;
0606 param.windowb_x_start = 0;
0607 param.windowb_y_start = 0;
0608 param.windowb_x_end = pipe->stream->timing.h_addressable;
0609 param.windowb_y_end = pipe->stream->timing.v_addressable;
0610
0611 if (crc_window) {
0612 param.windowa_x_start = crc_window->windowa_x_start;
0613 param.windowa_y_start = crc_window->windowa_y_start;
0614 param.windowa_x_end = crc_window->windowa_x_end;
0615 param.windowa_y_end = crc_window->windowa_y_end;
0616 param.windowb_x_start = crc_window->windowb_x_start;
0617 param.windowb_y_start = crc_window->windowb_y_start;
0618 param.windowb_x_end = crc_window->windowb_x_end;
0619 param.windowb_y_end = crc_window->windowb_y_end;
0620 }
0621
0622 param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
0623 param.odm_mode = pipe->next_odm_pipe ? 1:0;
0624
0625
0626 param.selection = UNION_WINDOW_A_B;
0627 param.continuous_mode = continuous;
0628 param.enable = enable;
0629
0630 tg = pipe->stream_res.tg;
0631
0632
0633 if (tg->funcs->configure_crc)
0634 return tg->funcs->configure_crc(tg, ¶m);
0635 DC_LOG_WARNING("CRC capture not supported.");
0636 return false;
0637 }
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
0651 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
0652 {
0653 int i;
0654 struct pipe_ctx *pipe;
0655 struct timing_generator *tg;
0656
0657 for (i = 0; i < MAX_PIPES; i++) {
0658 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
0659 if (pipe->stream == stream)
0660 break;
0661 }
0662
0663 if (i == MAX_PIPES)
0664 return false;
0665
0666 tg = pipe->stream_res.tg;
0667
0668 if (tg->funcs->get_crc)
0669 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
0670 DC_LOG_WARNING("CRC capture not supported.");
0671 return false;
0672 }
0673
0674 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
0675 enum dc_dynamic_expansion option)
0676 {
0677
0678 int i;
0679 struct pipe_ctx *pipe_ctx;
0680
0681 for (i = 0; i < MAX_PIPES; i++) {
0682 if (dc->current_state->res_ctx.pipe_ctx[i].stream
0683 == stream) {
0684 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
0685 pipe_ctx->stream_res.opp->dyn_expansion = option;
0686 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
0687 pipe_ctx->stream_res.opp,
0688 COLOR_SPACE_YCBCR601,
0689 stream->timing.display_color_depth,
0690 stream->signal);
0691 }
0692 }
0693 }
0694
0695 void dc_stream_set_dither_option(struct dc_stream_state *stream,
0696 enum dc_dither_option option)
0697 {
0698 struct bit_depth_reduction_params params;
0699 struct dc_link *link = stream->link;
0700 struct pipe_ctx *pipes = NULL;
0701 int i;
0702
0703 for (i = 0; i < MAX_PIPES; i++) {
0704 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
0705 stream) {
0706 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
0707 break;
0708 }
0709 }
0710
0711 if (!pipes)
0712 return;
0713 if (option > DITHER_OPTION_MAX)
0714 return;
0715
0716 stream->dither_option = option;
0717
0718 memset(¶ms, 0, sizeof(params));
0719 resource_build_bit_depth_reduction_params(stream, ¶ms);
0720 stream->bit_depth_params = params;
0721
0722 if (pipes->plane_res.xfm &&
0723 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
0724 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
0725 pipes->plane_res.xfm,
0726 pipes->plane_res.scl_data.lb_params.depth,
0727 &stream->bit_depth_params);
0728 }
0729
0730 pipes->stream_res.opp->funcs->
0731 opp_program_bit_depth_reduction(pipes->stream_res.opp, ¶ms);
0732 }
0733
0734 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
0735 {
0736 int i;
0737 bool ret = false;
0738 struct pipe_ctx *pipes;
0739
0740 for (i = 0; i < MAX_PIPES; i++) {
0741 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
0742 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
0743 dc->hwss.program_gamut_remap(pipes);
0744 ret = true;
0745 }
0746 }
0747
0748 return ret;
0749 }
0750
0751 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
0752 {
0753 int i;
0754 bool ret = false;
0755 struct pipe_ctx *pipes;
0756
0757 for (i = 0; i < MAX_PIPES; i++) {
0758 if (dc->current_state->res_ctx.pipe_ctx[i].stream
0759 == stream) {
0760
0761 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
0762 dc->hwss.program_output_csc(dc,
0763 pipes,
0764 stream->output_color_space,
0765 stream->csc_color_matrix.matrix,
0766 pipes->stream_res.opp->inst);
0767 ret = true;
0768 }
0769 }
0770
0771 return ret;
0772 }
0773
0774 void dc_stream_set_static_screen_params(struct dc *dc,
0775 struct dc_stream_state **streams,
0776 int num_streams,
0777 const struct dc_static_screen_params *params)
0778 {
0779 int i, j;
0780 struct pipe_ctx *pipes_affected[MAX_PIPES];
0781 int num_pipes_affected = 0;
0782
0783 for (i = 0; i < num_streams; i++) {
0784 struct dc_stream_state *stream = streams[i];
0785
0786 for (j = 0; j < MAX_PIPES; j++) {
0787 if (dc->current_state->res_ctx.pipe_ctx[j].stream
0788 == stream) {
0789 pipes_affected[num_pipes_affected++] =
0790 &dc->current_state->res_ctx.pipe_ctx[j];
0791 }
0792 }
0793 }
0794
0795 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
0796 }
0797
0798 static void dc_destruct(struct dc *dc)
0799 {
0800
0801 if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
0802 link_enc_cfg_init(dc, dc->current_state);
0803
0804 if (dc->current_state) {
0805 dc_release_state(dc->current_state);
0806 dc->current_state = NULL;
0807 }
0808
0809 destroy_links(dc);
0810
0811 destroy_link_encoders(dc);
0812
0813 if (dc->clk_mgr) {
0814 dc_destroy_clk_mgr(dc->clk_mgr);
0815 dc->clk_mgr = NULL;
0816 }
0817
0818 dc_destroy_resource_pool(dc);
0819
0820 if (dc->ctx->gpio_service)
0821 dal_gpio_service_destroy(&dc->ctx->gpio_service);
0822
0823 if (dc->ctx->created_bios)
0824 dal_bios_parser_destroy(&dc->ctx->dc_bios);
0825
0826 dc_perf_trace_destroy(&dc->ctx->perf_trace);
0827
0828 kfree(dc->ctx);
0829 dc->ctx = NULL;
0830
0831 kfree(dc->bw_vbios);
0832 dc->bw_vbios = NULL;
0833
0834 kfree(dc->bw_dceip);
0835 dc->bw_dceip = NULL;
0836
0837 kfree(dc->dcn_soc);
0838 dc->dcn_soc = NULL;
0839
0840 kfree(dc->dcn_ip);
0841 dc->dcn_ip = NULL;
0842
0843 kfree(dc->vm_helper);
0844 dc->vm_helper = NULL;
0845
0846 }
0847
0848 static bool dc_construct_ctx(struct dc *dc,
0849 const struct dc_init_data *init_params)
0850 {
0851 struct dc_context *dc_ctx;
0852 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
0853
0854 dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
0855 if (!dc_ctx)
0856 return false;
0857
0858 dc_ctx->cgs_device = init_params->cgs_device;
0859 dc_ctx->driver_context = init_params->driver;
0860 dc_ctx->dc = dc;
0861 dc_ctx->asic_id = init_params->asic_id;
0862 dc_ctx->dc_sink_id_count = 0;
0863 dc_ctx->dc_stream_id_count = 0;
0864 dc_ctx->dce_environment = init_params->dce_environment;
0865 dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
0866 dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
0867
0868
0869
0870 dc_version = resource_parse_asic_id(init_params->asic_id);
0871 dc_ctx->dce_version = dc_version;
0872
0873 dc_ctx->perf_trace = dc_perf_trace_create();
0874 if (!dc_ctx->perf_trace) {
0875 ASSERT_CRITICAL(false);
0876 return false;
0877 }
0878
0879 dc->ctx = dc_ctx;
0880
0881 return true;
0882 }
0883
0884 static bool dc_construct(struct dc *dc,
0885 const struct dc_init_data *init_params)
0886 {
0887 struct dc_context *dc_ctx;
0888 struct bw_calcs_dceip *dc_dceip;
0889 struct bw_calcs_vbios *dc_vbios;
0890 struct dcn_soc_bounding_box *dcn_soc;
0891 struct dcn_ip_params *dcn_ip;
0892
0893 dc->config = init_params->flags;
0894
0895
0896 dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
0897 if (!dc->vm_helper) {
0898 dm_error("%s: failed to create dc->vm_helper\n", __func__);
0899 goto fail;
0900 }
0901
0902 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
0903
0904 dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
0905 if (!dc_dceip) {
0906 dm_error("%s: failed to create dceip\n", __func__);
0907 goto fail;
0908 }
0909
0910 dc->bw_dceip = dc_dceip;
0911
0912 dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
0913 if (!dc_vbios) {
0914 dm_error("%s: failed to create vbios\n", __func__);
0915 goto fail;
0916 }
0917
0918 dc->bw_vbios = dc_vbios;
0919 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
0920 if (!dcn_soc) {
0921 dm_error("%s: failed to create dcn_soc\n", __func__);
0922 goto fail;
0923 }
0924
0925 dc->dcn_soc = dcn_soc;
0926
0927 dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
0928 if (!dcn_ip) {
0929 dm_error("%s: failed to create dcn_ip\n", __func__);
0930 goto fail;
0931 }
0932
0933 dc->dcn_ip = dcn_ip;
0934
0935 if (!dc_construct_ctx(dc, init_params)) {
0936 dm_error("%s: failed to create ctx\n", __func__);
0937 goto fail;
0938 }
0939
0940 dc_ctx = dc->ctx;
0941
0942
0943
0944
0945 if (init_params->vbios_override)
0946 dc_ctx->dc_bios = init_params->vbios_override;
0947 else {
0948
0949 struct bp_init_data bp_init_data;
0950
0951 bp_init_data.ctx = dc_ctx;
0952 bp_init_data.bios = init_params->asic_id.atombios_base_address;
0953
0954 dc_ctx->dc_bios = dal_bios_parser_create(
0955 &bp_init_data, dc_ctx->dce_version);
0956
0957 if (!dc_ctx->dc_bios) {
0958 ASSERT_CRITICAL(false);
0959 goto fail;
0960 }
0961
0962 dc_ctx->created_bios = true;
0963 }
0964
0965 dc->vendor_signature = init_params->vendor_signature;
0966
0967
0968 dc_ctx->gpio_service = dal_gpio_service_create(
0969 dc_ctx->dce_version,
0970 dc_ctx->dce_environment,
0971 dc_ctx);
0972
0973 if (!dc_ctx->gpio_service) {
0974 ASSERT_CRITICAL(false);
0975 goto fail;
0976 }
0977
0978 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
0979 if (!dc->res_pool)
0980 goto fail;
0981
0982
0983 if (dc->caps.i2c_speed_in_khz_hdcp == 0)
0984 dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
0985
0986 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
0987 if (!dc->clk_mgr)
0988 goto fail;
0989 #ifdef CONFIG_DRM_AMD_DC_DCN
0990 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
0991
0992 if (dc->res_pool->funcs->update_bw_bounding_box) {
0993 DC_FP_START();
0994 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
0995 DC_FP_END();
0996 }
0997 #endif
0998
0999
1000
1001
1002
1003
1004 dc->current_state = dc_create_state(dc);
1005
1006 if (!dc->current_state) {
1007 dm_error("%s: failed to create validate ctx\n", __func__);
1008 goto fail;
1009 }
1010
1011 if (!create_links(dc, init_params->num_virtual_links))
1012 goto fail;
1013
1014
1015
1016
1017 if (!create_link_encoders(dc))
1018 goto fail;
1019
1020 dc_resource_state_construct(dc, dc->current_state);
1021
1022 return true;
1023
1024 fail:
1025 return false;
1026 }
1027
1028 static void disable_all_writeback_pipes_for_stream(
1029 const struct dc *dc,
1030 struct dc_stream_state *stream,
1031 struct dc_state *context)
1032 {
1033 int i;
1034
1035 for (i = 0; i < stream->num_wb_info; i++)
1036 stream->writeback_info[i].wb_enabled = false;
1037 }
1038
1039 static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
1040 struct dc_stream_state *stream, bool lock)
1041 {
1042 int i;
1043
1044
1045 if (dc->hwss.interdependent_update_lock)
1046 dc->hwss.interdependent_update_lock(dc, context, lock);
1047 else {
1048 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1049 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1050 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1051
1052
1053 if (stream == pipe_ctx->stream) {
1054 if (!pipe_ctx->top_pipe &&
1055 (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
1056 dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
1057 }
1058 }
1059 }
1060 }
1061
1062 static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
1063 {
1064 int i, j;
1065 struct dc_state *dangling_context = dc_create_state(dc);
1066 struct dc_state *current_ctx;
1067
1068 if (dangling_context == NULL)
1069 return;
1070
1071 dc_resource_state_copy_construct(dc->current_state, dangling_context);
1072
1073 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1074 struct dc_stream_state *old_stream =
1075 dc->current_state->res_ctx.pipe_ctx[i].stream;
1076 bool should_disable = true;
1077 bool pipe_split_change = false;
1078
1079 if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
1080 (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
1081 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
1082 dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
1083 else
1084 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
1085 dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
1086
1087 for (j = 0; j < context->stream_count; j++) {
1088 if (old_stream == context->streams[j]) {
1089 should_disable = false;
1090 break;
1091 }
1092 }
1093 if (!should_disable && pipe_split_change &&
1094 dc->current_state->stream_count != context->stream_count)
1095 should_disable = true;
1096
1097 if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
1098 !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
1099 struct pipe_ctx *old_pipe, *new_pipe;
1100
1101 old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1102 new_pipe = &context->res_ctx.pipe_ctx[i];
1103
1104 if (old_pipe->plane_state && !new_pipe->plane_state)
1105 should_disable = true;
1106 }
1107
1108 if (should_disable && old_stream) {
1109 dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
1110 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
1111
1112 if (dc->hwss.apply_ctx_for_surface) {
1113 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
1114 dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
1115 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
1116 dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1117 }
1118 if (dc->hwss.program_front_end_for_ctx) {
1119 dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
1120 dc->hwss.program_front_end_for_ctx(dc, dangling_context);
1121 dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
1122 dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1123 }
1124 }
1125 }
1126
1127 current_ctx = dc->current_state;
1128 dc->current_state = dangling_context;
1129 dc_release_state(current_ctx);
1130 }
1131
1132 static void disable_vbios_mode_if_required(
1133 struct dc *dc,
1134 struct dc_state *context)
1135 {
1136 unsigned int i, j;
1137
1138
1139 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1140 struct dc_stream_state *stream = NULL;
1141 struct dc_link *link = NULL;
1142 struct pipe_ctx *pipe = NULL;
1143
1144 pipe = &context->res_ctx.pipe_ctx[i];
1145 stream = pipe->stream;
1146 if (stream == NULL)
1147 continue;
1148
1149
1150 if (pipe->prev_odm_pipe)
1151 continue;
1152
1153 if (stream->link->local_sink &&
1154 stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1155 link = stream->link;
1156 }
1157
1158 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1159 unsigned int enc_inst, tg_inst = 0;
1160 unsigned int pix_clk_100hz;
1161
1162 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1163 if (enc_inst != ENGINE_ID_UNKNOWN) {
1164 for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1165 if (dc->res_pool->stream_enc[j]->id == enc_inst) {
1166 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
1167 dc->res_pool->stream_enc[j]);
1168 break;
1169 }
1170 }
1171
1172 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1173 dc->res_pool->dp_clock_source,
1174 tg_inst, &pix_clk_100hz);
1175
1176 if (link->link_status.link_active) {
1177 uint32_t requested_pix_clk_100hz =
1178 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
1179
1180 if (pix_clk_100hz != requested_pix_clk_100hz) {
1181 core_link_disable_stream(pipe);
1182 pipe->stream->dpms_off = false;
1183 }
1184 }
1185 }
1186 }
1187 }
1188 }
1189
1190 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
1191 {
1192 int i;
1193 PERF_TRACE();
1194 for (i = 0; i < MAX_PIPES; i++) {
1195 int count = 0;
1196 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1197
1198 if (!pipe->plane_state)
1199 continue;
1200
1201
1202 while (count < 100000) {
1203
1204 pipe->plane_state->status.is_flip_pending = false;
1205 dc->hwss.update_pending_status(pipe);
1206 if (!pipe->plane_state->status.is_flip_pending)
1207 break;
1208 udelay(1);
1209 count++;
1210 }
1211 ASSERT(!pipe->plane_state->status.is_flip_pending);
1212 }
1213 PERF_TRACE();
1214 }
1215
1216
1217
1218
1219
1220 struct dc *dc_create(const struct dc_init_data *init_params)
1221 {
1222 struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1223 unsigned int full_pipe_count;
1224
1225 if (!dc)
1226 return NULL;
1227
1228 if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
1229 if (!dc_construct_ctx(dc, init_params))
1230 goto destruct_dc;
1231 } else {
1232 if (!dc_construct(dc, init_params))
1233 goto destruct_dc;
1234
1235 full_pipe_count = dc->res_pool->pipe_count;
1236 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
1237 full_pipe_count--;
1238 dc->caps.max_streams = min(
1239 full_pipe_count,
1240 dc->res_pool->stream_enc_count);
1241
1242 dc->caps.max_links = dc->link_count;
1243 dc->caps.max_audios = dc->res_pool->audio_count;
1244 dc->caps.linear_pitch_alignment = 64;
1245
1246 dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1247
1248 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
1249
1250 if (dc->res_pool->dmcu != NULL)
1251 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1252 }
1253
1254 dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
1255 dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
1256
1257
1258 dc->versions.dc_ver = DC_VER;
1259
1260 dc->build_id = DC_BUILD_ID;
1261
1262 DC_LOG_DC("Display Core initialized\n");
1263
1264
1265
1266 return dc;
1267
1268 destruct_dc:
1269 dc_destruct(dc);
1270 kfree(dc);
1271 return NULL;
1272 }
1273
1274 static void detect_edp_presence(struct dc *dc)
1275 {
1276 struct dc_link *edp_links[MAX_NUM_EDP];
1277 struct dc_link *edp_link = NULL;
1278 enum dc_connection_type type;
1279 int i;
1280 int edp_num;
1281
1282 get_edp_links(dc, edp_links, &edp_num);
1283 if (!edp_num)
1284 return;
1285
1286 for (i = 0; i < edp_num; i++) {
1287 edp_link = edp_links[i];
1288 if (dc->config.edp_not_connected) {
1289 edp_link->edp_sink_present = false;
1290 } else {
1291 dc_link_detect_sink(edp_link, &type);
1292 edp_link->edp_sink_present = (type != dc_connection_none);
1293 }
1294 }
1295 }
1296
1297 void dc_hardware_init(struct dc *dc)
1298 {
1299
1300 detect_edp_presence(dc);
1301 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1302 dc->hwss.init_hw(dc);
1303 }
1304
1305 void dc_init_callbacks(struct dc *dc,
1306 const struct dc_callback_init *init_params)
1307 {
1308 #ifdef CONFIG_DRM_AMD_DC_HDCP
1309 dc->ctx->cp_psp = init_params->cp_psp;
1310 #endif
1311 }
1312
1313 void dc_deinit_callbacks(struct dc *dc)
1314 {
1315 #ifdef CONFIG_DRM_AMD_DC_HDCP
1316 memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1317 #endif
1318 }
1319
1320 void dc_destroy(struct dc **dc)
1321 {
1322 dc_destruct(*dc);
1323 kfree(*dc);
1324 *dc = NULL;
1325 }
1326
1327 static void enable_timing_multisync(
1328 struct dc *dc,
1329 struct dc_state *ctx)
1330 {
1331 int i, multisync_count = 0;
1332 int pipe_count = dc->res_pool->pipe_count;
1333 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1334
1335 for (i = 0; i < pipe_count; i++) {
1336 if (!ctx->res_ctx.pipe_ctx[i].stream ||
1337 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1338 continue;
1339 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1340 continue;
1341 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1342 multisync_count++;
1343 }
1344
1345 if (multisync_count > 0) {
1346 dc->hwss.enable_per_frame_crtc_position_reset(
1347 dc, multisync_count, multisync_pipes);
1348 }
1349 }
1350
1351 static void program_timing_sync(
1352 struct dc *dc,
1353 struct dc_state *ctx)
1354 {
1355 int i, j, k;
1356 int group_index = 0;
1357 int num_group = 0;
1358 int pipe_count = dc->res_pool->pipe_count;
1359 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1360
1361 for (i = 0; i < pipe_count; i++) {
1362 if (!ctx->res_ctx.pipe_ctx[i].stream
1363 || ctx->res_ctx.pipe_ctx[i].top_pipe
1364 || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
1365 continue;
1366
1367 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1368 }
1369
1370 for (i = 0; i < pipe_count; i++) {
1371 int group_size = 1;
1372 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1373 struct pipe_ctx *pipe_set[MAX_PIPES];
1374
1375 if (!unsynced_pipes[i])
1376 continue;
1377
1378 pipe_set[0] = unsynced_pipes[i];
1379 unsynced_pipes[i] = NULL;
1380
1381
1382
1383
1384 for (j = i + 1; j < pipe_count; j++) {
1385 if (!unsynced_pipes[j])
1386 continue;
1387 if (sync_type != TIMING_SYNCHRONIZABLE &&
1388 dc->hwss.enable_vblanks_synchronization &&
1389 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1390 resource_are_vblanks_synchronizable(
1391 unsynced_pipes[j]->stream,
1392 pipe_set[0]->stream)) {
1393 sync_type = VBLANK_SYNCHRONIZABLE;
1394 pipe_set[group_size] = unsynced_pipes[j];
1395 unsynced_pipes[j] = NULL;
1396 group_size++;
1397 } else
1398 if (sync_type != VBLANK_SYNCHRONIZABLE &&
1399 resource_are_streams_timing_synchronizable(
1400 unsynced_pipes[j]->stream,
1401 pipe_set[0]->stream)) {
1402 sync_type = TIMING_SYNCHRONIZABLE;
1403 pipe_set[group_size] = unsynced_pipes[j];
1404 unsynced_pipes[j] = NULL;
1405 group_size++;
1406 }
1407 }
1408
1409
1410 for (j = 0; j < group_size; j++) {
1411 bool is_blanked;
1412
1413 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1414 is_blanked =
1415 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1416 else
1417 is_blanked =
1418 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1419 if (!is_blanked) {
1420 if (j == 0)
1421 break;
1422
1423 swap(pipe_set[0], pipe_set[j]);
1424 break;
1425 }
1426 }
1427
1428 for (k = 0; k < group_size; k++) {
1429 struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
1430
1431 status->timing_sync_info.group_id = num_group;
1432 status->timing_sync_info.group_size = group_size;
1433 if (k == 0)
1434 status->timing_sync_info.master = true;
1435 else
1436 status->timing_sync_info.master = false;
1437
1438 }
1439
1440
1441 if (dc->config.use_pipe_ctx_sync_logic) {
1442
1443 for (j = 1; j < group_size; j++) {
1444 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
1445 group_size--;
1446 pipe_set[j] = pipe_set[group_size];
1447 j--;
1448 } else
1449
1450 pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
1451 }
1452 } else {
1453 for (j = j + 1; j < group_size; j++) {
1454 bool is_blanked;
1455
1456 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1457 is_blanked =
1458 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1459 else
1460 is_blanked =
1461 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1462 if (!is_blanked) {
1463 group_size--;
1464 pipe_set[j] = pipe_set[group_size];
1465 j--;
1466 }
1467 }
1468 }
1469
1470 if (group_size > 1) {
1471 if (sync_type == TIMING_SYNCHRONIZABLE) {
1472 dc->hwss.enable_timing_synchronization(
1473 dc, group_index, group_size, pipe_set);
1474 } else
1475 if (sync_type == VBLANK_SYNCHRONIZABLE) {
1476 dc->hwss.enable_vblanks_synchronization(
1477 dc, group_index, group_size, pipe_set);
1478 }
1479 group_index++;
1480 }
1481 num_group++;
1482 }
1483 }
1484
1485 static bool context_changed(
1486 struct dc *dc,
1487 struct dc_state *context)
1488 {
1489 uint8_t i;
1490
1491 if (context->stream_count != dc->current_state->stream_count)
1492 return true;
1493
1494 for (i = 0; i < dc->current_state->stream_count; i++) {
1495 if (dc->current_state->streams[i] != context->streams[i])
1496 return true;
1497 }
1498
1499 return false;
1500 }
1501
1502 bool dc_validate_boot_timing(const struct dc *dc,
1503 const struct dc_sink *sink,
1504 struct dc_crtc_timing *crtc_timing)
1505 {
1506 struct timing_generator *tg;
1507 struct stream_encoder *se = NULL;
1508
1509 struct dc_crtc_timing hw_crtc_timing = {0};
1510
1511 struct dc_link *link = sink->link;
1512 unsigned int i, enc_inst, tg_inst = 0;
1513
1514
1515 if (sink->sink_signal != SIGNAL_TYPE_EDP) {
1516 return false;
1517 }
1518
1519
1520 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1521 return false;
1522
1523 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1524
1525 if (enc_inst == ENGINE_ID_UNKNOWN)
1526 return false;
1527
1528 for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1529 if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1530
1531 se = dc->res_pool->stream_enc[i];
1532
1533 tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1534 dc->res_pool->stream_enc[i]);
1535 break;
1536 }
1537 }
1538
1539
1540 if (i == dc->res_pool->stream_enc_count)
1541 return false;
1542
1543 if (tg_inst >= dc->res_pool->timing_generator_count)
1544 return false;
1545
1546 tg = dc->res_pool->timing_generators[tg_inst];
1547
1548 if (!tg->funcs->get_hw_timing)
1549 return false;
1550
1551 if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1552 return false;
1553
1554 if (crtc_timing->h_total != hw_crtc_timing.h_total)
1555 return false;
1556
1557 if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1558 return false;
1559
1560 if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1561 return false;
1562
1563 if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1564 return false;
1565
1566 if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1567 return false;
1568
1569 if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1570 return false;
1571
1572 if (crtc_timing->v_total != hw_crtc_timing.v_total)
1573 return false;
1574
1575 if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1576 return false;
1577
1578 if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1579 return false;
1580
1581 if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1582 return false;
1583
1584 if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1585 return false;
1586
1587 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1588 return false;
1589
1590
1591 if (crtc_timing->flags.DSC)
1592 return false;
1593
1594 if (dc_is_dp_signal(link->connector_signal)) {
1595 unsigned int pix_clk_100hz;
1596 uint32_t numOdmPipes = 1;
1597 uint32_t id_src[4] = {0};
1598
1599 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1600 dc->res_pool->dp_clock_source,
1601 tg_inst, &pix_clk_100hz);
1602
1603 if (tg->funcs->get_optc_source)
1604 tg->funcs->get_optc_source(tg,
1605 &numOdmPipes, &id_src[0], &id_src[1]);
1606
1607 if (numOdmPipes == 2)
1608 pix_clk_100hz *= 2;
1609 if (numOdmPipes == 4)
1610 pix_clk_100hz *= 4;
1611
1612
1613
1614 if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1615 return false;
1616
1617 if (!se->funcs->dp_get_pixel_format)
1618 return false;
1619
1620 if (!se->funcs->dp_get_pixel_format(
1621 se,
1622 &hw_crtc_timing.pixel_encoding,
1623 &hw_crtc_timing.display_color_depth))
1624 return false;
1625
1626 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1627 return false;
1628
1629 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1630 return false;
1631 }
1632
1633 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
1634 return false;
1635 }
1636
1637 if (is_edp_ilr_optimization_required(link, crtc_timing)) {
1638 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1639 return false;
1640 }
1641
1642 return true;
1643 }
1644
1645 static inline bool should_update_pipe_for_stream(
1646 struct dc_state *context,
1647 struct pipe_ctx *pipe_ctx,
1648 struct dc_stream_state *stream)
1649 {
1650 return (pipe_ctx->stream && pipe_ctx->stream == stream);
1651 }
1652
1653 static inline bool should_update_pipe_for_plane(
1654 struct dc_state *context,
1655 struct pipe_ctx *pipe_ctx,
1656 struct dc_plane_state *plane_state)
1657 {
1658 return (pipe_ctx->plane_state == plane_state);
1659 }
1660
1661 void dc_enable_stereo(
1662 struct dc *dc,
1663 struct dc_state *context,
1664 struct dc_stream_state *streams[],
1665 uint8_t stream_count)
1666 {
1667 int i, j;
1668 struct pipe_ctx *pipe;
1669
1670 for (i = 0; i < MAX_PIPES; i++) {
1671 if (context != NULL) {
1672 pipe = &context->res_ctx.pipe_ctx[i];
1673 } else {
1674 context = dc->current_state;
1675 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1676 }
1677
1678 for (j = 0; pipe && j < stream_count; j++) {
1679 if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1680 dc->hwss.setup_stereo)
1681 dc->hwss.setup_stereo(pipe, dc);
1682 }
1683 }
1684 }
1685
1686 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1687 {
1688 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1689 enable_timing_multisync(dc, context);
1690 program_timing_sync(dc, context);
1691 }
1692 }
1693
1694 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1695 {
1696 int i;
1697 unsigned int stream_mask = 0;
1698
1699 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1700 if (context->res_ctx.pipe_ctx[i].stream)
1701 stream_mask |= 1 << i;
1702 }
1703
1704 return stream_mask;
1705 }
1706
1707 void dc_z10_restore(const struct dc *dc)
1708 {
1709 if (dc->hwss.z10_restore)
1710 dc->hwss.z10_restore(dc);
1711 }
1712
1713 void dc_z10_save_init(struct dc *dc)
1714 {
1715 if (dc->hwss.z10_save_init)
1716 dc->hwss.z10_save_init(dc);
1717 }
1718
1719
1720
1721
1722
1723 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1724 {
1725 struct dc_bios *dcb = dc->ctx->dc_bios;
1726 enum dc_status result = DC_ERROR_UNEXPECTED;
1727 struct pipe_ctx *pipe;
1728 int i, k, l;
1729 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1730 struct dc_state *old_state;
1731
1732 dc_z10_restore(dc);
1733 dc_allow_idle_optimizations(dc, false);
1734
1735 for (i = 0; i < context->stream_count; i++)
1736 dc_streams[i] = context->streams[i];
1737
1738 if (!dcb->funcs->is_accelerated_mode(dcb)) {
1739 disable_vbios_mode_if_required(dc, context);
1740 dc->hwss.enable_accelerated_mode(dc, context);
1741 }
1742
1743 if (context->stream_count > get_seamless_boot_stream_count(context) ||
1744 context->stream_count == 0)
1745 dc->hwss.prepare_bandwidth(dc, context);
1746
1747 disable_dangling_plane(dc, context);
1748
1749
1750
1751 if (dc->hwss.apply_ctx_for_surface) {
1752 for (i = 0; i < context->stream_count; i++) {
1753 if (context->streams[i]->mode_changed)
1754 continue;
1755 apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1756 dc->hwss.apply_ctx_for_surface(
1757 dc, context->streams[i],
1758 context->stream_status[i].plane_count,
1759 context);
1760 apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1761 dc->hwss.post_unlock_program_front_end(dc, context);
1762 }
1763 }
1764
1765
1766 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1767 pipe = &context->res_ctx.pipe_ctx[i];
1768 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1769 }
1770
1771 result = dc->hwss.apply_ctx_to_hw(dc, context);
1772
1773 if (result != DC_OK) {
1774
1775 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
1776 return result;
1777 }
1778
1779 dc_trigger_sync(dc, context);
1780
1781
1782 if (dc->hwss.program_front_end_for_ctx) {
1783 dc->hwss.interdependent_update_lock(dc, context, true);
1784 dc->hwss.program_front_end_for_ctx(dc, context);
1785 dc->hwss.interdependent_update_lock(dc, context, false);
1786 dc->hwss.post_unlock_program_front_end(dc, context);
1787 }
1788 for (i = 0; i < context->stream_count; i++) {
1789 const struct dc_link *link = context->streams[i]->link;
1790
1791 if (!context->streams[i]->mode_changed)
1792 continue;
1793
1794 if (dc->hwss.apply_ctx_for_surface) {
1795 apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1796 dc->hwss.apply_ctx_for_surface(
1797 dc, context->streams[i],
1798 context->stream_status[i].plane_count,
1799 context);
1800 apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1801 dc->hwss.post_unlock_program_front_end(dc, context);
1802 }
1803
1804
1805
1806
1807
1808 for (k = 0; k < MAX_PIPES; k++) {
1809 pipe = &context->res_ctx.pipe_ctx[k];
1810
1811 for (l = 0 ; pipe && l < context->stream_count; l++) {
1812 if (context->streams[l] &&
1813 context->streams[l] == pipe->stream &&
1814 dc->hwss.setup_stereo)
1815 dc->hwss.setup_stereo(pipe, dc);
1816 }
1817 }
1818
1819 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
1820 context->streams[i]->timing.h_addressable,
1821 context->streams[i]->timing.v_addressable,
1822 context->streams[i]->timing.h_total,
1823 context->streams[i]->timing.v_total,
1824 context->streams[i]->timing.pix_clk_100hz / 10);
1825 }
1826
1827 dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1828
1829 if (context->stream_count > get_seamless_boot_stream_count(context) ||
1830 context->stream_count == 0) {
1831
1832 wait_for_no_pipes_pending(dc, context);
1833
1834 dc->hwss.optimize_bandwidth(dc, context);
1835 }
1836
1837 if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1838 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1839 else
1840 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1841
1842 context->stream_mask = get_stream_mask(dc, context);
1843
1844 if (context->stream_mask != dc->current_state->stream_mask)
1845 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
1846
1847 for (i = 0; i < context->stream_count; i++)
1848 context->streams[i]->mode_changed = false;
1849
1850 old_state = dc->current_state;
1851 dc->current_state = context;
1852
1853 dc_release_state(old_state);
1854
1855 dc_retain_state(dc->current_state);
1856
1857 return result;
1858 }
1859
1860 bool dc_commit_state(struct dc *dc, struct dc_state *context)
1861 {
1862 enum dc_status result = DC_ERROR_UNEXPECTED;
1863 int i;
1864
1865 if (!context_changed(dc, context))
1866 return DC_OK;
1867
1868 DC_LOG_DC("%s: %d streams\n",
1869 __func__, context->stream_count);
1870
1871 for (i = 0; i < context->stream_count; i++) {
1872 struct dc_stream_state *stream = context->streams[i];
1873
1874 dc_stream_log(dc, stream);
1875 }
1876
1877
1878
1879
1880
1881
1882
1883 result = dc_validate_global_state(dc, context, false);
1884 if (result != DC_OK) {
1885 DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
1886 dc_status_to_str(result), result);
1887 return result;
1888 }
1889
1890 result = dc_commit_state_no_check(dc, context);
1891
1892 return (result == DC_OK);
1893 }
1894
1895 bool dc_acquire_release_mpc_3dlut(
1896 struct dc *dc, bool acquire,
1897 struct dc_stream_state *stream,
1898 struct dc_3dlut **lut,
1899 struct dc_transfer_func **shaper)
1900 {
1901 int pipe_idx;
1902 bool ret = false;
1903 bool found_pipe_idx = false;
1904 const struct resource_pool *pool = dc->res_pool;
1905 struct resource_context *res_ctx = &dc->current_state->res_ctx;
1906 int mpcc_id = 0;
1907
1908 if (pool && res_ctx) {
1909 if (acquire) {
1910
1911 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
1912 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
1913 found_pipe_idx = true;
1914 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
1915 break;
1916 }
1917 }
1918 } else
1919 found_pipe_idx = true;
1920
1921 if (found_pipe_idx) {
1922 if (acquire && pool->funcs->acquire_post_bldn_3dlut)
1923 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
1924 else if (!acquire && pool->funcs->release_post_bldn_3dlut)
1925 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
1926 }
1927 }
1928 return ret;
1929 }
1930
1931 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
1932 {
1933 int i;
1934 struct pipe_ctx *pipe;
1935
1936 for (i = 0; i < MAX_PIPES; i++) {
1937 pipe = &context->res_ctx.pipe_ctx[i];
1938
1939
1940 if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
1941 continue;
1942
1943
1944 pipe->plane_state->status.is_flip_pending = false;
1945 dc->hwss.update_pending_status(pipe);
1946 if (pipe->plane_state->status.is_flip_pending)
1947 return true;
1948 }
1949 return false;
1950 }
1951
1952
1953
1954
1955
1956
1957
1958
1959 static void process_deferred_updates(struct dc *dc)
1960 {
1961 int i = 0;
1962
1963 if (dc->debug.enable_mem_low_power.bits.cm) {
1964 ASSERT(dc->dcn_ip->max_num_dpp);
1965 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
1966 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
1967 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
1968 }
1969 }
1970
1971 void dc_post_update_surfaces_to_stream(struct dc *dc)
1972 {
1973 int i;
1974 struct dc_state *context = dc->current_state;
1975
1976 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
1977 return;
1978
1979 post_surface_trace(dc);
1980
1981 if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1982 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1983 else
1984 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1985
1986 if (is_flip_pending_in_pipes(dc, context))
1987 return;
1988
1989 for (i = 0; i < dc->res_pool->pipe_count; i++)
1990 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
1991 context->res_ctx.pipe_ctx[i].plane_state == NULL) {
1992 context->res_ctx.pipe_ctx[i].pipe_idx = i;
1993 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
1994 }
1995
1996 process_deferred_updates(dc);
1997
1998 dc->hwss.optimize_bandwidth(dc, context);
1999
2000 dc->optimized_required = false;
2001 dc->wm_optimized_required = false;
2002 }
2003
2004 static void init_state(struct dc *dc, struct dc_state *context)
2005 {
2006
2007
2008
2009
2010 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
2011 }
2012
2013 struct dc_state *dc_create_state(struct dc *dc)
2014 {
2015 struct dc_state *context = kvzalloc(sizeof(struct dc_state),
2016 GFP_KERNEL);
2017
2018 if (!context)
2019 return NULL;
2020
2021 init_state(dc, context);
2022
2023 kref_init(&context->refcount);
2024
2025 return context;
2026 }
2027
2028 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
2029 {
2030 int i, j;
2031 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
2032
2033 if (!new_ctx)
2034 return NULL;
2035 memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
2036
2037 for (i = 0; i < MAX_PIPES; i++) {
2038 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
2039
2040 if (cur_pipe->top_pipe)
2041 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
2042
2043 if (cur_pipe->bottom_pipe)
2044 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
2045
2046 if (cur_pipe->prev_odm_pipe)
2047 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
2048
2049 if (cur_pipe->next_odm_pipe)
2050 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
2051
2052 }
2053
2054 for (i = 0; i < new_ctx->stream_count; i++) {
2055 dc_stream_retain(new_ctx->streams[i]);
2056 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
2057 dc_plane_state_retain(
2058 new_ctx->stream_status[i].plane_states[j]);
2059 }
2060
2061 kref_init(&new_ctx->refcount);
2062
2063 return new_ctx;
2064 }
2065
2066 void dc_retain_state(struct dc_state *context)
2067 {
2068 kref_get(&context->refcount);
2069 }
2070
2071 static void dc_state_free(struct kref *kref)
2072 {
2073 struct dc_state *context = container_of(kref, struct dc_state, refcount);
2074 dc_resource_state_destruct(context);
2075 kvfree(context);
2076 }
2077
2078 void dc_release_state(struct dc_state *context)
2079 {
2080 kref_put(&context->refcount, dc_state_free);
2081 }
2082
2083 bool dc_set_generic_gpio_for_stereo(bool enable,
2084 struct gpio_service *gpio_service)
2085 {
2086 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2087 struct gpio_pin_info pin_info;
2088 struct gpio *generic;
2089 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2090 GFP_KERNEL);
2091
2092 if (!config)
2093 return false;
2094 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2095
2096 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2097 kfree(config);
2098 return false;
2099 } else {
2100 generic = dal_gpio_service_create_generic_mux(
2101 gpio_service,
2102 pin_info.offset,
2103 pin_info.mask);
2104 }
2105
2106 if (!generic) {
2107 kfree(config);
2108 return false;
2109 }
2110
2111 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2112
2113 config->enable_output_from_mux = enable;
2114 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2115
2116 if (gpio_result == GPIO_RESULT_OK)
2117 gpio_result = dal_mux_setup_config(generic, config);
2118
2119 if (gpio_result == GPIO_RESULT_OK) {
2120 dal_gpio_close(generic);
2121 dal_gpio_destroy_generic_mux(&generic);
2122 kfree(config);
2123 return true;
2124 } else {
2125 dal_gpio_close(generic);
2126 dal_gpio_destroy_generic_mux(&generic);
2127 kfree(config);
2128 return false;
2129 }
2130 }
2131
2132 static bool is_surface_in_context(
2133 const struct dc_state *context,
2134 const struct dc_plane_state *plane_state)
2135 {
2136 int j;
2137
2138 for (j = 0; j < MAX_PIPES; j++) {
2139 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2140
2141 if (plane_state == pipe_ctx->plane_state) {
2142 return true;
2143 }
2144 }
2145
2146 return false;
2147 }
2148
2149 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2150 {
2151 union surface_update_flags *update_flags = &u->surface->update_flags;
2152 enum surface_update_type update_type = UPDATE_TYPE_FAST;
2153
2154 if (!u->plane_info)
2155 return UPDATE_TYPE_FAST;
2156
2157 if (u->plane_info->color_space != u->surface->color_space) {
2158 update_flags->bits.color_space_change = 1;
2159 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2160 }
2161
2162 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2163 update_flags->bits.horizontal_mirror_change = 1;
2164 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2165 }
2166
2167 if (u->plane_info->rotation != u->surface->rotation) {
2168 update_flags->bits.rotation_change = 1;
2169 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2170 }
2171
2172 if (u->plane_info->format != u->surface->format) {
2173 update_flags->bits.pixel_format_change = 1;
2174 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2175 }
2176
2177 if (u->plane_info->stereo_format != u->surface->stereo_format) {
2178 update_flags->bits.stereo_format_change = 1;
2179 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2180 }
2181
2182 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2183 update_flags->bits.per_pixel_alpha_change = 1;
2184 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2185 }
2186
2187 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2188 update_flags->bits.global_alpha_change = 1;
2189 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2190 }
2191
2192 if (u->plane_info->dcc.enable != u->surface->dcc.enable
2193 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2194 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2195
2196
2197
2198
2199
2200 update_flags->bits.dcc_change = 1;
2201 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2202 }
2203
2204 if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2205 resource_pixel_format_to_bpp(u->surface->format)) {
2206
2207
2208
2209 update_flags->bits.bpp_change = 1;
2210 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2211 }
2212
2213 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2214 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2215 update_flags->bits.plane_size_change = 1;
2216 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2217 }
2218
2219
2220 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2221 sizeof(union dc_tiling_info)) != 0) {
2222 update_flags->bits.swizzle_change = 1;
2223 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2224
2225
2226
2227
2228 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2229
2230
2231
2232 update_flags->bits.bandwidth_change = 1;
2233 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2234 }
2235 }
2236
2237
2238 return update_type;
2239 }
2240
2241 static enum surface_update_type get_scaling_info_update_type(
2242 const struct dc_surface_update *u)
2243 {
2244 union surface_update_flags *update_flags = &u->surface->update_flags;
2245
2246 if (!u->scaling_info)
2247 return UPDATE_TYPE_FAST;
2248
2249 if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
2250 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
2251 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2252 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2253 || u->scaling_info->scaling_quality.integer_scaling !=
2254 u->surface->scaling_quality.integer_scaling
2255 ) {
2256 update_flags->bits.scaling_change = 1;
2257
2258 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2259 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2260 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2261 || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2262
2263 update_flags->bits.bandwidth_change = 1;
2264 }
2265
2266 if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2267 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2268
2269 update_flags->bits.scaling_change = 1;
2270 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2271 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2272
2273 update_flags->bits.clock_change = 1;
2274 }
2275
2276 if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2277 || u->scaling_info->src_rect.y != u->surface->src_rect.y
2278 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2279 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2280 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2281 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2282 update_flags->bits.position_change = 1;
2283
2284 if (update_flags->bits.clock_change
2285 || update_flags->bits.bandwidth_change
2286 || update_flags->bits.scaling_change)
2287 return UPDATE_TYPE_FULL;
2288
2289 if (update_flags->bits.position_change)
2290 return UPDATE_TYPE_MED;
2291
2292 return UPDATE_TYPE_FAST;
2293 }
2294
2295 static enum surface_update_type det_surface_update(const struct dc *dc,
2296 const struct dc_surface_update *u)
2297 {
2298 const struct dc_state *context = dc->current_state;
2299 enum surface_update_type type;
2300 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2301 union surface_update_flags *update_flags = &u->surface->update_flags;
2302
2303 if (u->flip_addr)
2304 update_flags->bits.addr_update = 1;
2305
2306 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2307 update_flags->raw = 0xFFFFFFFF;
2308 return UPDATE_TYPE_FULL;
2309 }
2310
2311 update_flags->raw = 0;
2312
2313 type = get_plane_info_update_type(u);
2314 elevate_update_type(&overall_type, type);
2315
2316 type = get_scaling_info_update_type(u);
2317 elevate_update_type(&overall_type, type);
2318
2319 if (u->flip_addr)
2320 update_flags->bits.addr_update = 1;
2321
2322 if (u->in_transfer_func)
2323 update_flags->bits.in_transfer_func_change = 1;
2324
2325 if (u->input_csc_color_matrix)
2326 update_flags->bits.input_csc_change = 1;
2327
2328 if (u->coeff_reduction_factor)
2329 update_flags->bits.coeff_reduction_change = 1;
2330
2331 if (u->gamut_remap_matrix)
2332 update_flags->bits.gamut_remap_change = 1;
2333
2334 if (u->gamma) {
2335 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2336
2337 if (u->plane_info)
2338 format = u->plane_info->format;
2339 else if (u->surface)
2340 format = u->surface->format;
2341
2342 if (dce_use_lut(format))
2343 update_flags->bits.gamma_change = 1;
2344 }
2345
2346 if (u->lut3d_func || u->func_shaper)
2347 update_flags->bits.lut_3d = 1;
2348
2349 if (u->hdr_mult.value)
2350 if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2351 update_flags->bits.hdr_mult = 1;
2352 elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2353 }
2354
2355 if (update_flags->bits.in_transfer_func_change) {
2356 type = UPDATE_TYPE_MED;
2357 elevate_update_type(&overall_type, type);
2358 }
2359
2360 if (update_flags->bits.input_csc_change
2361 || update_flags->bits.coeff_reduction_change
2362 || update_flags->bits.lut_3d
2363 || update_flags->bits.gamma_change
2364 || update_flags->bits.gamut_remap_change) {
2365 type = UPDATE_TYPE_FULL;
2366 elevate_update_type(&overall_type, type);
2367 }
2368
2369 return overall_type;
2370 }
2371
2372 static enum surface_update_type check_update_surfaces_for_stream(
2373 struct dc *dc,
2374 struct dc_surface_update *updates,
2375 int surface_count,
2376 struct dc_stream_update *stream_update,
2377 const struct dc_stream_status *stream_status)
2378 {
2379 int i;
2380 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2381
2382 if (dc->idle_optimizations_allowed)
2383 overall_type = UPDATE_TYPE_FULL;
2384
2385 if (stream_status == NULL || stream_status->plane_count != surface_count)
2386 overall_type = UPDATE_TYPE_FULL;
2387
2388 if (stream_update && stream_update->pending_test_pattern) {
2389 overall_type = UPDATE_TYPE_FULL;
2390 }
2391
2392
2393 if (stream_update) {
2394 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2395
2396 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2397 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2398 stream_update->integer_scaling_update)
2399 su_flags->bits.scaling = 1;
2400
2401 if (stream_update->out_transfer_func)
2402 su_flags->bits.out_tf = 1;
2403
2404 if (stream_update->abm_level)
2405 su_flags->bits.abm_level = 1;
2406
2407 if (stream_update->dpms_off)
2408 su_flags->bits.dpms_off = 1;
2409
2410 if (stream_update->gamut_remap)
2411 su_flags->bits.gamut_remap = 1;
2412
2413 if (stream_update->wb_update)
2414 su_flags->bits.wb_update = 1;
2415
2416 if (stream_update->dsc_config)
2417 su_flags->bits.dsc_changed = 1;
2418
2419 if (stream_update->mst_bw_update)
2420 su_flags->bits.mst_bw = 1;
2421 if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
2422 su_flags->bits.crtc_timing_adjust = 1;
2423
2424 if (su_flags->raw != 0)
2425 overall_type = UPDATE_TYPE_FULL;
2426
2427 if (stream_update->output_csc_transform || stream_update->output_color_space)
2428 su_flags->bits.out_csc = 1;
2429 }
2430
2431 for (i = 0 ; i < surface_count; i++) {
2432 enum surface_update_type type =
2433 det_surface_update(dc, &updates[i]);
2434
2435 elevate_update_type(&overall_type, type);
2436 }
2437
2438 return overall_type;
2439 }
2440
2441 static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect)
2442 {
2443 int view_height, view_width, clip_x, clip_y, clip_width, clip_height;
2444
2445 view_height = src.height;
2446 view_width = src.width;
2447
2448 clip_x = clip_rect.x;
2449 clip_y = clip_rect.y;
2450
2451 clip_width = clip_rect.width;
2452 clip_height = clip_rect.height;
2453
2454
2455 if ((view_height - clip_y - clip_height <= clip_y + 1) &&
2456 (view_width - clip_x - clip_width <= clip_x + 1) &&
2457 (view_height - clip_y - clip_height >= clip_y - 1) &&
2458 (view_width - clip_x - clip_width >= clip_x - 1)) {
2459
2460
2461
2462
2463
2464 if (clip_x <= 4 || clip_y <= 4)
2465 return true;
2466 }
2467
2468 return false;
2469 }
2470
2471 static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc,
2472 struct dc_surface_update *srf_updates, int surface_count,
2473 enum surface_update_type update_type)
2474 {
2475 enum surface_update_type new_update_type = update_type;
2476 int i, j;
2477 struct pipe_ctx *pipe = NULL;
2478 struct dc_stream_state *stream;
2479
2480
2481
2482
2483
2484
2485
2486 if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) {
2487 for (i = 0; i < surface_count; i++) {
2488 if (srf_updates[i].surface && srf_updates[i].scaling_info
2489 && srf_updates[i].surface->update_flags.bits.position_change) {
2490
2491 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2492 if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) {
2493 pipe = &dc->current_state->res_ctx.pipe_ctx[j];
2494 stream = pipe->stream;
2495 break;
2496 }
2497 }
2498
2499 if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream
2500 && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) {
2501 struct rect old_clip_rect, new_clip_rect;
2502 bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle;
2503 bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle;
2504
2505 old_clip_rect = srf_updates[i].surface->clip_rect;
2506 new_clip_rect = srf_updates[i].scaling_info->clip_rect;
2507
2508 old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
2509 old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
2510 old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right;
2511
2512 new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
2513 new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
2514 new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right;
2515
2516 if (old_clip_rect_left && new_clip_rect_middle)
2517 new_update_type = UPDATE_TYPE_FULL;
2518 else if (old_clip_rect_middle && new_clip_rect_right)
2519 new_update_type = UPDATE_TYPE_FULL;
2520 else if (old_clip_rect_right && new_clip_rect_middle)
2521 new_update_type = UPDATE_TYPE_FULL;
2522 else if (old_clip_rect_middle && new_clip_rect_left)
2523 new_update_type = UPDATE_TYPE_FULL;
2524 }
2525 }
2526 }
2527 }
2528 return new_update_type;
2529 }
2530
2531
2532
2533
2534
2535
2536 enum surface_update_type dc_check_update_surfaces_for_stream(
2537 struct dc *dc,
2538 struct dc_surface_update *updates,
2539 int surface_count,
2540 struct dc_stream_update *stream_update,
2541 const struct dc_stream_status *stream_status)
2542 {
2543 int i;
2544 enum surface_update_type type;
2545
2546 if (stream_update)
2547 stream_update->stream->update_flags.raw = 0;
2548 for (i = 0; i < surface_count; i++)
2549 updates[i].surface->update_flags.raw = 0;
2550
2551 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2552 if (type == UPDATE_TYPE_FULL) {
2553 if (stream_update) {
2554 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2555 stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2556 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2557 }
2558 for (i = 0; i < surface_count; i++)
2559 updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2560 }
2561
2562 if (type == UPDATE_TYPE_MED)
2563 type = check_boundary_crossing_for_windowed_mpo_with_odm(dc,
2564 updates, surface_count, type);
2565
2566 if (type == UPDATE_TYPE_FAST) {
2567
2568 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2569 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2570 dc->optimized_required = true;
2571
2572 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2573 dc->optimized_required = true;
2574 }
2575
2576 dc->optimized_required |= dc->wm_optimized_required;
2577 }
2578
2579 return type;
2580 }
2581
2582 static struct dc_stream_status *stream_get_status(
2583 struct dc_state *ctx,
2584 struct dc_stream_state *stream)
2585 {
2586 uint8_t i;
2587
2588 for (i = 0; i < ctx->stream_count; i++) {
2589 if (stream == ctx->streams[i]) {
2590 return &ctx->stream_status[i];
2591 }
2592 }
2593
2594 return NULL;
2595 }
2596
2597 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2598
2599 static void copy_surface_update_to_plane(
2600 struct dc_plane_state *surface,
2601 struct dc_surface_update *srf_update)
2602 {
2603 if (srf_update->flip_addr) {
2604 surface->address = srf_update->flip_addr->address;
2605 surface->flip_immediate =
2606 srf_update->flip_addr->flip_immediate;
2607 surface->time.time_elapsed_in_us[surface->time.index] =
2608 srf_update->flip_addr->flip_timestamp_in_us -
2609 surface->time.prev_update_time_in_us;
2610 surface->time.prev_update_time_in_us =
2611 srf_update->flip_addr->flip_timestamp_in_us;
2612 surface->time.index++;
2613 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2614 surface->time.index = 0;
2615
2616 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2617 }
2618
2619 if (srf_update->scaling_info) {
2620 surface->scaling_quality =
2621 srf_update->scaling_info->scaling_quality;
2622 surface->dst_rect =
2623 srf_update->scaling_info->dst_rect;
2624 surface->src_rect =
2625 srf_update->scaling_info->src_rect;
2626 surface->clip_rect =
2627 srf_update->scaling_info->clip_rect;
2628 }
2629
2630 if (srf_update->plane_info) {
2631 surface->color_space =
2632 srf_update->plane_info->color_space;
2633 surface->format =
2634 srf_update->plane_info->format;
2635 surface->plane_size =
2636 srf_update->plane_info->plane_size;
2637 surface->rotation =
2638 srf_update->plane_info->rotation;
2639 surface->horizontal_mirror =
2640 srf_update->plane_info->horizontal_mirror;
2641 surface->stereo_format =
2642 srf_update->plane_info->stereo_format;
2643 surface->tiling_info =
2644 srf_update->plane_info->tiling_info;
2645 surface->visible =
2646 srf_update->plane_info->visible;
2647 surface->per_pixel_alpha =
2648 srf_update->plane_info->per_pixel_alpha;
2649 surface->global_alpha =
2650 srf_update->plane_info->global_alpha;
2651 surface->global_alpha_value =
2652 srf_update->plane_info->global_alpha_value;
2653 surface->dcc =
2654 srf_update->plane_info->dcc;
2655 surface->layer_index =
2656 srf_update->plane_info->layer_index;
2657 }
2658
2659 if (srf_update->gamma &&
2660 (surface->gamma_correction !=
2661 srf_update->gamma)) {
2662 memcpy(&surface->gamma_correction->entries,
2663 &srf_update->gamma->entries,
2664 sizeof(struct dc_gamma_entries));
2665 surface->gamma_correction->is_identity =
2666 srf_update->gamma->is_identity;
2667 surface->gamma_correction->num_entries =
2668 srf_update->gamma->num_entries;
2669 surface->gamma_correction->type =
2670 srf_update->gamma->type;
2671 }
2672
2673 if (srf_update->in_transfer_func &&
2674 (surface->in_transfer_func !=
2675 srf_update->in_transfer_func)) {
2676 surface->in_transfer_func->sdr_ref_white_level =
2677 srf_update->in_transfer_func->sdr_ref_white_level;
2678 surface->in_transfer_func->tf =
2679 srf_update->in_transfer_func->tf;
2680 surface->in_transfer_func->type =
2681 srf_update->in_transfer_func->type;
2682 memcpy(&surface->in_transfer_func->tf_pts,
2683 &srf_update->in_transfer_func->tf_pts,
2684 sizeof(struct dc_transfer_func_distributed_points));
2685 }
2686
2687 if (srf_update->func_shaper &&
2688 (surface->in_shaper_func !=
2689 srf_update->func_shaper))
2690 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2691 sizeof(*surface->in_shaper_func));
2692
2693 if (srf_update->lut3d_func &&
2694 (surface->lut3d_func !=
2695 srf_update->lut3d_func))
2696 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2697 sizeof(*surface->lut3d_func));
2698
2699 if (srf_update->hdr_mult.value)
2700 surface->hdr_mult =
2701 srf_update->hdr_mult;
2702
2703 if (srf_update->blend_tf &&
2704 (surface->blend_tf !=
2705 srf_update->blend_tf))
2706 memcpy(surface->blend_tf, srf_update->blend_tf,
2707 sizeof(*surface->blend_tf));
2708
2709 if (srf_update->input_csc_color_matrix)
2710 surface->input_csc_color_matrix =
2711 *srf_update->input_csc_color_matrix;
2712
2713 if (srf_update->coeff_reduction_factor)
2714 surface->coeff_reduction_factor =
2715 *srf_update->coeff_reduction_factor;
2716
2717 if (srf_update->gamut_remap_matrix)
2718 surface->gamut_remap_matrix =
2719 *srf_update->gamut_remap_matrix;
2720 }
2721
2722 static void copy_stream_update_to_stream(struct dc *dc,
2723 struct dc_state *context,
2724 struct dc_stream_state *stream,
2725 struct dc_stream_update *update)
2726 {
2727 struct dc_context *dc_ctx = dc->ctx;
2728
2729 if (update == NULL || stream == NULL)
2730 return;
2731
2732 if (update->src.height && update->src.width)
2733 stream->src = update->src;
2734
2735 if (update->dst.height && update->dst.width)
2736 stream->dst = update->dst;
2737
2738 if (update->out_transfer_func &&
2739 stream->out_transfer_func != update->out_transfer_func) {
2740 stream->out_transfer_func->sdr_ref_white_level =
2741 update->out_transfer_func->sdr_ref_white_level;
2742 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2743 stream->out_transfer_func->type =
2744 update->out_transfer_func->type;
2745 memcpy(&stream->out_transfer_func->tf_pts,
2746 &update->out_transfer_func->tf_pts,
2747 sizeof(struct dc_transfer_func_distributed_points));
2748 }
2749
2750 if (update->hdr_static_metadata)
2751 stream->hdr_static_metadata = *update->hdr_static_metadata;
2752
2753 if (update->abm_level)
2754 stream->abm_level = *update->abm_level;
2755
2756 if (update->periodic_interrupt0)
2757 stream->periodic_interrupt0 = *update->periodic_interrupt0;
2758
2759 if (update->periodic_interrupt1)
2760 stream->periodic_interrupt1 = *update->periodic_interrupt1;
2761
2762 if (update->gamut_remap)
2763 stream->gamut_remap_matrix = *update->gamut_remap;
2764
2765
2766
2767
2768
2769 if (update->output_color_space)
2770 stream->output_color_space = *update->output_color_space;
2771
2772 if (update->output_csc_transform)
2773 stream->csc_color_matrix = *update->output_csc_transform;
2774
2775 if (update->vrr_infopacket)
2776 stream->vrr_infopacket = *update->vrr_infopacket;
2777
2778 if (update->allow_freesync)
2779 stream->allow_freesync = *update->allow_freesync;
2780
2781 if (update->vrr_active_variable)
2782 stream->vrr_active_variable = *update->vrr_active_variable;
2783
2784 if (update->crtc_timing_adjust)
2785 stream->adjust = *update->crtc_timing_adjust;
2786
2787 if (update->dpms_off)
2788 stream->dpms_off = *update->dpms_off;
2789
2790 if (update->hfvsif_infopacket)
2791 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2792
2793 if (update->vtem_infopacket)
2794 stream->vtem_infopacket = *update->vtem_infopacket;
2795
2796 if (update->vsc_infopacket)
2797 stream->vsc_infopacket = *update->vsc_infopacket;
2798
2799 if (update->vsp_infopacket)
2800 stream->vsp_infopacket = *update->vsp_infopacket;
2801
2802 if (update->dither_option)
2803 stream->dither_option = *update->dither_option;
2804
2805 if (update->pending_test_pattern)
2806 stream->test_pattern = *update->pending_test_pattern;
2807
2808 if (update->wb_update) {
2809 int i;
2810
2811 stream->num_wb_info = update->wb_update->num_wb_info;
2812 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2813 for (i = 0; i < stream->num_wb_info; i++)
2814 stream->writeback_info[i] =
2815 update->wb_update->writeback_info[i];
2816 }
2817 if (update->dsc_config) {
2818 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2819 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2820 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2821 update->dsc_config->num_slices_v != 0);
2822
2823
2824 struct dc_state *dsc_validate_context = dc_create_state(dc);
2825
2826 if (dsc_validate_context) {
2827 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2828
2829 stream->timing.dsc_cfg = *update->dsc_config;
2830 stream->timing.flags.DSC = enable_dsc;
2831 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2832 stream->timing.dsc_cfg = old_dsc_cfg;
2833 stream->timing.flags.DSC = old_dsc_enabled;
2834 update->dsc_config = NULL;
2835 }
2836
2837 dc_release_state(dsc_validate_context);
2838 } else {
2839 DC_ERROR("Failed to allocate new validate context for DSC change\n");
2840 update->dsc_config = NULL;
2841 }
2842 }
2843 }
2844
2845 void dc_reset_state(struct dc *dc, struct dc_state *context)
2846 {
2847 dc_resource_state_destruct(context);
2848
2849
2850 memset(context, 0, offsetof(struct dc_state, refcount));
2851
2852 init_state(dc, context);
2853 }
2854
2855 static bool update_planes_and_stream_state(struct dc *dc,
2856 struct dc_surface_update *srf_updates, int surface_count,
2857 struct dc_stream_state *stream,
2858 struct dc_stream_update *stream_update,
2859 enum surface_update_type *new_update_type,
2860 struct dc_state **new_context)
2861 {
2862 struct dc_state *context;
2863 int i, j;
2864 enum surface_update_type update_type;
2865 const struct dc_stream_status *stream_status;
2866 struct dc_context *dc_ctx = dc->ctx;
2867
2868 stream_status = dc_stream_get_status(stream);
2869
2870 if (!stream_status) {
2871 if (surface_count)
2872 ASSERT(false);
2873
2874 return false;
2875 }
2876
2877 context = dc->current_state;
2878
2879 update_type = dc_check_update_surfaces_for_stream(
2880 dc, srf_updates, surface_count, stream_update, stream_status);
2881
2882
2883 copy_stream_update_to_stream(dc, context, stream, stream_update);
2884
2885
2886
2887
2888 if (surface_count > 0) {
2889 for (i = 0; i < surface_count; i++) {
2890 if ((srf_updates[i].surface->src_rect.width == 0 ||
2891 srf_updates[i].surface->src_rect.height == 0 ||
2892 srf_updates[i].surface->dst_rect.width == 0 ||
2893 srf_updates[i].surface->dst_rect.height == 0) &&
2894 (!srf_updates[i].scaling_info ||
2895 srf_updates[i].scaling_info->src_rect.width == 0 ||
2896 srf_updates[i].scaling_info->src_rect.height == 0 ||
2897 srf_updates[i].scaling_info->dst_rect.width == 0 ||
2898 srf_updates[i].scaling_info->dst_rect.height == 0)) {
2899 DC_ERROR("Invalid src/dst rects in surface update!\n");
2900 return false;
2901 }
2902 }
2903 }
2904
2905 if (update_type >= update_surface_trace_level)
2906 update_surface_trace(dc, srf_updates, surface_count);
2907
2908 if (update_type >= UPDATE_TYPE_FULL) {
2909 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
2910
2911 for (i = 0; i < surface_count; i++)
2912 new_planes[i] = srf_updates[i].surface;
2913
2914
2915 context = dc_create_state(dc);
2916 if (context == NULL) {
2917 DC_ERROR("Failed to allocate new validate context!\n");
2918 return false;
2919 }
2920
2921 dc_resource_state_copy_construct(
2922 dc->current_state, context);
2923
2924
2925 if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
2926
2927 BREAK_TO_DEBUGGER();
2928 goto fail;
2929 }
2930
2931
2932 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
2933
2934 BREAK_TO_DEBUGGER();
2935 goto fail;
2936 }
2937 }
2938
2939
2940 for (i = 0; i < surface_count; i++) {
2941 struct dc_plane_state *surface = srf_updates[i].surface;
2942
2943 copy_surface_update_to_plane(surface, &srf_updates[i]);
2944
2945 if (update_type >= UPDATE_TYPE_MED) {
2946 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2947 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2948
2949 if (pipe_ctx->plane_state != surface)
2950 continue;
2951
2952 resource_build_scaling_params(pipe_ctx);
2953 }
2954 }
2955 }
2956
2957 if (update_type == UPDATE_TYPE_FULL) {
2958 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
2959 BREAK_TO_DEBUGGER();
2960 goto fail;
2961 }
2962 }
2963
2964 *new_context = context;
2965 *new_update_type = update_type;
2966
2967 return true;
2968
2969 fail:
2970 dc_release_state(context);
2971
2972 return false;
2973
2974 }
2975
2976 static void commit_planes_do_stream_update(struct dc *dc,
2977 struct dc_stream_state *stream,
2978 struct dc_stream_update *stream_update,
2979 enum surface_update_type update_type,
2980 struct dc_state *context)
2981 {
2982 int j;
2983
2984
2985 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2986 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2987
2988 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
2989
2990 if (stream_update->periodic_interrupt0 &&
2991 dc->hwss.setup_periodic_interrupt)
2992 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
2993
2994 if (stream_update->periodic_interrupt1 &&
2995 dc->hwss.setup_periodic_interrupt)
2996 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
2997
2998 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
2999 stream_update->vrr_infopacket ||
3000 stream_update->vsc_infopacket ||
3001 stream_update->vsp_infopacket ||
3002 stream_update->hfvsif_infopacket ||
3003 stream_update->vtem_infopacket) {
3004 resource_build_info_frame(pipe_ctx);
3005 dc->hwss.update_info_frame(pipe_ctx);
3006
3007 if (dc_is_dp_signal(pipe_ctx->stream->signal))
3008 dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3009 }
3010
3011 if (stream_update->hdr_static_metadata &&
3012 stream->use_dynamic_meta &&
3013 dc->hwss.set_dmdata_attributes &&
3014 pipe_ctx->stream->dmdata_address.quad_part != 0)
3015 dc->hwss.set_dmdata_attributes(pipe_ctx);
3016
3017 if (stream_update->gamut_remap)
3018 dc_stream_set_gamut_remap(dc, stream);
3019
3020 if (stream_update->output_csc_transform)
3021 dc_stream_program_csc_matrix(dc, stream);
3022
3023 if (stream_update->dither_option) {
3024 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3025 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3026 &pipe_ctx->stream->bit_depth_params);
3027 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3028 &stream->bit_depth_params,
3029 &stream->clamping);
3030 while (odm_pipe) {
3031 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3032 &stream->bit_depth_params,
3033 &stream->clamping);
3034 odm_pipe = odm_pipe->next_odm_pipe;
3035 }
3036 }
3037
3038
3039
3040 if (update_type == UPDATE_TYPE_FAST)
3041 continue;
3042
3043 if (stream_update->dsc_config)
3044 dp_update_dsc_config(pipe_ctx);
3045
3046 if (stream_update->mst_bw_update) {
3047 if (stream_update->mst_bw_update->is_increase)
3048 dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
3049 else
3050 dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
3051 }
3052
3053 if (stream_update->pending_test_pattern) {
3054 dc_link_dp_set_test_pattern(stream->link,
3055 stream->test_pattern.type,
3056 stream->test_pattern.color_space,
3057 stream->test_pattern.p_link_settings,
3058 stream->test_pattern.p_custom_pattern,
3059 stream->test_pattern.cust_pattern_size);
3060 }
3061
3062 if (stream_update->dpms_off) {
3063 if (*stream_update->dpms_off) {
3064 core_link_disable_stream(pipe_ctx);
3065
3066 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3067 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3068
3069 dc->optimized_required = true;
3070
3071 } else {
3072 if (get_seamless_boot_stream_count(context) == 0)
3073 dc->hwss.prepare_bandwidth(dc, dc->current_state);
3074
3075 core_link_enable_stream(dc->current_state, pipe_ctx);
3076 }
3077 }
3078
3079 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3080 bool should_program_abm = true;
3081
3082
3083 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3084 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3085 should_program_abm = false;
3086
3087 if (should_program_abm) {
3088 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3089 dc->hwss.set_abm_immediate_disable(pipe_ctx);
3090 } else {
3091 pipe_ctx->stream_res.abm->funcs->set_abm_level(
3092 pipe_ctx->stream_res.abm, stream->abm_level);
3093 }
3094 }
3095 }
3096 }
3097 }
3098 }
3099
3100 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3101 {
3102 if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
3103 return true;
3104
3105 if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1 &&
3106 dc->debug.enable_sw_cntl_psr)
3107 return true;
3108
3109 return false;
3110 }
3111
3112 void dc_dmub_update_dirty_rect(struct dc *dc,
3113 int surface_count,
3114 struct dc_stream_state *stream,
3115 struct dc_surface_update *srf_updates,
3116 struct dc_state *context)
3117 {
3118 union dmub_rb_cmd cmd;
3119 struct dc_context *dc_ctx = dc->ctx;
3120 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3121 unsigned int i, j;
3122 unsigned int panel_inst = 0;
3123
3124 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3125 return;
3126
3127 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3128 return;
3129
3130 memset(&cmd, 0x0, sizeof(cmd));
3131 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3132 cmd.update_dirty_rect.header.sub_type = 0;
3133 cmd.update_dirty_rect.header.payload_bytes =
3134 sizeof(cmd.update_dirty_rect) -
3135 sizeof(cmd.update_dirty_rect.header);
3136 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3137 for (i = 0; i < surface_count; i++) {
3138 struct dc_plane_state *plane_state = srf_updates[i].surface;
3139 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3140
3141 if (!srf_updates[i].surface || !flip_addr)
3142 continue;
3143
3144 if (srf_updates[i].surface->flip_immediate)
3145 continue;
3146
3147 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3148 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3149 sizeof(flip_addr->dirty_rects));
3150 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3151 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3152
3153 if (pipe_ctx->stream != stream)
3154 continue;
3155 if (pipe_ctx->plane_state != plane_state)
3156 continue;
3157
3158 update_dirty_rect->panel_inst = panel_inst;
3159 update_dirty_rect->pipe_idx = j;
3160 dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd);
3161 dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv);
3162 }
3163 }
3164 }
3165
3166 static void commit_planes_for_stream(struct dc *dc,
3167 struct dc_surface_update *srf_updates,
3168 int surface_count,
3169 struct dc_stream_state *stream,
3170 struct dc_stream_update *stream_update,
3171 enum surface_update_type update_type,
3172 struct dc_state *context)
3173 {
3174 int i, j;
3175 struct pipe_ctx *top_pipe_to_program = NULL;
3176 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3177 bool subvp_prev_use = false;
3178
3179
3180
3181
3182 subvp_prev_use = false;
3183
3184
3185 dc_z10_restore(dc);
3186
3187 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
3188
3189
3190
3191
3192
3193
3194 if (stream->apply_seamless_boot_optimization) {
3195 stream->apply_seamless_boot_optimization = false;
3196
3197 if (get_seamless_boot_stream_count(context) == 0)
3198 dc->optimized_required = true;
3199 }
3200 }
3201
3202 if (update_type == UPDATE_TYPE_FULL) {
3203 dc_allow_idle_optimizations(dc, false);
3204
3205 if (get_seamless_boot_stream_count(context) == 0)
3206 dc->hwss.prepare_bandwidth(dc, context);
3207
3208 context_clock_trace(dc, context);
3209 }
3210
3211 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3212 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3213
3214 if (!pipe_ctx->top_pipe &&
3215 !pipe_ctx->prev_odm_pipe &&
3216 pipe_ctx->stream &&
3217 pipe_ctx->stream == stream) {
3218 top_pipe_to_program = pipe_ctx;
3219 }
3220 }
3221
3222 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3223 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3224
3225
3226 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
3227 if (subvp_prev_use)
3228 break;
3229 }
3230
3231 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3232 struct pipe_ctx *mpcc_pipe;
3233 struct pipe_ctx *odm_pipe;
3234
3235 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3236 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3237 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3238 }
3239
3240 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3241 if (top_pipe_to_program &&
3242 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3243 if (should_use_dmub_lock(stream->link)) {
3244 union dmub_hw_lock_flags hw_locks = { 0 };
3245 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3246
3247 hw_locks.bits.lock_dig = 1;
3248 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3249
3250 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3251 true,
3252 &hw_locks,
3253 &inst_flags);
3254 } else
3255 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3256 top_pipe_to_program->stream_res.tg);
3257 }
3258
3259 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3260 if (dc->hwss.subvp_pipe_control_lock)
3261 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3262 dc->hwss.interdependent_update_lock(dc, context, true);
3263
3264 } else {
3265 if (dc->hwss.subvp_pipe_control_lock)
3266 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3267
3268
3269
3270
3271 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3272 }
3273
3274 if (update_type != UPDATE_TYPE_FAST) {
3275 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3276 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3277
3278 if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
3279 subvp_prev_use) {
3280
3281
3282
3283
3284 dc->hwss.apply_ctx_to_hw(dc, context);
3285 break;
3286 }
3287 }
3288 }
3289
3290 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3291
3292 if (update_type != UPDATE_TYPE_FAST) {
3293 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3294 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3295
3296 if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
3297 subvp_prev_use) {
3298
3299
3300
3301
3302 dc->hwss.apply_ctx_to_hw(dc, context);
3303 break;
3304 }
3305 }
3306 }
3307
3308
3309 if (stream_update)
3310 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3311
3312 if (surface_count == 0) {
3313
3314
3315
3316
3317 if (dc->hwss.apply_ctx_for_surface)
3318 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3319 if (dc->hwss.program_front_end_for_ctx)
3320 dc->hwss.program_front_end_for_ctx(dc, context);
3321
3322 if (update_type != UPDATE_TYPE_FAST)
3323 if (dc->hwss.commit_subvp_config)
3324 dc->hwss.commit_subvp_config(dc, context);
3325
3326 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3327 dc->hwss.interdependent_update_lock(dc, context, false);
3328 } else {
3329 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3330 }
3331 dc->hwss.post_unlock_program_front_end(dc, context);
3332
3333
3334
3335
3336 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3337 if (dc->hwss.subvp_pipe_control_lock)
3338 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3339 } else {
3340 if (dc->hwss.subvp_pipe_control_lock)
3341 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3342 }
3343 return;
3344 }
3345
3346 if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
3347 for (i = 0; i < surface_count; i++) {
3348 struct dc_plane_state *plane_state = srf_updates[i].surface;
3349
3350 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3351 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3352 if (!pipe_ctx->plane_state)
3353 continue;
3354 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3355 continue;
3356 pipe_ctx->plane_state->triplebuffer_flips = false;
3357 if (update_type == UPDATE_TYPE_FAST &&
3358 dc->hwss.program_triplebuffer != NULL &&
3359 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3360
3361 pipe_ctx->plane_state->triplebuffer_flips = true;
3362 }
3363 }
3364 if (update_type == UPDATE_TYPE_FULL) {
3365
3366 plane_state->flip_immediate = false;
3367 }
3368 }
3369 }
3370
3371
3372 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3373 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3374
3375 if (!pipe_ctx->top_pipe &&
3376 !pipe_ctx->prev_odm_pipe &&
3377 should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3378 struct dc_stream_status *stream_status = NULL;
3379
3380 if (!pipe_ctx->plane_state)
3381 continue;
3382
3383
3384 if (update_type == UPDATE_TYPE_FAST)
3385 continue;
3386
3387 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3388
3389 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3390
3391 dc->hwss.program_triplebuffer(
3392 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3393 }
3394 stream_status =
3395 stream_get_status(context, pipe_ctx->stream);
3396
3397 if (dc->hwss.apply_ctx_for_surface)
3398 dc->hwss.apply_ctx_for_surface(
3399 dc, pipe_ctx->stream, stream_status->plane_count, context);
3400 }
3401 }
3402 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3403 dc->hwss.program_front_end_for_ctx(dc, context);
3404 if (dc->debug.validate_dml_output) {
3405 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3406 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3407 if (cur_pipe->stream == NULL)
3408 continue;
3409
3410 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3411 cur_pipe->plane_res.hubp, dc->ctx,
3412 &context->res_ctx.pipe_ctx[i].rq_regs,
3413 &context->res_ctx.pipe_ctx[i].dlg_regs,
3414 &context->res_ctx.pipe_ctx[i].ttu_regs);
3415 }
3416 }
3417 }
3418
3419
3420 if (update_type == UPDATE_TYPE_FAST) {
3421 if (dc->hwss.set_flip_control_gsl)
3422 for (i = 0; i < surface_count; i++) {
3423 struct dc_plane_state *plane_state = srf_updates[i].surface;
3424
3425 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3426 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3427
3428 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3429 continue;
3430
3431 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3432 continue;
3433
3434
3435 dc->hwss.set_flip_control_gsl(pipe_ctx,
3436 pipe_ctx->plane_state->flip_immediate);
3437 }
3438 }
3439
3440
3441 for (i = 0; i < surface_count; i++) {
3442 struct dc_plane_state *plane_state = srf_updates[i].surface;
3443
3444 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3445 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3446
3447 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3448 continue;
3449
3450 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3451 continue;
3452
3453
3454 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3455
3456 dc->hwss.program_triplebuffer(
3457 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3458 }
3459 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3460 dc->hwss.update_plane_addr(dc, pipe_ctx);
3461 }
3462 }
3463
3464 }
3465
3466 if (update_type != UPDATE_TYPE_FAST)
3467 if (dc->hwss.commit_subvp_config)
3468 dc->hwss.commit_subvp_config(dc, context);
3469
3470 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3471 dc->hwss.interdependent_update_lock(dc, context, false);
3472 } else {
3473 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3474 }
3475
3476 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3477 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3478 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3479 top_pipe_to_program->stream_res.tg,
3480 CRTC_STATE_VACTIVE);
3481 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3482 top_pipe_to_program->stream_res.tg,
3483 CRTC_STATE_VBLANK);
3484 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3485 top_pipe_to_program->stream_res.tg,
3486 CRTC_STATE_VACTIVE);
3487
3488 if (should_use_dmub_lock(stream->link)) {
3489 union dmub_hw_lock_flags hw_locks = { 0 };
3490 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3491
3492 hw_locks.bits.lock_dig = 1;
3493 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3494
3495 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3496 false,
3497 &hw_locks,
3498 &inst_flags);
3499 } else
3500 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3501 top_pipe_to_program->stream_res.tg);
3502 }
3503
3504 if (update_type != UPDATE_TYPE_FAST)
3505 dc->hwss.post_unlock_program_front_end(dc, context);
3506
3507
3508
3509
3510 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3511 if (dc->hwss.subvp_pipe_control_lock)
3512 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3513 } else {
3514 if (dc->hwss.subvp_pipe_control_lock)
3515 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3516 }
3517
3518
3519 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3520 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3521
3522 if (!pipe_ctx->plane_state)
3523 continue;
3524
3525 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3526 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3527 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3528 pipe_ctx->plane_state->skip_manual_trigger)
3529 continue;
3530
3531 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3532 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3533 }
3534 }
3535
3536 static bool commit_minimal_transition_state(struct dc *dc,
3537 struct dc_state *transition_base_context)
3538 {
3539 struct dc_state *transition_context = dc_create_state(dc);
3540 enum pipe_split_policy tmp_policy;
3541 enum dc_status ret = DC_ERROR_UNEXPECTED;
3542 unsigned int i, j;
3543
3544 if (!transition_context)
3545 return false;
3546
3547 tmp_policy = dc->debug.pipe_split_policy;
3548 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
3549
3550 dc_resource_state_copy_construct(transition_base_context, transition_context);
3551
3552
3553 if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
3554 for (i = 0; i < transition_context->stream_count; i++) {
3555 struct dc_stream_status *stream_status = &transition_context->stream_status[i];
3556
3557 for (j = 0; j < stream_status->plane_count; j++) {
3558 struct dc_plane_state *plane_state = stream_status->plane_states[j];
3559
3560
3561
3562
3563 plane_state->flip_immediate = false;
3564 }
3565 }
3566
3567 ret = dc_commit_state_no_check(dc, transition_context);
3568 }
3569
3570
3571 dc_release_state(transition_context);
3572
3573
3574 dc->debug.pipe_split_policy = tmp_policy;
3575
3576 if (ret != DC_OK) {
3577
3578 BREAK_TO_DEBUGGER();
3579 return false;
3580 }
3581
3582
3583 for (i = 0; i < dc->current_state->stream_count; i++) {
3584 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
3585 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
3586 }
3587 }
3588
3589 return true;
3590 }
3591
3592 bool dc_update_planes_and_stream(struct dc *dc,
3593 struct dc_surface_update *srf_updates, int surface_count,
3594 struct dc_stream_state *stream,
3595 struct dc_stream_update *stream_update)
3596 {
3597 struct dc_state *context;
3598 enum surface_update_type update_type;
3599 int i;
3600
3601
3602
3603
3604
3605 bool force_minimal_pipe_splitting = false;
3606 bool is_plane_addition = false;
3607
3608 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
3609
3610 if (cur_stream_status &&
3611 dc->current_state->stream_count > 0 &&
3612 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
3613
3614 if (cur_stream_status->plane_count > surface_count) {
3615 force_minimal_pipe_splitting = true;
3616 } else if (cur_stream_status->plane_count < surface_count) {
3617 force_minimal_pipe_splitting = true;
3618 is_plane_addition = true;
3619 }
3620 }
3621
3622
3623 if (force_minimal_pipe_splitting && is_plane_addition &&
3624 !commit_minimal_transition_state(dc, dc->current_state))
3625 return false;
3626
3627 if (!update_planes_and_stream_state(
3628 dc,
3629 srf_updates,
3630 surface_count,
3631 stream,
3632 stream_update,
3633 &update_type,
3634 &context))
3635 return false;
3636
3637
3638 if (force_minimal_pipe_splitting && !is_plane_addition) {
3639 if (!commit_minimal_transition_state(dc, context)) {
3640 dc_release_state(context);
3641 return false;
3642 }
3643
3644 update_type = UPDATE_TYPE_FULL;
3645 }
3646
3647 commit_planes_for_stream(
3648 dc,
3649 srf_updates,
3650 surface_count,
3651 stream,
3652 stream_update,
3653 update_type,
3654 context);
3655
3656 if (dc->current_state != context) {
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666 struct dc_state *old = dc->current_state;
3667
3668 dc->current_state = context;
3669 dc_release_state(old);
3670
3671
3672 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3673 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3674
3675 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3676 pipe_ctx->plane_state->force_full_update = false;
3677 }
3678 }
3679 return true;
3680 }
3681
3682 void dc_commit_updates_for_stream(struct dc *dc,
3683 struct dc_surface_update *srf_updates,
3684 int surface_count,
3685 struct dc_stream_state *stream,
3686 struct dc_stream_update *stream_update,
3687 struct dc_state *state)
3688 {
3689 const struct dc_stream_status *stream_status;
3690 enum surface_update_type update_type;
3691 struct dc_state *context;
3692 struct dc_context *dc_ctx = dc->ctx;
3693 int i, j;
3694
3695 stream_status = dc_stream_get_status(stream);
3696 context = dc->current_state;
3697
3698 update_type = dc_check_update_surfaces_for_stream(
3699 dc, srf_updates, surface_count, stream_update, stream_status);
3700
3701 if (update_type >= update_surface_trace_level)
3702 update_surface_trace(dc, srf_updates, surface_count);
3703
3704
3705 if (update_type >= UPDATE_TYPE_FULL) {
3706
3707
3708 context = dc_create_state(dc);
3709 if (context == NULL) {
3710 DC_ERROR("Failed to allocate new validate context!\n");
3711 return;
3712 }
3713
3714 dc_resource_state_copy_construct(state, context);
3715
3716 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3717 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3718 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3719
3720 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
3721 new_pipe->plane_state->force_full_update = true;
3722 }
3723 } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
3724
3725
3726
3727
3728
3729
3730 dc_post_update_surfaces_to_stream(dc);
3731 }
3732
3733
3734 for (i = 0; i < surface_count; i++) {
3735 struct dc_plane_state *surface = srf_updates[i].surface;
3736
3737 copy_surface_update_to_plane(surface, &srf_updates[i]);
3738
3739 if (update_type >= UPDATE_TYPE_MED) {
3740 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3741 struct pipe_ctx *pipe_ctx =
3742 &context->res_ctx.pipe_ctx[j];
3743
3744 if (pipe_ctx->plane_state != surface)
3745 continue;
3746
3747 resource_build_scaling_params(pipe_ctx);
3748 }
3749 }
3750 }
3751
3752 copy_stream_update_to_stream(dc, context, stream, stream_update);
3753
3754 if (update_type >= UPDATE_TYPE_FULL) {
3755 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3756 DC_ERROR("Mode validation failed for stream update!\n");
3757 dc_release_state(context);
3758 return;
3759 }
3760 }
3761
3762 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
3763
3764 commit_planes_for_stream(
3765 dc,
3766 srf_updates,
3767 surface_count,
3768 stream,
3769 stream_update,
3770 update_type,
3771 context);
3772
3773 if (dc->current_state != context) {
3774
3775 struct dc_state *old = dc->current_state;
3776
3777 dc->current_state = context;
3778 dc_release_state(old);
3779
3780 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3781 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3782
3783 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3784 pipe_ctx->plane_state->force_full_update = false;
3785 }
3786 }
3787
3788
3789 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
3790 dc_post_update_surfaces_to_stream(dc);
3791 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
3792 }
3793
3794 return;
3795
3796 }
3797
3798 uint8_t dc_get_current_stream_count(struct dc *dc)
3799 {
3800 return dc->current_state->stream_count;
3801 }
3802
3803 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
3804 {
3805 if (i < dc->current_state->stream_count)
3806 return dc->current_state->streams[i];
3807 return NULL;
3808 }
3809
3810 enum dc_irq_source dc_interrupt_to_irq_source(
3811 struct dc *dc,
3812 uint32_t src_id,
3813 uint32_t ext_id)
3814 {
3815 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
3816 }
3817
3818
3819
3820
3821 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
3822 {
3823
3824 if (dc == NULL)
3825 return false;
3826
3827 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
3828 }
3829
3830 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
3831 {
3832 dal_irq_service_ack(dc->res_pool->irqs, src);
3833 }
3834
3835 void dc_power_down_on_boot(struct dc *dc)
3836 {
3837 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
3838 dc->hwss.power_down_on_boot)
3839 dc->hwss.power_down_on_boot(dc);
3840 }
3841
3842 void dc_set_power_state(
3843 struct dc *dc,
3844 enum dc_acpi_cm_power_state power_state)
3845 {
3846 struct kref refcount;
3847 struct display_mode_lib *dml;
3848
3849 if (!dc->current_state)
3850 return;
3851
3852 switch (power_state) {
3853 case DC_ACPI_CM_POWER_STATE_D0:
3854 dc_resource_state_construct(dc, dc->current_state);
3855
3856 dc_z10_restore(dc);
3857
3858 if (dc->ctx->dmub_srv)
3859 dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
3860
3861 dc->hwss.init_hw(dc);
3862
3863 if (dc->hwss.init_sys_ctx != NULL &&
3864 dc->vm_pa_config.valid) {
3865 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
3866 }
3867
3868 break;
3869 default:
3870 ASSERT(dc->current_state->stream_count == 0);
3871
3872
3873
3874
3875 dml = kzalloc(sizeof(struct display_mode_lib),
3876 GFP_KERNEL);
3877
3878 ASSERT(dml);
3879 if (!dml)
3880 return;
3881
3882
3883 refcount = dc->current_state->refcount;
3884
3885 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
3886
3887 dc_resource_state_destruct(dc->current_state);
3888 memset(dc->current_state, 0,
3889 sizeof(*dc->current_state));
3890
3891 dc->current_state->refcount = refcount;
3892 dc->current_state->bw_ctx.dml = *dml;
3893
3894 kfree(dml);
3895
3896 break;
3897 }
3898 }
3899
3900 void dc_resume(struct dc *dc)
3901 {
3902 uint32_t i;
3903
3904 for (i = 0; i < dc->link_count; i++)
3905 core_link_resume(dc->links[i]);
3906 }
3907
3908 bool dc_is_dmcu_initialized(struct dc *dc)
3909 {
3910 struct dmcu *dmcu = dc->res_pool->dmcu;
3911
3912 if (dmcu)
3913 return dmcu->funcs->is_dmcu_initialized(dmcu);
3914 return false;
3915 }
3916
3917 bool dc_is_oem_i2c_device_present(
3918 struct dc *dc,
3919 size_t slave_address)
3920 {
3921 if (dc->res_pool->oem_device)
3922 return dce_i2c_oem_device_present(
3923 dc->res_pool,
3924 dc->res_pool->oem_device,
3925 slave_address);
3926
3927 return false;
3928 }
3929
3930 bool dc_submit_i2c(
3931 struct dc *dc,
3932 uint32_t link_index,
3933 struct i2c_command *cmd)
3934 {
3935
3936 struct dc_link *link = dc->links[link_index];
3937 struct ddc_service *ddc = link->ddc;
3938 return dce_i2c_submit_command(
3939 dc->res_pool,
3940 ddc->ddc_pin,
3941 cmd);
3942 }
3943
3944 bool dc_submit_i2c_oem(
3945 struct dc *dc,
3946 struct i2c_command *cmd)
3947 {
3948 struct ddc_service *ddc = dc->res_pool->oem_device;
3949 if (ddc)
3950 return dce_i2c_submit_command(
3951 dc->res_pool,
3952 ddc->ddc_pin,
3953 cmd);
3954
3955 return false;
3956 }
3957
3958 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
3959 {
3960 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
3961 BREAK_TO_DEBUGGER();
3962 return false;
3963 }
3964
3965 dc_sink_retain(sink);
3966
3967 dc_link->remote_sinks[dc_link->sink_count] = sink;
3968 dc_link->sink_count++;
3969
3970 return true;
3971 }
3972
3973
3974
3975
3976
3977
3978 struct dc_sink *dc_link_add_remote_sink(
3979 struct dc_link *link,
3980 const uint8_t *edid,
3981 int len,
3982 struct dc_sink_init_data *init_data)
3983 {
3984 struct dc_sink *dc_sink;
3985 enum dc_edid_status edid_status;
3986
3987 if (len > DC_MAX_EDID_BUFFER_SIZE) {
3988 dm_error("Max EDID buffer size breached!\n");
3989 return NULL;
3990 }
3991
3992 if (!init_data) {
3993 BREAK_TO_DEBUGGER();
3994 return NULL;
3995 }
3996
3997 if (!init_data->link) {
3998 BREAK_TO_DEBUGGER();
3999 return NULL;
4000 }
4001
4002 dc_sink = dc_sink_create(init_data);
4003
4004 if (!dc_sink)
4005 return NULL;
4006
4007 memmove(dc_sink->dc_edid.raw_edid, edid, len);
4008 dc_sink->dc_edid.length = len;
4009
4010 if (!link_add_remote_sink_helper(
4011 link,
4012 dc_sink))
4013 goto fail_add_sink;
4014
4015 edid_status = dm_helpers_parse_edid_caps(
4016 link,
4017 &dc_sink->dc_edid,
4018 &dc_sink->edid_caps);
4019
4020
4021
4022
4023
4024 if (edid_status != EDID_OK) {
4025 dc_sink->dc_edid.length = 0;
4026 dm_error("Bad EDID, status%d!\n", edid_status);
4027 }
4028
4029 return dc_sink;
4030
4031 fail_add_sink:
4032 dc_sink_release(dc_sink);
4033 return NULL;
4034 }
4035
4036
4037
4038
4039
4040
4041
4042 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
4043 {
4044 int i;
4045
4046 if (!link->sink_count) {
4047 BREAK_TO_DEBUGGER();
4048 return;
4049 }
4050
4051 for (i = 0; i < link->sink_count; i++) {
4052 if (link->remote_sinks[i] == sink) {
4053 dc_sink_release(sink);
4054 link->remote_sinks[i] = NULL;
4055
4056
4057 while (i < link->sink_count - 1) {
4058 link->remote_sinks[i] = link->remote_sinks[i+1];
4059 i++;
4060 }
4061 link->remote_sinks[i] = NULL;
4062 link->sink_count--;
4063 return;
4064 }
4065 }
4066 }
4067
4068 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
4069 {
4070 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
4071 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
4072 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
4073 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
4074 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
4075 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
4076 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
4077 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
4078 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
4079 }
4080 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
4081 {
4082 if (dc->hwss.set_clock)
4083 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
4084 return DC_ERROR_UNEXPECTED;
4085 }
4086 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
4087 {
4088 if (dc->hwss.get_clock)
4089 dc->hwss.get_clock(dc, clock_type, clock_cfg);
4090 }
4091
4092
4093 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
4094 {
4095 int i;
4096 bool allow_active;
4097
4098 for (i = 0; i < dc->current_state->stream_count ; i++) {
4099 struct dc_link *link;
4100 struct dc_stream_state *stream = dc->current_state->streams[i];
4101
4102 link = stream->link;
4103 if (!link)
4104 continue;
4105
4106 if (link->psr_settings.psr_feature_enabled) {
4107 if (enable && !link->psr_settings.psr_allow_active) {
4108 allow_active = true;
4109 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
4110 return false;
4111 } else if (!enable && link->psr_settings.psr_allow_active) {
4112 allow_active = false;
4113 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
4114 return false;
4115 }
4116 }
4117 }
4118
4119 return true;
4120 }
4121
4122 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
4123 {
4124 if (dc->debug.disable_idle_power_optimizations)
4125 return;
4126
4127 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
4128 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
4129 return;
4130
4131 if (allow == dc->idle_optimizations_allowed)
4132 return;
4133
4134 if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
4135 dc->idle_optimizations_allowed = allow;
4136 }
4137
4138
4139 void dc_unlock_memory_clock_frequency(struct dc *dc)
4140 {
4141 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4142 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
4143
4144 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4145 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4146 }
4147
4148
4149 void dc_lock_memory_clock_frequency(struct dc *dc)
4150 {
4151 if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
4152 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
4153
4154 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4155 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
4156
4157 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4158 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4159 }
4160
4161 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
4162 {
4163 struct dc_state *context = dc->current_state;
4164 struct hubp *hubp;
4165 struct pipe_ctx *pipe;
4166 int i;
4167
4168 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4169 pipe = &context->res_ctx.pipe_ctx[i];
4170
4171 if (pipe->stream != NULL) {
4172 dc->hwss.disable_pixel_data(dc, pipe, true);
4173
4174
4175 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4176 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
4177 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4178
4179 hubp = pipe->plane_res.hubp;
4180 hubp->funcs->set_blank_regs(hubp, true);
4181 }
4182 }
4183
4184 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
4185 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
4186
4187 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4188 pipe = &context->res_ctx.pipe_ctx[i];
4189
4190 if (pipe->stream != NULL) {
4191 dc->hwss.disable_pixel_data(dc, pipe, false);
4192
4193 hubp = pipe->plane_res.hubp;
4194 hubp->funcs->set_blank_regs(hubp, false);
4195 }
4196 }
4197 }
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
4217 {
4218 uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
4219 unsigned int softMax, maxDPM, funcMin;
4220 bool p_state_change_support;
4221
4222 if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
4223 return;
4224
4225 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
4226 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
4227 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
4228 p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
4229
4230 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
4231 if (p_state_change_support) {
4232 if (funcMin <= softMax)
4233 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
4234
4235 } else {
4236 if (funcMin <= softMax)
4237 blank_and_force_memclk(dc, true, softMax);
4238
4239 }
4240 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
4241 if (p_state_change_support) {
4242 if (funcMin <= softMax)
4243 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
4244
4245 } else {
4246 if (funcMin <= softMax)
4247 blank_and_force_memclk(dc, true, maxDPM);
4248
4249 }
4250 }
4251 dc->clk_mgr->dc_mode_softmax_enabled = enable;
4252 }
4253 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
4254 struct dc_cursor_attributes *cursor_attr)
4255 {
4256 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
4257 return true;
4258 return false;
4259 }
4260
4261
4262 void dc_hardware_release(struct dc *dc)
4263 {
4264 dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
4265
4266 if (dc->hwss.hardware_release)
4267 dc->hwss.hardware_release(dc);
4268 }
4269
4270 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
4271 {
4272 if (dc->current_state)
4273 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
4274 }
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292 bool dc_is_dmub_outbox_supported(struct dc *dc)
4293 {
4294
4295 if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
4296 dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
4297 !dc->debug.dpia_debug.bits.disable_dpia)
4298 return true;
4299
4300 if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
4301 !dc->debug.dpia_debug.bits.disable_dpia)
4302 return true;
4303
4304
4305 return dc->debug.enable_dmub_aux_for_legacy_ddc;
4306 }
4307
4308
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324 bool dc_enable_dmub_notifications(struct dc *dc)
4325 {
4326 return dc_is_dmub_outbox_supported(dc);
4327 }
4328
4329
4330
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340
4341
4342
4343 void dc_enable_dmub_outbox(struct dc *dc)
4344 {
4345 struct dc_context *dc_ctx = dc->ctx;
4346
4347 dmub_enable_outbox_notification(dc_ctx->dmub_srv);
4348 DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
4349 }
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359
4360 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
4361 uint32_t link_index,
4362 struct aux_payload *payload)
4363 {
4364 uint8_t action;
4365 union dmub_rb_cmd cmd = {0};
4366 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4367
4368 ASSERT(payload->length <= 16);
4369
4370 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
4371 cmd.dp_aux_access.header.payload_bytes = 0;
4372
4373 if (!dc->links[link_index]->ddc->ddc_pin)
4374 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
4375 else
4376 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
4377
4378 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
4379 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
4380 cmd.dp_aux_access.aux_control.timeout = 0;
4381 cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
4382 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
4383 cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
4384
4385
4386 if (payload->i2c_over_aux) {
4387 if (payload->write) {
4388 if (payload->mot)
4389 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
4390 else
4391 action = DP_AUX_REQ_ACTION_I2C_WRITE;
4392 } else {
4393 if (payload->mot)
4394 action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
4395 else
4396 action = DP_AUX_REQ_ACTION_I2C_READ;
4397 }
4398 } else {
4399 if (payload->write)
4400 action = DP_AUX_REQ_ACTION_DPCD_WRITE;
4401 else
4402 action = DP_AUX_REQ_ACTION_DPCD_READ;
4403 }
4404
4405 cmd.dp_aux_access.aux_control.dpaux.action = action;
4406
4407 if (payload->length && payload->write) {
4408 memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
4409 payload->data,
4410 payload->length
4411 );
4412 }
4413
4414 dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
4415 dc_dmub_srv_cmd_execute(dmub_srv);
4416 dc_dmub_srv_wait_idle(dmub_srv);
4417
4418 return true;
4419 }
4420
4421 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
4422 uint8_t dpia_port_index)
4423 {
4424 uint8_t index, link_index = 0xFF;
4425
4426 for (index = 0; index < dc->link_count; index++) {
4427
4428
4429
4430 if (!dc->links[index]->ddc->ddc_pin) {
4431 if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
4432 link_index = index;
4433 break;
4434 }
4435 }
4436 }
4437 ASSERT(link_index != 0xFF);
4438 return link_index;
4439 }
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
4458 bool dc_process_dmub_set_config_async(struct dc *dc,
4459 uint32_t link_index,
4460 struct set_config_cmd_payload *payload,
4461 struct dmub_notification *notify)
4462 {
4463 union dmub_rb_cmd cmd = {0};
4464 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4465 bool is_cmd_complete = true;
4466
4467
4468 cmd.set_config_access.header.type = DMUB_CMD__DPIA;
4469 cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
4470
4471 cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
4472 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
4473 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
4474
4475 if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
4476
4477 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
4478 return is_cmd_complete;
4479 }
4480
4481
4482 if (cmd.set_config_access.header.ret_status == 1)
4483 notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
4484 else
4485
4486 is_cmd_complete = false;
4487
4488 return is_cmd_complete;
4489 }
4490
4491
4492
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
4509 uint32_t link_index,
4510 uint8_t mst_alloc_slots,
4511 uint8_t *mst_slots_in_use)
4512 {
4513 union dmub_rb_cmd cmd = {0};
4514 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4515
4516
4517 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
4518 cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
4519
4520 cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
4521 cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
4522
4523 if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
4524
4525 return DC_ERROR_UNEXPECTED;
4526
4527
4528 if (cmd.set_config_access.header.ret_status != 1)
4529
4530 return DC_ERROR_UNEXPECTED;
4531
4532
4533 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
4534 return DC_FAIL_UNSUPPORTED_1;
4535
4536
4537 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
4538 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
4539 return DC_NOT_SUPPORTED;
4540 }
4541
4542 return DC_OK;
4543 }
4544
4545
4546
4547
4548
4549 void dc_disable_accelerated_mode(struct dc *dc)
4550 {
4551 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
4552 }
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
4568 {
4569 int i;
4570 int edp_num;
4571 struct pipe_ctx *pipe = NULL;
4572 struct dc_link *link = stream->sink->link;
4573 struct dc_link *edp_links[MAX_NUM_EDP];
4574
4575
4576 if (link->psr_settings.psr_feature_enabled)
4577 return;
4578
4579
4580 for (i = 0; i < MAX_PIPES; i++) {
4581 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4582
4583 if (pipe->stream == stream && pipe->stream_res.tg)
4584 break;
4585 }
4586
4587 if (i == MAX_PIPES) {
4588 ASSERT(0);
4589 return;
4590 }
4591
4592 get_edp_links(dc, edp_links, &edp_num);
4593
4594
4595 for (i = 0; i < edp_num; i++) {
4596 if (edp_links[i] == link)
4597 break;
4598 }
4599
4600 if (i == edp_num) {
4601 return;
4602 }
4603
4604 if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
4605 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
4606 }
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616 bool dc_extended_blank_supported(struct dc *dc)
4617 {
4618 return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
4619 && dc->caps.zstate_support && dc->caps.is_apu;
4620 }