0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027 #define CREATE_TRACE_POINTS
0028
0029 #include "dm_services_types.h"
0030 #include "dc.h"
0031 #include "dc_link_dp.h"
0032 #include "link_enc_cfg.h"
0033 #include "dc/inc/core_types.h"
0034 #include "dal_asic_id.h"
0035 #include "dmub/dmub_srv.h"
0036 #include "dc/inc/hw/dmcu.h"
0037 #include "dc/inc/hw/abm.h"
0038 #include "dc/dc_dmub_srv.h"
0039 #include "dc/dc_edid_parser.h"
0040 #include "dc/dc_stat.h"
0041 #include "amdgpu_dm_trace.h"
0042
0043 #include "vid.h"
0044 #include "amdgpu.h"
0045 #include "amdgpu_display.h"
0046 #include "amdgpu_ucode.h"
0047 #include "atom.h"
0048 #include "amdgpu_dm.h"
0049 #include "amdgpu_dm_plane.h"
0050 #include "amdgpu_dm_crtc.h"
0051 #ifdef CONFIG_DRM_AMD_DC_HDCP
0052 #include "amdgpu_dm_hdcp.h"
0053 #include <drm/display/drm_hdcp_helper.h>
0054 #endif
0055 #include "amdgpu_pm.h"
0056 #include "amdgpu_atombios.h"
0057
0058 #include "amd_shared.h"
0059 #include "amdgpu_dm_irq.h"
0060 #include "dm_helpers.h"
0061 #include "amdgpu_dm_mst_types.h"
0062 #if defined(CONFIG_DEBUG_FS)
0063 #include "amdgpu_dm_debugfs.h"
0064 #endif
0065 #include "amdgpu_dm_psr.h"
0066
0067 #include "ivsrcid/ivsrcid_vislands30.h"
0068
0069 #include "i2caux_interface.h"
0070 #include <linux/module.h>
0071 #include <linux/moduleparam.h>
0072 #include <linux/types.h>
0073 #include <linux/pm_runtime.h>
0074 #include <linux/pci.h>
0075 #include <linux/firmware.h>
0076 #include <linux/component.h>
0077 #include <linux/dmi.h>
0078
0079 #include <drm/display/drm_dp_mst_helper.h>
0080 #include <drm/display/drm_hdmi_helper.h>
0081 #include <drm/drm_atomic.h>
0082 #include <drm/drm_atomic_uapi.h>
0083 #include <drm/drm_atomic_helper.h>
0084 #include <drm/drm_blend.h>
0085 #include <drm/drm_fb_helper.h>
0086 #include <drm/drm_fourcc.h>
0087 #include <drm/drm_edid.h>
0088 #include <drm/drm_vblank.h>
0089 #include <drm/drm_audio_component.h>
0090 #include <drm/drm_gem_atomic_helper.h>
0091
0092 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
0093
0094 #include "dcn/dcn_1_0_offset.h"
0095 #include "dcn/dcn_1_0_sh_mask.h"
0096 #include "soc15_hw_ip.h"
0097 #include "soc15_common.h"
0098 #include "vega10_ip_offset.h"
0099
0100 #include "soc15_common.h"
0101
0102 #include "gc/gc_11_0_0_offset.h"
0103 #include "gc/gc_11_0_0_sh_mask.h"
0104
0105 #include "modules/inc/mod_freesync.h"
0106 #include "modules/power/power_helpers.h"
0107 #include "modules/inc/mod_info_packet.h"
0108
0109 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
0110 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
0111 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
0112 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
0113 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
0114 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
0115 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
0116 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
0117 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
0118 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
0119 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
0120 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
0121 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
0122 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
0123 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
0124 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
0125 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
0126 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
0127 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
0128 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
0129 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
0130 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
0131
0132 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
0133 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
0134 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
0135 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
0136
0137 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
0138 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
0139
0140 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
0141 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
0142
0143
0144 #define PSP_HEADER_BYTES 0x100
0145
0146
0147 #define PSP_FOOTER_BYTES 0x100
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160 static int amdgpu_dm_init(struct amdgpu_device *adev);
0161 static void amdgpu_dm_fini(struct amdgpu_device *adev);
0162 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
0163
0164 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
0165 {
0166 switch (link->dpcd_caps.dongle_type) {
0167 case DISPLAY_DONGLE_NONE:
0168 return DRM_MODE_SUBCONNECTOR_Native;
0169 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
0170 return DRM_MODE_SUBCONNECTOR_VGA;
0171 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
0172 case DISPLAY_DONGLE_DP_DVI_DONGLE:
0173 return DRM_MODE_SUBCONNECTOR_DVID;
0174 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
0175 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
0176 return DRM_MODE_SUBCONNECTOR_HDMIA;
0177 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
0178 default:
0179 return DRM_MODE_SUBCONNECTOR_Unknown;
0180 }
0181 }
0182
0183 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
0184 {
0185 struct dc_link *link = aconnector->dc_link;
0186 struct drm_connector *connector = &aconnector->base;
0187 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
0188
0189 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
0190 return;
0191
0192 if (aconnector->dc_sink)
0193 subconnector = get_subconnector_type(link);
0194
0195 drm_object_property_set_value(&connector->base,
0196 connector->dev->mode_config.dp_subconnector_property,
0197 subconnector);
0198 }
0199
0200
0201
0202
0203
0204
0205
0206
0207 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
0208
0209 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
0210
0211 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
0212 struct amdgpu_dm_connector *amdgpu_dm_connector,
0213 uint32_t link_index,
0214 struct amdgpu_encoder *amdgpu_encoder);
0215 static int amdgpu_dm_encoder_init(struct drm_device *dev,
0216 struct amdgpu_encoder *aencoder,
0217 uint32_t link_index);
0218
0219 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
0220
0221 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
0222
0223 static int amdgpu_dm_atomic_check(struct drm_device *dev,
0224 struct drm_atomic_state *state);
0225
0226 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
0227 static void handle_hpd_rx_irq(void *param);
0228
0229 static bool
0230 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
0231 struct drm_crtc_state *new_crtc_state);
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
0246 {
0247 if (crtc >= adev->mode_info.num_crtc)
0248 return 0;
0249 else {
0250 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
0251
0252 if (acrtc->dm_irq_params.stream == NULL) {
0253 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
0254 crtc);
0255 return 0;
0256 }
0257
0258 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
0259 }
0260 }
0261
0262 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
0263 u32 *vbl, u32 *position)
0264 {
0265 uint32_t v_blank_start, v_blank_end, h_position, v_position;
0266
0267 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
0268 return -EINVAL;
0269 else {
0270 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
0271
0272 if (acrtc->dm_irq_params.stream == NULL) {
0273 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
0274 crtc);
0275 return 0;
0276 }
0277
0278
0279
0280
0281
0282 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
0283 &v_blank_start,
0284 &v_blank_end,
0285 &h_position,
0286 &v_position);
0287
0288 *position = v_position | (h_position << 16);
0289 *vbl = v_blank_start | (v_blank_end << 16);
0290 }
0291
0292 return 0;
0293 }
0294
0295 static bool dm_is_idle(void *handle)
0296 {
0297
0298 return true;
0299 }
0300
0301 static int dm_wait_for_idle(void *handle)
0302 {
0303
0304 return 0;
0305 }
0306
0307 static bool dm_check_soft_reset(void *handle)
0308 {
0309 return false;
0310 }
0311
0312 static int dm_soft_reset(void *handle)
0313 {
0314
0315 return 0;
0316 }
0317
0318 static struct amdgpu_crtc *
0319 get_crtc_by_otg_inst(struct amdgpu_device *adev,
0320 int otg_inst)
0321 {
0322 struct drm_device *dev = adev_to_drm(adev);
0323 struct drm_crtc *crtc;
0324 struct amdgpu_crtc *amdgpu_crtc;
0325
0326 if (WARN_ON(otg_inst == -1))
0327 return adev->mode_info.crtcs[0];
0328
0329 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
0330 amdgpu_crtc = to_amdgpu_crtc(crtc);
0331
0332 if (amdgpu_crtc->otg_inst == otg_inst)
0333 return amdgpu_crtc;
0334 }
0335
0336 return NULL;
0337 }
0338
0339 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
0340 struct dm_crtc_state *new_state)
0341 {
0342 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
0343 return true;
0344 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
0345 return true;
0346 else
0347 return false;
0348 }
0349
0350
0351
0352
0353
0354
0355
0356
0357 static void dm_pflip_high_irq(void *interrupt_params)
0358 {
0359 struct amdgpu_crtc *amdgpu_crtc;
0360 struct common_irq_params *irq_params = interrupt_params;
0361 struct amdgpu_device *adev = irq_params->adev;
0362 unsigned long flags;
0363 struct drm_pending_vblank_event *e;
0364 uint32_t vpos, hpos, v_blank_start, v_blank_end;
0365 bool vrr_active;
0366
0367 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
0368
0369
0370
0371 if (amdgpu_crtc == NULL) {
0372 DC_LOG_PFLIP("CRTC is null, returning.\n");
0373 return;
0374 }
0375
0376 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0377
0378 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
0379 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
0380 amdgpu_crtc->pflip_status,
0381 AMDGPU_FLIP_SUBMITTED,
0382 amdgpu_crtc->crtc_id,
0383 amdgpu_crtc);
0384 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
0385 return;
0386 }
0387
0388
0389 e = amdgpu_crtc->event;
0390 amdgpu_crtc->event = NULL;
0391
0392 WARN_ON(!e);
0393
0394 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
0395
0396
0397 if (!vrr_active ||
0398 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
0399 &v_blank_end, &hpos, &vpos) ||
0400 (vpos < v_blank_start)) {
0401
0402
0403
0404
0405 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
0406
0407
0408
0409
0410 if (e) {
0411 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
0412
0413
0414 drm_crtc_vblank_put(&amdgpu_crtc->base);
0415 }
0416 } else if (e) {
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
0432 e->pipe = amdgpu_crtc->crtc_id;
0433
0434 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
0435 e = NULL;
0436 }
0437
0438
0439
0440
0441
0442
0443 amdgpu_crtc->dm_irq_params.last_flip_vblank =
0444 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
0445
0446 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
0447 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
0448
0449 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
0450 amdgpu_crtc->crtc_id, amdgpu_crtc,
0451 vrr_active, (int) !e);
0452 }
0453
0454 static void dm_vupdate_high_irq(void *interrupt_params)
0455 {
0456 struct common_irq_params *irq_params = interrupt_params;
0457 struct amdgpu_device *adev = irq_params->adev;
0458 struct amdgpu_crtc *acrtc;
0459 struct drm_device *drm_dev;
0460 struct drm_vblank_crtc *vblank;
0461 ktime_t frame_duration_ns, previous_timestamp;
0462 unsigned long flags;
0463 int vrr_active;
0464
0465 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
0466
0467 if (acrtc) {
0468 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
0469 drm_dev = acrtc->base.dev;
0470 vblank = &drm_dev->vblank[acrtc->base.index];
0471 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
0472 frame_duration_ns = vblank->time - previous_timestamp;
0473
0474 if (frame_duration_ns > 0) {
0475 trace_amdgpu_refresh_rate_track(acrtc->base.index,
0476 frame_duration_ns,
0477 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
0478 atomic64_set(&irq_params->previous_timestamp, vblank->time);
0479 }
0480
0481 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
0482 acrtc->crtc_id,
0483 vrr_active);
0484
0485
0486
0487
0488
0489
0490
0491 if (vrr_active) {
0492 dm_crtc_handle_vblank(acrtc);
0493
0494
0495 if (acrtc->dm_irq_params.stream &&
0496 adev->family < AMDGPU_FAMILY_AI) {
0497 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0498 mod_freesync_handle_v_update(
0499 adev->dm.freesync_module,
0500 acrtc->dm_irq_params.stream,
0501 &acrtc->dm_irq_params.vrr_params);
0502
0503 dc_stream_adjust_vmin_vmax(
0504 adev->dm.dc,
0505 acrtc->dm_irq_params.stream,
0506 &acrtc->dm_irq_params.vrr_params.adjust);
0507 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
0508 }
0509 }
0510 }
0511 }
0512
0513
0514
0515
0516
0517
0518
0519
0520 static void dm_crtc_high_irq(void *interrupt_params)
0521 {
0522 struct common_irq_params *irq_params = interrupt_params;
0523 struct amdgpu_device *adev = irq_params->adev;
0524 struct amdgpu_crtc *acrtc;
0525 unsigned long flags;
0526 int vrr_active;
0527
0528 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
0529 if (!acrtc)
0530 return;
0531
0532 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
0533
0534 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
0535 vrr_active, acrtc->dm_irq_params.active_planes);
0536
0537
0538
0539
0540
0541
0542
0543 if (!vrr_active)
0544 dm_crtc_handle_vblank(acrtc);
0545
0546
0547
0548
0549
0550 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
0551
0552
0553 if (adev->family < AMDGPU_FAMILY_AI)
0554 return;
0555
0556 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0557
0558 if (acrtc->dm_irq_params.stream &&
0559 acrtc->dm_irq_params.vrr_params.supported &&
0560 acrtc->dm_irq_params.freesync_config.state ==
0561 VRR_STATE_ACTIVE_VARIABLE) {
0562 mod_freesync_handle_v_update(adev->dm.freesync_module,
0563 acrtc->dm_irq_params.stream,
0564 &acrtc->dm_irq_params.vrr_params);
0565
0566 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
0567 &acrtc->dm_irq_params.vrr_params.adjust);
0568 }
0569
0570
0571
0572
0573
0574
0575
0576
0577
0578
0579
0580 if (adev->family >= AMDGPU_FAMILY_RV &&
0581 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
0582 acrtc->dm_irq_params.active_planes == 0) {
0583 if (acrtc->event) {
0584 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
0585 acrtc->event = NULL;
0586 drm_crtc_vblank_put(&acrtc->base);
0587 }
0588 acrtc->pflip_status = AMDGPU_FLIP_NONE;
0589 }
0590
0591 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
0592 }
0593
0594 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
0595
0596
0597
0598
0599
0600
0601
0602 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
0603 {
0604 struct common_irq_params *irq_params = interrupt_params;
0605 struct amdgpu_device *adev = irq_params->adev;
0606 struct amdgpu_crtc *acrtc;
0607
0608 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
0609
0610 if (!acrtc)
0611 return;
0612
0613 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
0614 }
0615 #endif
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
0627 struct dmub_notification *notify)
0628 {
0629 if (adev->dm.dmub_notify)
0630 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
0631 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
0632 complete(&adev->dm.dmub_aux_transfer_done);
0633 }
0634
0635
0636
0637
0638
0639
0640
0641
0642
0643 static void dmub_hpd_callback(struct amdgpu_device *adev,
0644 struct dmub_notification *notify)
0645 {
0646 struct amdgpu_dm_connector *aconnector;
0647 struct amdgpu_dm_connector *hpd_aconnector = NULL;
0648 struct drm_connector *connector;
0649 struct drm_connector_list_iter iter;
0650 struct dc_link *link;
0651 uint8_t link_index = 0;
0652 struct drm_device *dev;
0653
0654 if (adev == NULL)
0655 return;
0656
0657 if (notify == NULL) {
0658 DRM_ERROR("DMUB HPD callback notification was NULL");
0659 return;
0660 }
0661
0662 if (notify->link_index > adev->dm.dc->link_count) {
0663 DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
0664 return;
0665 }
0666
0667 link_index = notify->link_index;
0668 link = adev->dm.dc->links[link_index];
0669 dev = adev->dm.ddev;
0670
0671 drm_connector_list_iter_begin(dev, &iter);
0672 drm_for_each_connector_iter(connector, &iter) {
0673 aconnector = to_amdgpu_dm_connector(connector);
0674 if (link && aconnector->dc_link == link) {
0675 DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
0676 hpd_aconnector = aconnector;
0677 break;
0678 }
0679 }
0680 drm_connector_list_iter_end(&iter);
0681
0682 if (hpd_aconnector) {
0683 if (notify->type == DMUB_NOTIFICATION_HPD)
0684 handle_hpd_irq_helper(hpd_aconnector);
0685 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
0686 handle_hpd_rx_irq(hpd_aconnector);
0687 }
0688 }
0689
0690
0691
0692
0693
0694
0695
0696
0697
0698
0699
0700
0701
0702 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
0703 enum dmub_notification_type type,
0704 dmub_notify_interrupt_callback_t callback,
0705 bool dmub_int_thread_offload)
0706 {
0707 if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
0708 adev->dm.dmub_callback[type] = callback;
0709 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
0710 } else
0711 return false;
0712
0713 return true;
0714 }
0715
0716 static void dm_handle_hpd_work(struct work_struct *work)
0717 {
0718 struct dmub_hpd_work *dmub_hpd_wrk;
0719
0720 dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
0721
0722 if (!dmub_hpd_wrk->dmub_notify) {
0723 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
0724 return;
0725 }
0726
0727 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
0728 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
0729 dmub_hpd_wrk->dmub_notify);
0730 }
0731
0732 kfree(dmub_hpd_wrk->dmub_notify);
0733 kfree(dmub_hpd_wrk);
0734
0735 }
0736
0737 #define DMUB_TRACE_MAX_READ 64
0738
0739
0740
0741
0742
0743
0744
0745 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
0746 {
0747 struct dmub_notification notify;
0748 struct common_irq_params *irq_params = interrupt_params;
0749 struct amdgpu_device *adev = irq_params->adev;
0750 struct amdgpu_display_manager *dm = &adev->dm;
0751 struct dmcub_trace_buf_entry entry = { 0 };
0752 uint32_t count = 0;
0753 struct dmub_hpd_work *dmub_hpd_wrk;
0754 struct dc_link *plink = NULL;
0755
0756 if (dc_enable_dmub_notifications(adev->dm.dc) &&
0757 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
0758
0759 do {
0760 dc_stat_get_dmub_notification(adev->dm.dc, ¬ify);
0761 if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
0762 DRM_ERROR("DM: notify type %d invalid!", notify.type);
0763 continue;
0764 }
0765 if (!dm->dmub_callback[notify.type]) {
0766 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
0767 continue;
0768 }
0769 if (dm->dmub_thread_offload[notify.type] == true) {
0770 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
0771 if (!dmub_hpd_wrk) {
0772 DRM_ERROR("Failed to allocate dmub_hpd_wrk");
0773 return;
0774 }
0775 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
0776 if (!dmub_hpd_wrk->dmub_notify) {
0777 kfree(dmub_hpd_wrk);
0778 DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
0779 return;
0780 }
0781 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
0782 if (dmub_hpd_wrk->dmub_notify)
0783 memcpy(dmub_hpd_wrk->dmub_notify, ¬ify, sizeof(struct dmub_notification));
0784 dmub_hpd_wrk->adev = adev;
0785 if (notify.type == DMUB_NOTIFICATION_HPD) {
0786 plink = adev->dm.dc->links[notify.link_index];
0787 if (plink) {
0788 plink->hpd_status =
0789 notify.hpd_status == DP_HPD_PLUG;
0790 }
0791 }
0792 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
0793 } else {
0794 dm->dmub_callback[notify.type](adev, ¬ify);
0795 }
0796 } while (notify.pending_notification);
0797 }
0798
0799
0800 do {
0801 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
0802 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
0803 entry.param0, entry.param1);
0804
0805 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
0806 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
0807 } else
0808 break;
0809
0810 count++;
0811
0812 } while (count <= DMUB_TRACE_MAX_READ);
0813
0814 if (count > DMUB_TRACE_MAX_READ)
0815 DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
0816 }
0817
0818 static int dm_set_clockgating_state(void *handle,
0819 enum amd_clockgating_state state)
0820 {
0821 return 0;
0822 }
0823
0824 static int dm_set_powergating_state(void *handle,
0825 enum amd_powergating_state state)
0826 {
0827 return 0;
0828 }
0829
0830
0831 static int dm_early_init(void* handle);
0832
0833
0834 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
0835 {
0836 struct drm_device *dev = connector->dev;
0837 struct amdgpu_device *adev = drm_to_adev(dev);
0838 struct dm_compressor_info *compressor = &adev->dm.compressor;
0839 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
0840 struct drm_display_mode *mode;
0841 unsigned long max_size = 0;
0842
0843 if (adev->dm.dc->fbc_compressor == NULL)
0844 return;
0845
0846 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
0847 return;
0848
0849 if (compressor->bo_ptr)
0850 return;
0851
0852
0853 list_for_each_entry(mode, &connector->modes, head) {
0854 if (max_size < mode->htotal * mode->vtotal)
0855 max_size = mode->htotal * mode->vtotal;
0856 }
0857
0858 if (max_size) {
0859 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
0860 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
0861 &compressor->gpu_addr, &compressor->cpu_addr);
0862
0863 if (r)
0864 DRM_ERROR("DM: Failed to initialize FBC\n");
0865 else {
0866 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
0867 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
0868 }
0869
0870 }
0871
0872 }
0873
0874 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
0875 int pipe, bool *enabled,
0876 unsigned char *buf, int max_bytes)
0877 {
0878 struct drm_device *dev = dev_get_drvdata(kdev);
0879 struct amdgpu_device *adev = drm_to_adev(dev);
0880 struct drm_connector *connector;
0881 struct drm_connector_list_iter conn_iter;
0882 struct amdgpu_dm_connector *aconnector;
0883 int ret = 0;
0884
0885 *enabled = false;
0886
0887 mutex_lock(&adev->dm.audio_lock);
0888
0889 drm_connector_list_iter_begin(dev, &conn_iter);
0890 drm_for_each_connector_iter(connector, &conn_iter) {
0891 aconnector = to_amdgpu_dm_connector(connector);
0892 if (aconnector->audio_inst != port)
0893 continue;
0894
0895 *enabled = true;
0896 ret = drm_eld_size(connector->eld);
0897 memcpy(buf, connector->eld, min(max_bytes, ret));
0898
0899 break;
0900 }
0901 drm_connector_list_iter_end(&conn_iter);
0902
0903 mutex_unlock(&adev->dm.audio_lock);
0904
0905 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
0906
0907 return ret;
0908 }
0909
0910 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
0911 .get_eld = amdgpu_dm_audio_component_get_eld,
0912 };
0913
0914 static int amdgpu_dm_audio_component_bind(struct device *kdev,
0915 struct device *hda_kdev, void *data)
0916 {
0917 struct drm_device *dev = dev_get_drvdata(kdev);
0918 struct amdgpu_device *adev = drm_to_adev(dev);
0919 struct drm_audio_component *acomp = data;
0920
0921 acomp->ops = &amdgpu_dm_audio_component_ops;
0922 acomp->dev = kdev;
0923 adev->dm.audio_component = acomp;
0924
0925 return 0;
0926 }
0927
0928 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
0929 struct device *hda_kdev, void *data)
0930 {
0931 struct drm_device *dev = dev_get_drvdata(kdev);
0932 struct amdgpu_device *adev = drm_to_adev(dev);
0933 struct drm_audio_component *acomp = data;
0934
0935 acomp->ops = NULL;
0936 acomp->dev = NULL;
0937 adev->dm.audio_component = NULL;
0938 }
0939
0940 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
0941 .bind = amdgpu_dm_audio_component_bind,
0942 .unbind = amdgpu_dm_audio_component_unbind,
0943 };
0944
0945 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
0946 {
0947 int i, ret;
0948
0949 if (!amdgpu_audio)
0950 return 0;
0951
0952 adev->mode_info.audio.enabled = true;
0953
0954 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
0955
0956 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
0957 adev->mode_info.audio.pin[i].channels = -1;
0958 adev->mode_info.audio.pin[i].rate = -1;
0959 adev->mode_info.audio.pin[i].bits_per_sample = -1;
0960 adev->mode_info.audio.pin[i].status_bits = 0;
0961 adev->mode_info.audio.pin[i].category_code = 0;
0962 adev->mode_info.audio.pin[i].connected = false;
0963 adev->mode_info.audio.pin[i].id =
0964 adev->dm.dc->res_pool->audios[i]->inst;
0965 adev->mode_info.audio.pin[i].offset = 0;
0966 }
0967
0968 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
0969 if (ret < 0)
0970 return ret;
0971
0972 adev->dm.audio_registered = true;
0973
0974 return 0;
0975 }
0976
0977 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
0978 {
0979 if (!amdgpu_audio)
0980 return;
0981
0982 if (!adev->mode_info.audio.enabled)
0983 return;
0984
0985 if (adev->dm.audio_registered) {
0986 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
0987 adev->dm.audio_registered = false;
0988 }
0989
0990
0991
0992 adev->mode_info.audio.enabled = false;
0993 }
0994
0995 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
0996 {
0997 struct drm_audio_component *acomp = adev->dm.audio_component;
0998
0999 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1000 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1001
1002 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1003 pin, -1);
1004 }
1005 }
1006
1007 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1008 {
1009 const struct dmcub_firmware_header_v1_0 *hdr;
1010 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1011 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1012 const struct firmware *dmub_fw = adev->dm.dmub_fw;
1013 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1014 struct abm *abm = adev->dm.dc->res_pool->abm;
1015 struct dmub_srv_hw_params hw_params;
1016 enum dmub_status status;
1017 const unsigned char *fw_inst_const, *fw_bss_data;
1018 uint32_t i, fw_inst_const_size, fw_bss_data_size;
1019 bool has_hw_support;
1020
1021 if (!dmub_srv)
1022
1023 return 0;
1024
1025 if (!fb_info) {
1026 DRM_ERROR("No framebuffer info for DMUB service.\n");
1027 return -EINVAL;
1028 }
1029
1030 if (!dmub_fw) {
1031
1032 DRM_ERROR("No firmware provided for DMUB.\n");
1033 return -EINVAL;
1034 }
1035
1036 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1037 if (status != DMUB_STATUS_OK) {
1038 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1039 return -EINVAL;
1040 }
1041
1042 if (!has_hw_support) {
1043 DRM_INFO("DMUB unsupported on ASIC\n");
1044 return 0;
1045 }
1046
1047
1048 status = dmub_srv_hw_reset(dmub_srv);
1049 if (status != DMUB_STATUS_OK)
1050 DRM_WARN("Error resetting DMUB HW: %d\n", status);
1051
1052 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1053
1054 fw_inst_const = dmub_fw->data +
1055 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1056 PSP_HEADER_BYTES;
1057
1058 fw_bss_data = dmub_fw->data +
1059 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1060 le32_to_cpu(hdr->inst_const_bytes);
1061
1062
1063 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1064 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1065
1066 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1067
1068
1069
1070
1071
1072
1073 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1074 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1075 fw_inst_const_size);
1076 }
1077
1078 if (fw_bss_data_size)
1079 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1080 fw_bss_data, fw_bss_data_size);
1081
1082
1083 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1084 adev->bios_size);
1085
1086
1087 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1088 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1089
1090 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1091 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1092
1093 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1094 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1095
1096
1097 memset(&hw_params, 0, sizeof(hw_params));
1098 hw_params.fb_base = adev->gmc.fb_start;
1099 hw_params.fb_offset = adev->gmc.aper_base;
1100
1101
1102 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1103 hw_params.load_inst_const = true;
1104
1105 if (dmcu)
1106 hw_params.psp_version = dmcu->psp_version;
1107
1108 for (i = 0; i < fb_info->num_fb; ++i)
1109 hw_params.fb[i] = &fb_info->fb[i];
1110
1111 switch (adev->ip_versions[DCE_HWIP][0]) {
1112 case IP_VERSION(3, 1, 3):
1113 hw_params.dpia_supported = true;
1114 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1115 break;
1116 default:
1117 break;
1118 }
1119
1120 status = dmub_srv_hw_init(dmub_srv, &hw_params);
1121 if (status != DMUB_STATUS_OK) {
1122 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1123 return -EINVAL;
1124 }
1125
1126
1127 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1128 if (status != DMUB_STATUS_OK)
1129 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1130
1131
1132 if (dmcu && abm) {
1133 dmcu->funcs->dmcu_init(dmcu);
1134 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1135 }
1136
1137 if (!adev->dm.dc->ctx->dmub_srv)
1138 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1139 if (!adev->dm.dc->ctx->dmub_srv) {
1140 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1141 return -ENOMEM;
1142 }
1143
1144 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1145 adev->dm.dmcub_fw_version);
1146
1147 return 0;
1148 }
1149
1150 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1151 {
1152 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1153 enum dmub_status status;
1154 bool init;
1155
1156 if (!dmub_srv) {
1157
1158 return;
1159 }
1160
1161 status = dmub_srv_is_hw_init(dmub_srv, &init);
1162 if (status != DMUB_STATUS_OK)
1163 DRM_WARN("DMUB hardware init check failed: %d\n", status);
1164
1165 if (status == DMUB_STATUS_OK && init) {
1166
1167 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1168 if (status != DMUB_STATUS_OK)
1169 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1170 } else {
1171
1172 dm_dmub_hw_init(adev);
1173 }
1174 }
1175
1176 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1177 {
1178 uint64_t pt_base;
1179 uint32_t logical_addr_low;
1180 uint32_t logical_addr_high;
1181 uint32_t agp_base, agp_bot, agp_top;
1182 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1183
1184 memset(pa_config, 0, sizeof(*pa_config));
1185
1186 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1187 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1188
1189 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1190
1191
1192
1193
1194
1195
1196 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1197 else
1198 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1199
1200 agp_base = 0;
1201 agp_bot = adev->gmc.agp_start >> 24;
1202 agp_top = adev->gmc.agp_end >> 24;
1203
1204
1205 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1206 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1207 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1208 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1209 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1210 page_table_base.low_part = lower_32_bits(pt_base);
1211
1212 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1213 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1214
1215 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1216 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1217 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1218
1219 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1220 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1221 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1222
1223 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1224 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1225 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1226
1227 pa_config->is_hvm_enabled = 0;
1228
1229 }
1230
1231 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1232 {
1233 struct hpd_rx_irq_offload_work *offload_work;
1234 struct amdgpu_dm_connector *aconnector;
1235 struct dc_link *dc_link;
1236 struct amdgpu_device *adev;
1237 enum dc_connection_type new_connection_type = dc_connection_none;
1238 unsigned long flags;
1239
1240 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1241 aconnector = offload_work->offload_wq->aconnector;
1242
1243 if (!aconnector) {
1244 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1245 goto skip;
1246 }
1247
1248 adev = drm_to_adev(aconnector->base.dev);
1249 dc_link = aconnector->dc_link;
1250
1251 mutex_lock(&aconnector->hpd_lock);
1252 if (!dc_link_detect_sink(dc_link, &new_connection_type))
1253 DRM_ERROR("KMS: Failed to detect connector\n");
1254 mutex_unlock(&aconnector->hpd_lock);
1255
1256 if (new_connection_type == dc_connection_none)
1257 goto skip;
1258
1259 if (amdgpu_in_reset(adev))
1260 goto skip;
1261
1262 mutex_lock(&adev->dm.dc_lock);
1263 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1264 dc_link_dp_handle_automated_test(dc_link);
1265 else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1266 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1267 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1268 dc_link_dp_handle_link_loss(dc_link);
1269 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1270 offload_work->offload_wq->is_handling_link_loss = false;
1271 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1272 }
1273 mutex_unlock(&adev->dm.dc_lock);
1274
1275 skip:
1276 kfree(offload_work);
1277
1278 }
1279
1280 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1281 {
1282 int max_caps = dc->caps.max_links;
1283 int i = 0;
1284 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1285
1286 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1287
1288 if (!hpd_rx_offload_wq)
1289 return NULL;
1290
1291
1292 for (i = 0; i < max_caps; i++) {
1293 hpd_rx_offload_wq[i].wq =
1294 create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1295
1296 if (hpd_rx_offload_wq[i].wq == NULL) {
1297 DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1298 return NULL;
1299 }
1300
1301 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1302 }
1303
1304 return hpd_rx_offload_wq;
1305 }
1306
1307 struct amdgpu_stutter_quirk {
1308 u16 chip_vendor;
1309 u16 chip_device;
1310 u16 subsys_vendor;
1311 u16 subsys_device;
1312 u8 revision;
1313 };
1314
1315 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1316
1317 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1318 { 0, 0, 0, 0, 0 },
1319 };
1320
1321 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1322 {
1323 const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1324
1325 while (p && p->chip_device != 0) {
1326 if (pdev->vendor == p->chip_vendor &&
1327 pdev->device == p->chip_device &&
1328 pdev->subsystem_vendor == p->subsys_vendor &&
1329 pdev->subsystem_device == p->subsys_device &&
1330 pdev->revision == p->revision) {
1331 return true;
1332 }
1333 ++p;
1334 }
1335 return false;
1336 }
1337
1338 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1339 {
1340 .matches = {
1341 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1342 DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1343 },
1344 },
1345 {
1346 .matches = {
1347 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1348 DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1349 },
1350 },
1351 {
1352 .matches = {
1353 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1354 DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1355 },
1356 },
1357 {}
1358 };
1359
1360 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1361 {
1362 const struct dmi_system_id *dmi_id;
1363
1364 dm->aux_hpd_discon_quirk = false;
1365
1366 dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1367 if (dmi_id) {
1368 dm->aux_hpd_discon_quirk = true;
1369 DRM_INFO("aux_hpd_discon_quirk attached\n");
1370 }
1371 }
1372
1373 static int amdgpu_dm_init(struct amdgpu_device *adev)
1374 {
1375 struct dc_init_data init_data;
1376 #ifdef CONFIG_DRM_AMD_DC_HDCP
1377 struct dc_callback_init init_params;
1378 #endif
1379 int r;
1380
1381 adev->dm.ddev = adev_to_drm(adev);
1382 adev->dm.adev = adev;
1383
1384
1385 memset(&init_data, 0, sizeof(init_data));
1386 #ifdef CONFIG_DRM_AMD_DC_HDCP
1387 memset(&init_params, 0, sizeof(init_params));
1388 #endif
1389
1390 mutex_init(&adev->dm.dc_lock);
1391 mutex_init(&adev->dm.audio_lock);
1392 spin_lock_init(&adev->dm.vblank_lock);
1393
1394 if(amdgpu_dm_irq_init(adev)) {
1395 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1396 goto error;
1397 }
1398
1399 init_data.asic_id.chip_family = adev->family;
1400
1401 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1402 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1403 init_data.asic_id.chip_id = adev->pdev->device;
1404
1405 init_data.asic_id.vram_width = adev->gmc.vram_width;
1406
1407 init_data.asic_id.atombios_base_address =
1408 adev->mode_info.atom_context->bios;
1409
1410 init_data.driver = adev;
1411
1412 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1413
1414 if (!adev->dm.cgs_device) {
1415 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1416 goto error;
1417 }
1418
1419 init_data.cgs_device = adev->dm.cgs_device;
1420
1421 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1422
1423 switch (adev->ip_versions[DCE_HWIP][0]) {
1424 case IP_VERSION(2, 1, 0):
1425 switch (adev->dm.dmcub_fw_version) {
1426 case 0:
1427 case 0x1:
1428 case 0x01000000:
1429 init_data.flags.disable_dmcu = false;
1430 break;
1431 default:
1432 init_data.flags.disable_dmcu = true;
1433 }
1434 break;
1435 case IP_VERSION(2, 0, 3):
1436 init_data.flags.disable_dmcu = true;
1437 break;
1438 default:
1439 break;
1440 }
1441
1442 switch (adev->asic_type) {
1443 case CHIP_CARRIZO:
1444 case CHIP_STONEY:
1445 init_data.flags.gpu_vm_support = true;
1446 break;
1447 default:
1448 switch (adev->ip_versions[DCE_HWIP][0]) {
1449 case IP_VERSION(1, 0, 0):
1450 case IP_VERSION(1, 0, 1):
1451
1452 if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1453 (adev->apu_flags & AMD_APU_IS_PICASSO))
1454 init_data.flags.gpu_vm_support = true;
1455 break;
1456 case IP_VERSION(2, 1, 0):
1457 case IP_VERSION(3, 0, 1):
1458 case IP_VERSION(3, 1, 2):
1459 case IP_VERSION(3, 1, 3):
1460 case IP_VERSION(3, 1, 5):
1461 case IP_VERSION(3, 1, 6):
1462 init_data.flags.gpu_vm_support = true;
1463 break;
1464 default:
1465 break;
1466 }
1467 break;
1468 }
1469
1470 if (init_data.flags.gpu_vm_support)
1471 adev->mode_info.gpu_vm_support = true;
1472
1473 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1474 init_data.flags.fbc_support = true;
1475
1476 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1477 init_data.flags.multi_mon_pp_mclk_switch = true;
1478
1479 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1480 init_data.flags.disable_fractional_pwm = true;
1481
1482 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1483 init_data.flags.edp_no_power_sequencing = true;
1484
1485 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1486 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1487 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1488 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1489
1490 init_data.flags.seamless_boot_edp_requested = false;
1491
1492 if (check_seamless_boot_capability(adev)) {
1493 init_data.flags.seamless_boot_edp_requested = true;
1494 init_data.flags.allow_seamless_boot_optimization = true;
1495 DRM_INFO("Seamless boot condition check passed\n");
1496 }
1497
1498 init_data.flags.enable_mipi_converter_optimization = true;
1499
1500 init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1501 init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1502
1503 INIT_LIST_HEAD(&adev->dm.da_list);
1504
1505 retrieve_dmi_info(&adev->dm);
1506
1507
1508 adev->dm.dc = dc_create(&init_data);
1509
1510 if (adev->dm.dc) {
1511 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1512 } else {
1513 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1514 goto error;
1515 }
1516
1517 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1518 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1519 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1520 }
1521
1522 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1523 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1524 if (dm_should_disable_stutter(adev->pdev))
1525 adev->dm.dc->debug.disable_stutter = true;
1526
1527 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1528 adev->dm.dc->debug.disable_stutter = true;
1529
1530 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1531 adev->dm.dc->debug.disable_dsc = true;
1532 adev->dm.dc->debug.disable_dsc_edp = true;
1533 }
1534
1535 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1536 adev->dm.dc->debug.disable_clock_gate = true;
1537
1538 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1539 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1540
1541 adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
1542
1543 r = dm_dmub_hw_init(adev);
1544 if (r) {
1545 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1546 goto error;
1547 }
1548
1549 dc_hardware_init(adev->dm.dc);
1550
1551 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1552 if (!adev->dm.hpd_rx_offload_wq) {
1553 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1554 goto error;
1555 }
1556
1557 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1558 struct dc_phy_addr_space_config pa_config;
1559
1560 mmhub_read_system_context(adev, &pa_config);
1561
1562
1563 dc_setup_system_context(adev->dm.dc, &pa_config);
1564 }
1565
1566 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1567 if (!adev->dm.freesync_module) {
1568 DRM_ERROR(
1569 "amdgpu: failed to initialize freesync_module.\n");
1570 } else
1571 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1572 adev->dm.freesync_module);
1573
1574 amdgpu_dm_init_color_mod();
1575
1576 if (adev->dm.dc->caps.max_links > 0) {
1577 adev->dm.vblank_control_workqueue =
1578 create_singlethread_workqueue("dm_vblank_control_workqueue");
1579 if (!adev->dm.vblank_control_workqueue)
1580 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1581 }
1582
1583 #ifdef CONFIG_DRM_AMD_DC_HDCP
1584 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1585 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1586
1587 if (!adev->dm.hdcp_workqueue)
1588 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1589 else
1590 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1591
1592 dc_init_callbacks(adev->dm.dc, &init_params);
1593 }
1594 #endif
1595 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1596 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1597 #endif
1598 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1599 init_completion(&adev->dm.dmub_aux_transfer_done);
1600 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1601 if (!adev->dm.dmub_notify) {
1602 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1603 goto error;
1604 }
1605
1606 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1607 if (!adev->dm.delayed_hpd_wq) {
1608 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1609 goto error;
1610 }
1611
1612 amdgpu_dm_outbox_init(adev);
1613 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1614 dmub_aux_setconfig_callback, false)) {
1615 DRM_ERROR("amdgpu: fail to register dmub aux callback");
1616 goto error;
1617 }
1618 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1619 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1620 goto error;
1621 }
1622 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1623 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1624 goto error;
1625 }
1626 }
1627
1628 if (amdgpu_dm_initialize_drm_device(adev)) {
1629 DRM_ERROR(
1630 "amdgpu: failed to initialize sw for display support.\n");
1631 goto error;
1632 }
1633
1634
1635
1636
1637
1638 if (dc_is_dmub_outbox_supported(adev->dm.dc))
1639 dc_enable_dmub_outbox(adev->dm.dc);
1640
1641
1642 dm_dp_create_fake_mst_encoders(adev);
1643
1644
1645
1646
1647 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1648 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1649
1650 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1651 DRM_ERROR(
1652 "amdgpu: failed to initialize sw for display support.\n");
1653 goto error;
1654 }
1655
1656
1657 DRM_DEBUG_DRIVER("KMS initialized.\n");
1658
1659 return 0;
1660 error:
1661 amdgpu_dm_fini(adev);
1662
1663 return -EINVAL;
1664 }
1665
1666 static int amdgpu_dm_early_fini(void *handle)
1667 {
1668 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1669
1670 amdgpu_dm_audio_fini(adev);
1671
1672 return 0;
1673 }
1674
1675 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1676 {
1677 int i;
1678
1679 if (adev->dm.vblank_control_workqueue) {
1680 destroy_workqueue(adev->dm.vblank_control_workqueue);
1681 adev->dm.vblank_control_workqueue = NULL;
1682 }
1683
1684 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1685 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1686 }
1687
1688 amdgpu_dm_destroy_drm_device(&adev->dm);
1689
1690 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1691 if (adev->dm.crc_rd_wrk) {
1692 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1693 kfree(adev->dm.crc_rd_wrk);
1694 adev->dm.crc_rd_wrk = NULL;
1695 }
1696 #endif
1697 #ifdef CONFIG_DRM_AMD_DC_HDCP
1698 if (adev->dm.hdcp_workqueue) {
1699 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1700 adev->dm.hdcp_workqueue = NULL;
1701 }
1702
1703 if (adev->dm.dc)
1704 dc_deinit_callbacks(adev->dm.dc);
1705 #endif
1706
1707 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1708
1709 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1710 kfree(adev->dm.dmub_notify);
1711 adev->dm.dmub_notify = NULL;
1712 destroy_workqueue(adev->dm.delayed_hpd_wq);
1713 adev->dm.delayed_hpd_wq = NULL;
1714 }
1715
1716 if (adev->dm.dmub_bo)
1717 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1718 &adev->dm.dmub_bo_gpu_addr,
1719 &adev->dm.dmub_bo_cpu_addr);
1720
1721 if (adev->dm.hpd_rx_offload_wq) {
1722 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1723 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1724 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1725 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1726 }
1727 }
1728
1729 kfree(adev->dm.hpd_rx_offload_wq);
1730 adev->dm.hpd_rx_offload_wq = NULL;
1731 }
1732
1733
1734 if (adev->dm.dc)
1735 dc_destroy(&adev->dm.dc);
1736
1737
1738
1739
1740
1741
1742 if (adev->dm.cgs_device) {
1743 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1744 adev->dm.cgs_device = NULL;
1745 }
1746 if (adev->dm.freesync_module) {
1747 mod_freesync_destroy(adev->dm.freesync_module);
1748 adev->dm.freesync_module = NULL;
1749 }
1750
1751 mutex_destroy(&adev->dm.audio_lock);
1752 mutex_destroy(&adev->dm.dc_lock);
1753
1754 return;
1755 }
1756
1757 static int load_dmcu_fw(struct amdgpu_device *adev)
1758 {
1759 const char *fw_name_dmcu = NULL;
1760 int r;
1761 const struct dmcu_firmware_header_v1_0 *hdr;
1762
1763 switch(adev->asic_type) {
1764 #if defined(CONFIG_DRM_AMD_DC_SI)
1765 case CHIP_TAHITI:
1766 case CHIP_PITCAIRN:
1767 case CHIP_VERDE:
1768 case CHIP_OLAND:
1769 #endif
1770 case CHIP_BONAIRE:
1771 case CHIP_HAWAII:
1772 case CHIP_KAVERI:
1773 case CHIP_KABINI:
1774 case CHIP_MULLINS:
1775 case CHIP_TONGA:
1776 case CHIP_FIJI:
1777 case CHIP_CARRIZO:
1778 case CHIP_STONEY:
1779 case CHIP_POLARIS11:
1780 case CHIP_POLARIS10:
1781 case CHIP_POLARIS12:
1782 case CHIP_VEGAM:
1783 case CHIP_VEGA10:
1784 case CHIP_VEGA12:
1785 case CHIP_VEGA20:
1786 return 0;
1787 case CHIP_NAVI12:
1788 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1789 break;
1790 case CHIP_RAVEN:
1791 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1792 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1793 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1794 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1795 else
1796 return 0;
1797 break;
1798 default:
1799 switch (adev->ip_versions[DCE_HWIP][0]) {
1800 case IP_VERSION(2, 0, 2):
1801 case IP_VERSION(2, 0, 3):
1802 case IP_VERSION(2, 0, 0):
1803 case IP_VERSION(2, 1, 0):
1804 case IP_VERSION(3, 0, 0):
1805 case IP_VERSION(3, 0, 2):
1806 case IP_VERSION(3, 0, 3):
1807 case IP_VERSION(3, 0, 1):
1808 case IP_VERSION(3, 1, 2):
1809 case IP_VERSION(3, 1, 3):
1810 case IP_VERSION(3, 1, 4):
1811 case IP_VERSION(3, 1, 5):
1812 case IP_VERSION(3, 1, 6):
1813 case IP_VERSION(3, 2, 0):
1814 case IP_VERSION(3, 2, 1):
1815 return 0;
1816 default:
1817 break;
1818 }
1819 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1820 return -EINVAL;
1821 }
1822
1823 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1824 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1825 return 0;
1826 }
1827
1828 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1829 if (r == -ENOENT) {
1830
1831 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1832 adev->dm.fw_dmcu = NULL;
1833 return 0;
1834 }
1835 if (r) {
1836 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1837 fw_name_dmcu);
1838 return r;
1839 }
1840
1841 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1842 if (r) {
1843 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1844 fw_name_dmcu);
1845 release_firmware(adev->dm.fw_dmcu);
1846 adev->dm.fw_dmcu = NULL;
1847 return r;
1848 }
1849
1850 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1851 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1852 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1853 adev->firmware.fw_size +=
1854 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1855
1856 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1857 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1858 adev->firmware.fw_size +=
1859 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1860
1861 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1862
1863 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1864
1865 return 0;
1866 }
1867
1868 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1869 {
1870 struct amdgpu_device *adev = ctx;
1871
1872 return dm_read_reg(adev->dm.dc->ctx, address);
1873 }
1874
1875 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1876 uint32_t value)
1877 {
1878 struct amdgpu_device *adev = ctx;
1879
1880 return dm_write_reg(adev->dm.dc->ctx, address, value);
1881 }
1882
1883 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1884 {
1885 struct dmub_srv_create_params create_params;
1886 struct dmub_srv_region_params region_params;
1887 struct dmub_srv_region_info region_info;
1888 struct dmub_srv_fb_params fb_params;
1889 struct dmub_srv_fb_info *fb_info;
1890 struct dmub_srv *dmub_srv;
1891 const struct dmcub_firmware_header_v1_0 *hdr;
1892 const char *fw_name_dmub;
1893 enum dmub_asic dmub_asic;
1894 enum dmub_status status;
1895 int r;
1896
1897 switch (adev->ip_versions[DCE_HWIP][0]) {
1898 case IP_VERSION(2, 1, 0):
1899 dmub_asic = DMUB_ASIC_DCN21;
1900 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1901 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1902 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1903 break;
1904 case IP_VERSION(3, 0, 0):
1905 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1906 dmub_asic = DMUB_ASIC_DCN30;
1907 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1908 } else {
1909 dmub_asic = DMUB_ASIC_DCN30;
1910 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1911 }
1912 break;
1913 case IP_VERSION(3, 0, 1):
1914 dmub_asic = DMUB_ASIC_DCN301;
1915 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1916 break;
1917 case IP_VERSION(3, 0, 2):
1918 dmub_asic = DMUB_ASIC_DCN302;
1919 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1920 break;
1921 case IP_VERSION(3, 0, 3):
1922 dmub_asic = DMUB_ASIC_DCN303;
1923 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1924 break;
1925 case IP_VERSION(3, 1, 2):
1926 case IP_VERSION(3, 1, 3):
1927 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1928 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1929 break;
1930 case IP_VERSION(3, 1, 4):
1931 dmub_asic = DMUB_ASIC_DCN314;
1932 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
1933 break;
1934 case IP_VERSION(3, 1, 5):
1935 dmub_asic = DMUB_ASIC_DCN315;
1936 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1937 break;
1938 case IP_VERSION(3, 1, 6):
1939 dmub_asic = DMUB_ASIC_DCN316;
1940 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1941 break;
1942 case IP_VERSION(3, 2, 0):
1943 dmub_asic = DMUB_ASIC_DCN32;
1944 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
1945 break;
1946 case IP_VERSION(3, 2, 1):
1947 dmub_asic = DMUB_ASIC_DCN321;
1948 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
1949 break;
1950 default:
1951
1952 return 0;
1953 }
1954
1955 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1956 if (r) {
1957 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1958 return 0;
1959 }
1960
1961 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1962 if (r) {
1963 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1964 return 0;
1965 }
1966
1967 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1968 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1969
1970 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1971 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1972 AMDGPU_UCODE_ID_DMCUB;
1973 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1974 adev->dm.dmub_fw;
1975 adev->firmware.fw_size +=
1976 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1977
1978 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1979 adev->dm.dmcub_fw_version);
1980 }
1981
1982
1983 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1984 dmub_srv = adev->dm.dmub_srv;
1985
1986 if (!dmub_srv) {
1987 DRM_ERROR("Failed to allocate DMUB service!\n");
1988 return -ENOMEM;
1989 }
1990
1991 memset(&create_params, 0, sizeof(create_params));
1992 create_params.user_ctx = adev;
1993 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1994 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1995 create_params.asic = dmub_asic;
1996
1997
1998 status = dmub_srv_create(dmub_srv, &create_params);
1999 if (status != DMUB_STATUS_OK) {
2000 DRM_ERROR("Error creating DMUB service: %d\n", status);
2001 return -EINVAL;
2002 }
2003
2004
2005 memset(®ion_params, 0, sizeof(region_params));
2006
2007 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2008 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2009 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2010 region_params.vbios_size = adev->bios_size;
2011 region_params.fw_bss_data = region_params.bss_data_size ?
2012 adev->dm.dmub_fw->data +
2013 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2014 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2015 region_params.fw_inst_const =
2016 adev->dm.dmub_fw->data +
2017 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2018 PSP_HEADER_BYTES;
2019
2020 status = dmub_srv_calc_region_info(dmub_srv, ®ion_params,
2021 ®ion_info);
2022
2023 if (status != DMUB_STATUS_OK) {
2024 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2025 return -EINVAL;
2026 }
2027
2028
2029
2030
2031
2032 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2033 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2034 &adev->dm.dmub_bo_gpu_addr,
2035 &adev->dm.dmub_bo_cpu_addr);
2036 if (r)
2037 return r;
2038
2039
2040 memset(&fb_params, 0, sizeof(fb_params));
2041 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2042 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2043 fb_params.region_info = ®ion_info;
2044
2045 adev->dm.dmub_fb_info =
2046 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2047 fb_info = adev->dm.dmub_fb_info;
2048
2049 if (!fb_info) {
2050 DRM_ERROR(
2051 "Failed to allocate framebuffer info for DMUB service!\n");
2052 return -ENOMEM;
2053 }
2054
2055 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2056 if (status != DMUB_STATUS_OK) {
2057 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2058 return -EINVAL;
2059 }
2060
2061 return 0;
2062 }
2063
2064 static int dm_sw_init(void *handle)
2065 {
2066 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2067 int r;
2068
2069 r = dm_dmub_sw_init(adev);
2070 if (r)
2071 return r;
2072
2073 return load_dmcu_fw(adev);
2074 }
2075
2076 static int dm_sw_fini(void *handle)
2077 {
2078 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2079
2080 kfree(adev->dm.dmub_fb_info);
2081 adev->dm.dmub_fb_info = NULL;
2082
2083 if (adev->dm.dmub_srv) {
2084 dmub_srv_destroy(adev->dm.dmub_srv);
2085 adev->dm.dmub_srv = NULL;
2086 }
2087
2088 release_firmware(adev->dm.dmub_fw);
2089 adev->dm.dmub_fw = NULL;
2090
2091 release_firmware(adev->dm.fw_dmcu);
2092 adev->dm.fw_dmcu = NULL;
2093
2094 return 0;
2095 }
2096
2097 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2098 {
2099 struct amdgpu_dm_connector *aconnector;
2100 struct drm_connector *connector;
2101 struct drm_connector_list_iter iter;
2102 int ret = 0;
2103
2104 drm_connector_list_iter_begin(dev, &iter);
2105 drm_for_each_connector_iter(connector, &iter) {
2106 aconnector = to_amdgpu_dm_connector(connector);
2107 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2108 aconnector->mst_mgr.aux) {
2109 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2110 aconnector,
2111 aconnector->base.base.id);
2112
2113 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2114 if (ret < 0) {
2115 DRM_ERROR("DM_MST: Failed to start MST\n");
2116 aconnector->dc_link->type =
2117 dc_connection_single;
2118 break;
2119 }
2120 }
2121 }
2122 drm_connector_list_iter_end(&iter);
2123
2124 return ret;
2125 }
2126
2127 static int dm_late_init(void *handle)
2128 {
2129 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2130
2131 struct dmcu_iram_parameters params;
2132 unsigned int linear_lut[16];
2133 int i;
2134 struct dmcu *dmcu = NULL;
2135
2136 dmcu = adev->dm.dc->res_pool->dmcu;
2137
2138 for (i = 0; i < 16; i++)
2139 linear_lut[i] = 0xFFFF * i / 15;
2140
2141 params.set = 0;
2142 params.backlight_ramping_override = false;
2143 params.backlight_ramping_start = 0xCCCC;
2144 params.backlight_ramping_reduction = 0xCCCCCCCC;
2145 params.backlight_lut_array_size = 16;
2146 params.backlight_lut_array = linear_lut;
2147
2148
2149
2150
2151 params.min_abm_backlight = 0x28F;
2152
2153
2154
2155
2156 if (dmcu) {
2157 if (!dmcu_load_iram(dmcu, params))
2158 return -EINVAL;
2159 } else if (adev->dm.dc->ctx->dmub_srv) {
2160 struct dc_link *edp_links[MAX_NUM_EDP];
2161 int edp_num;
2162
2163 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2164 for (i = 0; i < edp_num; i++) {
2165 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2166 return -EINVAL;
2167 }
2168 }
2169
2170 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2171 }
2172
2173 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2174 {
2175 struct amdgpu_dm_connector *aconnector;
2176 struct drm_connector *connector;
2177 struct drm_connector_list_iter iter;
2178 struct drm_dp_mst_topology_mgr *mgr;
2179 int ret;
2180 bool need_hotplug = false;
2181
2182 drm_connector_list_iter_begin(dev, &iter);
2183 drm_for_each_connector_iter(connector, &iter) {
2184 aconnector = to_amdgpu_dm_connector(connector);
2185 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2186 aconnector->mst_port)
2187 continue;
2188
2189 mgr = &aconnector->mst_mgr;
2190
2191 if (suspend) {
2192 drm_dp_mst_topology_mgr_suspend(mgr);
2193 } else {
2194 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2195 if (ret < 0) {
2196 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2197 aconnector->dc_link);
2198 need_hotplug = true;
2199 }
2200 }
2201 }
2202 drm_connector_list_iter_end(&iter);
2203
2204 if (need_hotplug)
2205 drm_kms_helper_hotplug_event(dev);
2206 }
2207
2208 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2209 {
2210 int ret = 0;
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242 switch (adev->ip_versions[DCE_HWIP][0]) {
2243 case IP_VERSION(2, 0, 2):
2244 case IP_VERSION(2, 0, 0):
2245 break;
2246 default:
2247 return 0;
2248 }
2249
2250 ret = amdgpu_dpm_write_watermarks_table(adev);
2251 if (ret) {
2252 DRM_ERROR("Failed to update WMTABLE!\n");
2253 return ret;
2254 }
2255
2256 return 0;
2257 }
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279 static int dm_hw_init(void *handle)
2280 {
2281 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2282
2283 amdgpu_dm_init(adev);
2284 amdgpu_dm_hpd_init(adev);
2285
2286 return 0;
2287 }
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297 static int dm_hw_fini(void *handle)
2298 {
2299 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2300
2301 amdgpu_dm_hpd_fini(adev);
2302
2303 amdgpu_dm_irq_fini(adev);
2304 amdgpu_dm_fini(adev);
2305 return 0;
2306 }
2307
2308
2309 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2310 struct dc_state *state, bool enable)
2311 {
2312 enum dc_irq_source irq_source;
2313 struct amdgpu_crtc *acrtc;
2314 int rc = -EBUSY;
2315 int i = 0;
2316
2317 for (i = 0; i < state->stream_count; i++) {
2318 acrtc = get_crtc_by_otg_inst(
2319 adev, state->stream_status[i].primary_otg_inst);
2320
2321 if (acrtc && state->stream_status[i].plane_count != 0) {
2322 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2323 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2324 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2325 acrtc->crtc_id, enable ? "en" : "dis", rc);
2326 if (rc)
2327 DRM_WARN("Failed to %s pflip interrupts\n",
2328 enable ? "enable" : "disable");
2329
2330 if (enable) {
2331 rc = dm_enable_vblank(&acrtc->base);
2332 if (rc)
2333 DRM_WARN("Failed to enable vblank interrupts\n");
2334 } else {
2335 dm_disable_vblank(&acrtc->base);
2336 }
2337
2338 }
2339 }
2340
2341 }
2342
2343 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2344 {
2345 struct dc_state *context = NULL;
2346 enum dc_status res = DC_ERROR_UNEXPECTED;
2347 int i;
2348 struct dc_stream_state *del_streams[MAX_PIPES];
2349 int del_streams_count = 0;
2350
2351 memset(del_streams, 0, sizeof(del_streams));
2352
2353 context = dc_create_state(dc);
2354 if (context == NULL)
2355 goto context_alloc_fail;
2356
2357 dc_resource_state_copy_construct_current(dc, context);
2358
2359
2360 for (i = 0; i < context->stream_count; i++) {
2361 struct dc_stream_state *stream = context->streams[i];
2362
2363 del_streams[del_streams_count++] = stream;
2364 }
2365
2366
2367 for (i = 0; i < del_streams_count; i++) {
2368 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2369 res = DC_FAIL_DETACH_SURFACES;
2370 goto fail;
2371 }
2372
2373 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2374 if (res != DC_OK)
2375 goto fail;
2376 }
2377
2378 res = dc_commit_state(dc, context);
2379
2380 fail:
2381 dc_release_state(context);
2382
2383 context_alloc_fail:
2384 return res;
2385 }
2386
2387 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2388 {
2389 int i;
2390
2391 if (dm->hpd_rx_offload_wq) {
2392 for (i = 0; i < dm->dc->caps.max_links; i++)
2393 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2394 }
2395 }
2396
2397 static int dm_suspend(void *handle)
2398 {
2399 struct amdgpu_device *adev = handle;
2400 struct amdgpu_display_manager *dm = &adev->dm;
2401 int ret = 0;
2402
2403 if (amdgpu_in_reset(adev)) {
2404 mutex_lock(&dm->dc_lock);
2405
2406 dc_allow_idle_optimizations(adev->dm.dc, false);
2407
2408 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2409
2410 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2411
2412 amdgpu_dm_commit_zero_streams(dm->dc);
2413
2414 amdgpu_dm_irq_suspend(adev);
2415
2416 hpd_rx_irq_work_suspend(dm);
2417
2418 return ret;
2419 }
2420
2421 WARN_ON(adev->dm.cached_state);
2422 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2423
2424 s3_handle_mst(adev_to_drm(adev), true);
2425
2426 amdgpu_dm_irq_suspend(adev);
2427
2428 hpd_rx_irq_work_suspend(dm);
2429
2430 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2431
2432 return 0;
2433 }
2434
2435 struct amdgpu_dm_connector *
2436 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2437 struct drm_crtc *crtc)
2438 {
2439 uint32_t i;
2440 struct drm_connector_state *new_con_state;
2441 struct drm_connector *connector;
2442 struct drm_crtc *crtc_from_state;
2443
2444 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2445 crtc_from_state = new_con_state->crtc;
2446
2447 if (crtc_from_state == crtc)
2448 return to_amdgpu_dm_connector(connector);
2449 }
2450
2451 return NULL;
2452 }
2453
2454 static void emulated_link_detect(struct dc_link *link)
2455 {
2456 struct dc_sink_init_data sink_init_data = { 0 };
2457 struct display_sink_capability sink_caps = { 0 };
2458 enum dc_edid_status edid_status;
2459 struct dc_context *dc_ctx = link->ctx;
2460 struct dc_sink *sink = NULL;
2461 struct dc_sink *prev_sink = NULL;
2462
2463 link->type = dc_connection_none;
2464 prev_sink = link->local_sink;
2465
2466 if (prev_sink)
2467 dc_sink_release(prev_sink);
2468
2469 switch (link->connector_signal) {
2470 case SIGNAL_TYPE_HDMI_TYPE_A: {
2471 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2472 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2473 break;
2474 }
2475
2476 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2477 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2478 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2479 break;
2480 }
2481
2482 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2483 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2484 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2485 break;
2486 }
2487
2488 case SIGNAL_TYPE_LVDS: {
2489 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2490 sink_caps.signal = SIGNAL_TYPE_LVDS;
2491 break;
2492 }
2493
2494 case SIGNAL_TYPE_EDP: {
2495 sink_caps.transaction_type =
2496 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2497 sink_caps.signal = SIGNAL_TYPE_EDP;
2498 break;
2499 }
2500
2501 case SIGNAL_TYPE_DISPLAY_PORT: {
2502 sink_caps.transaction_type =
2503 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2504 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2505 break;
2506 }
2507
2508 default:
2509 DC_ERROR("Invalid connector type! signal:%d\n",
2510 link->connector_signal);
2511 return;
2512 }
2513
2514 sink_init_data.link = link;
2515 sink_init_data.sink_signal = sink_caps.signal;
2516
2517 sink = dc_sink_create(&sink_init_data);
2518 if (!sink) {
2519 DC_ERROR("Failed to create sink!\n");
2520 return;
2521 }
2522
2523
2524 link->local_sink = sink;
2525
2526 edid_status = dm_helpers_read_local_edid(
2527 link->ctx,
2528 link,
2529 sink);
2530
2531 if (edid_status != EDID_OK)
2532 DC_ERROR("Failed to read EDID");
2533
2534 }
2535
2536 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2537 struct amdgpu_display_manager *dm)
2538 {
2539 struct {
2540 struct dc_surface_update surface_updates[MAX_SURFACES];
2541 struct dc_plane_info plane_infos[MAX_SURFACES];
2542 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2543 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2544 struct dc_stream_update stream_update;
2545 } * bundle;
2546 int k, m;
2547
2548 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2549
2550 if (!bundle) {
2551 dm_error("Failed to allocate update bundle\n");
2552 goto cleanup;
2553 }
2554
2555 for (k = 0; k < dc_state->stream_count; k++) {
2556 bundle->stream_update.stream = dc_state->streams[k];
2557
2558 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2559 bundle->surface_updates[m].surface =
2560 dc_state->stream_status->plane_states[m];
2561 bundle->surface_updates[m].surface->force_full_update =
2562 true;
2563 }
2564 dc_commit_updates_for_stream(
2565 dm->dc, bundle->surface_updates,
2566 dc_state->stream_status->plane_count,
2567 dc_state->streams[k], &bundle->stream_update, dc_state);
2568 }
2569
2570 cleanup:
2571 kfree(bundle);
2572
2573 return;
2574 }
2575
2576 static int dm_resume(void *handle)
2577 {
2578 struct amdgpu_device *adev = handle;
2579 struct drm_device *ddev = adev_to_drm(adev);
2580 struct amdgpu_display_manager *dm = &adev->dm;
2581 struct amdgpu_dm_connector *aconnector;
2582 struct drm_connector *connector;
2583 struct drm_connector_list_iter iter;
2584 struct drm_crtc *crtc;
2585 struct drm_crtc_state *new_crtc_state;
2586 struct dm_crtc_state *dm_new_crtc_state;
2587 struct drm_plane *plane;
2588 struct drm_plane_state *new_plane_state;
2589 struct dm_plane_state *dm_new_plane_state;
2590 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2591 enum dc_connection_type new_connection_type = dc_connection_none;
2592 struct dc_state *dc_state;
2593 int i, r, j;
2594
2595 if (amdgpu_in_reset(adev)) {
2596 dc_state = dm->cached_dc_state;
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2614
2615 r = dm_dmub_hw_init(adev);
2616 if (r)
2617 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2618
2619 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2620 dc_resume(dm->dc);
2621
2622 amdgpu_dm_irq_resume_early(adev);
2623
2624 for (i = 0; i < dc_state->stream_count; i++) {
2625 dc_state->streams[i]->mode_changed = true;
2626 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2627 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2628 = 0xffffffff;
2629 }
2630 }
2631
2632 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2633 amdgpu_dm_outbox_init(adev);
2634 dc_enable_dmub_outbox(adev->dm.dc);
2635 }
2636
2637 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2638
2639 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2640
2641 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2642
2643 dc_release_state(dm->cached_dc_state);
2644 dm->cached_dc_state = NULL;
2645
2646 amdgpu_dm_irq_resume_late(adev);
2647
2648 mutex_unlock(&dm->dc_lock);
2649
2650 return 0;
2651 }
2652
2653 dc_release_state(dm_state->context);
2654 dm_state->context = dc_create_state(dm->dc);
2655
2656 dc_resource_state_construct(dm->dc, dm_state->context);
2657
2658
2659 dm_dmub_hw_resume(adev);
2660
2661
2662 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2663 amdgpu_dm_outbox_init(adev);
2664 dc_enable_dmub_outbox(adev->dm.dc);
2665 }
2666
2667
2668 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2669
2670
2671 dc_resume(dm->dc);
2672
2673
2674
2675
2676
2677 amdgpu_dm_irq_resume_early(adev);
2678
2679
2680 s3_handle_mst(ddev, false);
2681
2682
2683 drm_connector_list_iter_begin(ddev, &iter);
2684 drm_for_each_connector_iter(connector, &iter) {
2685 aconnector = to_amdgpu_dm_connector(connector);
2686
2687
2688
2689
2690
2691 if (aconnector->dc_link &&
2692 aconnector->dc_link->type == dc_connection_mst_branch)
2693 continue;
2694
2695 mutex_lock(&aconnector->hpd_lock);
2696 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2697 DRM_ERROR("KMS: Failed to detect connector\n");
2698
2699 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2700 emulated_link_detect(aconnector->dc_link);
2701 } else {
2702 mutex_lock(&dm->dc_lock);
2703 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2704 mutex_unlock(&dm->dc_lock);
2705 }
2706
2707 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2708 aconnector->fake_enable = false;
2709
2710 if (aconnector->dc_sink)
2711 dc_sink_release(aconnector->dc_sink);
2712 aconnector->dc_sink = NULL;
2713 amdgpu_dm_update_connector_after_detect(aconnector);
2714 mutex_unlock(&aconnector->hpd_lock);
2715 }
2716 drm_connector_list_iter_end(&iter);
2717
2718
2719 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2720 new_crtc_state->active_changed = true;
2721
2722
2723
2724
2725
2726
2727 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2728 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2729 if (dm_new_crtc_state->stream) {
2730 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2731 dc_stream_release(dm_new_crtc_state->stream);
2732 dm_new_crtc_state->stream = NULL;
2733 }
2734 }
2735
2736 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2737 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2738 if (dm_new_plane_state->dc_state) {
2739 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2740 dc_plane_state_release(dm_new_plane_state->dc_state);
2741 dm_new_plane_state->dc_state = NULL;
2742 }
2743 }
2744
2745 drm_atomic_helper_resume(ddev, dm->cached_state);
2746
2747 dm->cached_state = NULL;
2748
2749 amdgpu_dm_irq_resume_late(adev);
2750
2751 amdgpu_dm_smu_write_watermarks_table(adev);
2752
2753 return 0;
2754 }
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2767 .name = "dm",
2768 .early_init = dm_early_init,
2769 .late_init = dm_late_init,
2770 .sw_init = dm_sw_init,
2771 .sw_fini = dm_sw_fini,
2772 .early_fini = amdgpu_dm_early_fini,
2773 .hw_init = dm_hw_init,
2774 .hw_fini = dm_hw_fini,
2775 .suspend = dm_suspend,
2776 .resume = dm_resume,
2777 .is_idle = dm_is_idle,
2778 .wait_for_idle = dm_wait_for_idle,
2779 .check_soft_reset = dm_check_soft_reset,
2780 .soft_reset = dm_soft_reset,
2781 .set_clockgating_state = dm_set_clockgating_state,
2782 .set_powergating_state = dm_set_powergating_state,
2783 };
2784
2785 const struct amdgpu_ip_block_version dm_ip_block =
2786 {
2787 .type = AMD_IP_BLOCK_TYPE_DCE,
2788 .major = 1,
2789 .minor = 0,
2790 .rev = 0,
2791 .funcs = &amdgpu_dm_funcs,
2792 };
2793
2794
2795
2796
2797
2798
2799
2800
2801 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2802 .fb_create = amdgpu_display_user_framebuffer_create,
2803 .get_format_info = amd_get_format_info,
2804 .output_poll_changed = drm_fb_helper_output_poll_changed,
2805 .atomic_check = amdgpu_dm_atomic_check,
2806 .atomic_commit = drm_atomic_helper_commit,
2807 };
2808
2809 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2810 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2811 };
2812
2813 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2814 {
2815 u32 max_avg, min_cll, max, min, q, r;
2816 struct amdgpu_dm_backlight_caps *caps;
2817 struct amdgpu_display_manager *dm;
2818 struct drm_connector *conn_base;
2819 struct amdgpu_device *adev;
2820 struct dc_link *link = NULL;
2821 static const u8 pre_computed_values[] = {
2822 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2823 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2824 int i;
2825
2826 if (!aconnector || !aconnector->dc_link)
2827 return;
2828
2829 link = aconnector->dc_link;
2830 if (link->connector_signal != SIGNAL_TYPE_EDP)
2831 return;
2832
2833 conn_base = &aconnector->base;
2834 adev = drm_to_adev(conn_base->dev);
2835 dm = &adev->dm;
2836 for (i = 0; i < dm->num_of_edps; i++) {
2837 if (link == dm->backlight_link[i])
2838 break;
2839 }
2840 if (i >= dm->num_of_edps)
2841 return;
2842 caps = &dm->backlight_caps[i];
2843 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2844 caps->aux_support = false;
2845 max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2846 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2847
2848 if (caps->ext_caps->bits.oled == 1
2849
2850 )
2851 caps->aux_support = true;
2852
2853 if (amdgpu_backlight == 0)
2854 caps->aux_support = false;
2855 else if (amdgpu_backlight == 1)
2856 caps->aux_support = true;
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873 q = max_avg >> 5;
2874 r = max_avg % 32;
2875 max = (1 << q) * pre_computed_values[r];
2876
2877
2878 q = DIV_ROUND_CLOSEST(min_cll, 255);
2879 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2880
2881 caps->aux_max_input_signal = max;
2882 caps->aux_min_input_signal = min;
2883 }
2884
2885 void amdgpu_dm_update_connector_after_detect(
2886 struct amdgpu_dm_connector *aconnector)
2887 {
2888 struct drm_connector *connector = &aconnector->base;
2889 struct drm_device *dev = connector->dev;
2890 struct dc_sink *sink;
2891
2892
2893 if (aconnector->mst_mgr.mst_state == true)
2894 return;
2895
2896 sink = aconnector->dc_link->local_sink;
2897 if (sink)
2898 dc_sink_retain(sink);
2899
2900
2901
2902
2903
2904
2905 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2906 && aconnector->dc_em_sink) {
2907
2908
2909
2910
2911
2912 mutex_lock(&dev->mode_config.mutex);
2913
2914 if (sink) {
2915 if (aconnector->dc_sink) {
2916 amdgpu_dm_update_freesync_caps(connector, NULL);
2917
2918
2919
2920
2921
2922
2923 dc_sink_release(aconnector->dc_sink);
2924 }
2925 aconnector->dc_sink = sink;
2926 dc_sink_retain(aconnector->dc_sink);
2927 amdgpu_dm_update_freesync_caps(connector,
2928 aconnector->edid);
2929 } else {
2930 amdgpu_dm_update_freesync_caps(connector, NULL);
2931 if (!aconnector->dc_sink) {
2932 aconnector->dc_sink = aconnector->dc_em_sink;
2933 dc_sink_retain(aconnector->dc_sink);
2934 }
2935 }
2936
2937 mutex_unlock(&dev->mode_config.mutex);
2938
2939 if (sink)
2940 dc_sink_release(sink);
2941 return;
2942 }
2943
2944
2945
2946
2947
2948 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2949 dc_sink_release(sink);
2950 return;
2951 }
2952
2953 if (aconnector->dc_sink == sink) {
2954
2955
2956
2957
2958 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2959 aconnector->connector_id);
2960 if (sink)
2961 dc_sink_release(sink);
2962 return;
2963 }
2964
2965 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2966 aconnector->connector_id, aconnector->dc_sink, sink);
2967
2968 mutex_lock(&dev->mode_config.mutex);
2969
2970
2971
2972
2973
2974 if (sink) {
2975
2976
2977
2978
2979 if (aconnector->dc_sink) {
2980 amdgpu_dm_update_freesync_caps(connector, NULL);
2981 dc_sink_release(aconnector->dc_sink);
2982 }
2983
2984 aconnector->dc_sink = sink;
2985 dc_sink_retain(aconnector->dc_sink);
2986 if (sink->dc_edid.length == 0) {
2987 aconnector->edid = NULL;
2988 if (aconnector->dc_link->aux_mode) {
2989 drm_dp_cec_unset_edid(
2990 &aconnector->dm_dp_aux.aux);
2991 }
2992 } else {
2993 aconnector->edid =
2994 (struct edid *)sink->dc_edid.raw_edid;
2995
2996 if (aconnector->dc_link->aux_mode)
2997 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2998 aconnector->edid);
2999 }
3000
3001 drm_connector_update_edid_property(connector, aconnector->edid);
3002 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3003 update_connector_ext_caps(aconnector);
3004 } else {
3005 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3006 amdgpu_dm_update_freesync_caps(connector, NULL);
3007 drm_connector_update_edid_property(connector, NULL);
3008 aconnector->num_modes = 0;
3009 dc_sink_release(aconnector->dc_sink);
3010 aconnector->dc_sink = NULL;
3011 aconnector->edid = NULL;
3012 #ifdef CONFIG_DRM_AMD_DC_HDCP
3013
3014 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3015 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3016 #endif
3017 }
3018
3019 mutex_unlock(&dev->mode_config.mutex);
3020
3021 update_subconnector_property(aconnector);
3022
3023 if (sink)
3024 dc_sink_release(sink);
3025 }
3026
3027 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3028 {
3029 struct drm_connector *connector = &aconnector->base;
3030 struct drm_device *dev = connector->dev;
3031 enum dc_connection_type new_connection_type = dc_connection_none;
3032 struct amdgpu_device *adev = drm_to_adev(dev);
3033 #ifdef CONFIG_DRM_AMD_DC_HDCP
3034 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3035 #endif
3036 bool ret = false;
3037
3038 if (adev->dm.disable_hpd_irq)
3039 return;
3040
3041
3042
3043
3044
3045 mutex_lock(&aconnector->hpd_lock);
3046
3047 #ifdef CONFIG_DRM_AMD_DC_HDCP
3048 if (adev->dm.hdcp_workqueue) {
3049 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3050 dm_con_state->update_hdcp = true;
3051 }
3052 #endif
3053 if (aconnector->fake_enable)
3054 aconnector->fake_enable = false;
3055
3056 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3057 DRM_ERROR("KMS: Failed to detect connector\n");
3058
3059 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3060 emulated_link_detect(aconnector->dc_link);
3061
3062 drm_modeset_lock_all(dev);
3063 dm_restore_drm_connector_state(dev, connector);
3064 drm_modeset_unlock_all(dev);
3065
3066 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3067 drm_kms_helper_connector_hotplug_event(connector);
3068 } else {
3069 mutex_lock(&adev->dm.dc_lock);
3070 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3071 mutex_unlock(&adev->dm.dc_lock);
3072 if (ret) {
3073 amdgpu_dm_update_connector_after_detect(aconnector);
3074
3075 drm_modeset_lock_all(dev);
3076 dm_restore_drm_connector_state(dev, connector);
3077 drm_modeset_unlock_all(dev);
3078
3079 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3080 drm_kms_helper_connector_hotplug_event(connector);
3081 }
3082 }
3083 mutex_unlock(&aconnector->hpd_lock);
3084
3085 }
3086
3087 static void handle_hpd_irq(void *param)
3088 {
3089 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3090
3091 handle_hpd_irq_helper(aconnector);
3092
3093 }
3094
3095 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3096 {
3097 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3098 uint8_t dret;
3099 bool new_irq_handled = false;
3100 int dpcd_addr;
3101 int dpcd_bytes_to_read;
3102
3103 const int max_process_count = 30;
3104 int process_count = 0;
3105
3106 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3107
3108 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3109 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3110
3111 dpcd_addr = DP_SINK_COUNT;
3112 } else {
3113 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3114
3115 dpcd_addr = DP_SINK_COUNT_ESI;
3116 }
3117
3118 dret = drm_dp_dpcd_read(
3119 &aconnector->dm_dp_aux.aux,
3120 dpcd_addr,
3121 esi,
3122 dpcd_bytes_to_read);
3123
3124 while (dret == dpcd_bytes_to_read &&
3125 process_count < max_process_count) {
3126 uint8_t retry;
3127 dret = 0;
3128
3129 process_count++;
3130
3131 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3132
3133 if (aconnector->mst_mgr.mst_state)
3134 drm_dp_mst_hpd_irq(
3135 &aconnector->mst_mgr,
3136 esi,
3137 &new_irq_handled);
3138
3139 if (new_irq_handled) {
3140
3141 const int ack_dpcd_bytes_to_write =
3142 dpcd_bytes_to_read - 1;
3143
3144 for (retry = 0; retry < 3; retry++) {
3145 uint8_t wret;
3146
3147 wret = drm_dp_dpcd_write(
3148 &aconnector->dm_dp_aux.aux,
3149 dpcd_addr + 1,
3150 &esi[1],
3151 ack_dpcd_bytes_to_write);
3152 if (wret == ack_dpcd_bytes_to_write)
3153 break;
3154 }
3155
3156
3157 dret = drm_dp_dpcd_read(
3158 &aconnector->dm_dp_aux.aux,
3159 dpcd_addr,
3160 esi,
3161 dpcd_bytes_to_read);
3162
3163 new_irq_handled = false;
3164 } else {
3165 break;
3166 }
3167 }
3168
3169 if (process_count == max_process_count)
3170 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3171 }
3172
3173 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3174 union hpd_irq_data hpd_irq_data)
3175 {
3176 struct hpd_rx_irq_offload_work *offload_work =
3177 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3178
3179 if (!offload_work) {
3180 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3181 return;
3182 }
3183
3184 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3185 offload_work->data = hpd_irq_data;
3186 offload_work->offload_wq = offload_wq;
3187
3188 queue_work(offload_wq->wq, &offload_work->work);
3189 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3190 }
3191
3192 static void handle_hpd_rx_irq(void *param)
3193 {
3194 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3195 struct drm_connector *connector = &aconnector->base;
3196 struct drm_device *dev = connector->dev;
3197 struct dc_link *dc_link = aconnector->dc_link;
3198 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3199 bool result = false;
3200 enum dc_connection_type new_connection_type = dc_connection_none;
3201 struct amdgpu_device *adev = drm_to_adev(dev);
3202 union hpd_irq_data hpd_irq_data;
3203 bool link_loss = false;
3204 bool has_left_work = false;
3205 int idx = aconnector->base.index;
3206 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3207
3208 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3209
3210 if (adev->dm.disable_hpd_irq)
3211 return;
3212
3213
3214
3215
3216
3217
3218 mutex_lock(&aconnector->hpd_lock);
3219
3220 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3221 &link_loss, true, &has_left_work);
3222
3223 if (!has_left_work)
3224 goto out;
3225
3226 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3227 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3228 goto out;
3229 }
3230
3231 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3232 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3233 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3234 dm_handle_mst_sideband_msg(aconnector);
3235 goto out;
3236 }
3237
3238 if (link_loss) {
3239 bool skip = false;
3240
3241 spin_lock(&offload_wq->offload_lock);
3242 skip = offload_wq->is_handling_link_loss;
3243
3244 if (!skip)
3245 offload_wq->is_handling_link_loss = true;
3246
3247 spin_unlock(&offload_wq->offload_lock);
3248
3249 if (!skip)
3250 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3251
3252 goto out;
3253 }
3254 }
3255
3256 out:
3257 if (result && !is_mst_root_connector) {
3258
3259 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3260 DRM_ERROR("KMS: Failed to detect connector\n");
3261
3262 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3263 emulated_link_detect(dc_link);
3264
3265 if (aconnector->fake_enable)
3266 aconnector->fake_enable = false;
3267
3268 amdgpu_dm_update_connector_after_detect(aconnector);
3269
3270
3271 drm_modeset_lock_all(dev);
3272 dm_restore_drm_connector_state(dev, connector);
3273 drm_modeset_unlock_all(dev);
3274
3275 drm_kms_helper_connector_hotplug_event(connector);
3276 } else {
3277 bool ret = false;
3278
3279 mutex_lock(&adev->dm.dc_lock);
3280 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3281 mutex_unlock(&adev->dm.dc_lock);
3282
3283 if (ret) {
3284 if (aconnector->fake_enable)
3285 aconnector->fake_enable = false;
3286
3287 amdgpu_dm_update_connector_after_detect(aconnector);
3288
3289 drm_modeset_lock_all(dev);
3290 dm_restore_drm_connector_state(dev, connector);
3291 drm_modeset_unlock_all(dev);
3292
3293 drm_kms_helper_connector_hotplug_event(connector);
3294 }
3295 }
3296 }
3297 #ifdef CONFIG_DRM_AMD_DC_HDCP
3298 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3299 if (adev->dm.hdcp_workqueue)
3300 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3301 }
3302 #endif
3303
3304 if (dc_link->type != dc_connection_mst_branch)
3305 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3306
3307 mutex_unlock(&aconnector->hpd_lock);
3308 }
3309
3310 static void register_hpd_handlers(struct amdgpu_device *adev)
3311 {
3312 struct drm_device *dev = adev_to_drm(adev);
3313 struct drm_connector *connector;
3314 struct amdgpu_dm_connector *aconnector;
3315 const struct dc_link *dc_link;
3316 struct dc_interrupt_params int_params = {0};
3317
3318 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3319 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3320
3321 list_for_each_entry(connector,
3322 &dev->mode_config.connector_list, head) {
3323
3324 aconnector = to_amdgpu_dm_connector(connector);
3325 dc_link = aconnector->dc_link;
3326
3327 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3328 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3329 int_params.irq_source = dc_link->irq_source_hpd;
3330
3331 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3332 handle_hpd_irq,
3333 (void *) aconnector);
3334 }
3335
3336 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3337
3338
3339 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3340 int_params.irq_source = dc_link->irq_source_hpd_rx;
3341
3342 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3343 handle_hpd_rx_irq,
3344 (void *) aconnector);
3345
3346 if (adev->dm.hpd_rx_offload_wq)
3347 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3348 aconnector;
3349 }
3350 }
3351 }
3352
3353 #if defined(CONFIG_DRM_AMD_DC_SI)
3354
3355 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3356 {
3357 struct dc *dc = adev->dm.dc;
3358 struct common_irq_params *c_irq_params;
3359 struct dc_interrupt_params int_params = {0};
3360 int r;
3361 int i;
3362 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3363
3364 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3365 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3380 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
3381 if (r) {
3382 DRM_ERROR("Failed to add crtc irq id!\n");
3383 return r;
3384 }
3385
3386 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3387 int_params.irq_source =
3388 dc_interrupt_to_irq_source(dc, i+1 , 0);
3389
3390 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3391
3392 c_irq_params->adev = adev;
3393 c_irq_params->irq_src = int_params.irq_source;
3394
3395 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3396 dm_crtc_high_irq, c_irq_params);
3397 }
3398
3399
3400 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3401 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3402 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3403 if (r) {
3404 DRM_ERROR("Failed to add page flip irq id!\n");
3405 return r;
3406 }
3407
3408 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3409 int_params.irq_source =
3410 dc_interrupt_to_irq_source(dc, i, 0);
3411
3412 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3413
3414 c_irq_params->adev = adev;
3415 c_irq_params->irq_src = int_params.irq_source;
3416
3417 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3418 dm_pflip_high_irq, c_irq_params);
3419
3420 }
3421
3422
3423 r = amdgpu_irq_add_id(adev, client_id,
3424 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3425 if (r) {
3426 DRM_ERROR("Failed to add hpd irq id!\n");
3427 return r;
3428 }
3429
3430 register_hpd_handlers(adev);
3431
3432 return 0;
3433 }
3434 #endif
3435
3436
3437 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3438 {
3439 struct dc *dc = adev->dm.dc;
3440 struct common_irq_params *c_irq_params;
3441 struct dc_interrupt_params int_params = {0};
3442 int r;
3443 int i;
3444 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3445
3446 if (adev->family >= AMDGPU_FAMILY_AI)
3447 client_id = SOC15_IH_CLIENTID_DCE;
3448
3449 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3450 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3465 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3466 if (r) {
3467 DRM_ERROR("Failed to add crtc irq id!\n");
3468 return r;
3469 }
3470
3471 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3472 int_params.irq_source =
3473 dc_interrupt_to_irq_source(dc, i, 0);
3474
3475 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3476
3477 c_irq_params->adev = adev;
3478 c_irq_params->irq_src = int_params.irq_source;
3479
3480 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3481 dm_crtc_high_irq, c_irq_params);
3482 }
3483
3484
3485 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3486 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3487 if (r) {
3488 DRM_ERROR("Failed to add vupdate irq id!\n");
3489 return r;
3490 }
3491
3492 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3493 int_params.irq_source =
3494 dc_interrupt_to_irq_source(dc, i, 0);
3495
3496 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3497
3498 c_irq_params->adev = adev;
3499 c_irq_params->irq_src = int_params.irq_source;
3500
3501 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3502 dm_vupdate_high_irq, c_irq_params);
3503 }
3504
3505
3506 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3507 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3508 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3509 if (r) {
3510 DRM_ERROR("Failed to add page flip irq id!\n");
3511 return r;
3512 }
3513
3514 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3515 int_params.irq_source =
3516 dc_interrupt_to_irq_source(dc, i, 0);
3517
3518 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3519
3520 c_irq_params->adev = adev;
3521 c_irq_params->irq_src = int_params.irq_source;
3522
3523 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3524 dm_pflip_high_irq, c_irq_params);
3525
3526 }
3527
3528
3529 r = amdgpu_irq_add_id(adev, client_id,
3530 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3531 if (r) {
3532 DRM_ERROR("Failed to add hpd irq id!\n");
3533 return r;
3534 }
3535
3536 register_hpd_handlers(adev);
3537
3538 return 0;
3539 }
3540
3541
3542 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3543 {
3544 struct dc *dc = adev->dm.dc;
3545 struct common_irq_params *c_irq_params;
3546 struct dc_interrupt_params int_params = {0};
3547 int r;
3548 int i;
3549 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3550 static const unsigned int vrtl_int_srcid[] = {
3551 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3552 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3553 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3554 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3555 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3556 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3557 };
3558 #endif
3559
3560 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3561 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3577 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3578 i++) {
3579 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3580
3581 if (r) {
3582 DRM_ERROR("Failed to add crtc irq id!\n");
3583 return r;
3584 }
3585
3586 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3587 int_params.irq_source =
3588 dc_interrupt_to_irq_source(dc, i, 0);
3589
3590 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3591
3592 c_irq_params->adev = adev;
3593 c_irq_params->irq_src = int_params.irq_source;
3594
3595 amdgpu_dm_irq_register_interrupt(
3596 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3597 }
3598
3599
3600 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3601 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3602 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3603 vrtl_int_srcid[i], &adev->vline0_irq);
3604
3605 if (r) {
3606 DRM_ERROR("Failed to add vline0 irq id!\n");
3607 return r;
3608 }
3609
3610 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3611 int_params.irq_source =
3612 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3613
3614 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3615 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3616 break;
3617 }
3618
3619 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3620 - DC_IRQ_SOURCE_DC1_VLINE0];
3621
3622 c_irq_params->adev = adev;
3623 c_irq_params->irq_src = int_params.irq_source;
3624
3625 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3626 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3627 }
3628 #endif
3629
3630
3631
3632
3633
3634
3635 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3636 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3637 i++) {
3638 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3639
3640 if (r) {
3641 DRM_ERROR("Failed to add vupdate irq id!\n");
3642 return r;
3643 }
3644
3645 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3646 int_params.irq_source =
3647 dc_interrupt_to_irq_source(dc, i, 0);
3648
3649 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3650
3651 c_irq_params->adev = adev;
3652 c_irq_params->irq_src = int_params.irq_source;
3653
3654 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3655 dm_vupdate_high_irq, c_irq_params);
3656 }
3657
3658
3659 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3660 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3661 i++) {
3662 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3663 if (r) {
3664 DRM_ERROR("Failed to add page flip irq id!\n");
3665 return r;
3666 }
3667
3668 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3669 int_params.irq_source =
3670 dc_interrupt_to_irq_source(dc, i, 0);
3671
3672 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3673
3674 c_irq_params->adev = adev;
3675 c_irq_params->irq_src = int_params.irq_source;
3676
3677 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3678 dm_pflip_high_irq, c_irq_params);
3679
3680 }
3681
3682
3683 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3684 &adev->hpd_irq);
3685 if (r) {
3686 DRM_ERROR("Failed to add hpd irq id!\n");
3687 return r;
3688 }
3689
3690 register_hpd_handlers(adev);
3691
3692 return 0;
3693 }
3694
3695 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3696 {
3697 struct dc *dc = adev->dm.dc;
3698 struct common_irq_params *c_irq_params;
3699 struct dc_interrupt_params int_params = {0};
3700 int r, i;
3701
3702 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3703 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3704
3705 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3706 &adev->dmub_outbox_irq);
3707 if (r) {
3708 DRM_ERROR("Failed to add outbox irq id!\n");
3709 return r;
3710 }
3711
3712 if (dc->ctx->dmub_srv) {
3713 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3714 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3715 int_params.irq_source =
3716 dc_interrupt_to_irq_source(dc, i, 0);
3717
3718 c_irq_params = &adev->dm.dmub_outbox_params[0];
3719
3720 c_irq_params->adev = adev;
3721 c_irq_params->irq_src = int_params.irq_source;
3722
3723 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3724 dm_dmub_outbox1_low_irq, c_irq_params);
3725 }
3726
3727 return 0;
3728 }
3729
3730
3731
3732
3733
3734
3735
3736 int dm_atomic_get_state(struct drm_atomic_state *state,
3737 struct dm_atomic_state **dm_state)
3738 {
3739 struct drm_device *dev = state->dev;
3740 struct amdgpu_device *adev = drm_to_adev(dev);
3741 struct amdgpu_display_manager *dm = &adev->dm;
3742 struct drm_private_state *priv_state;
3743
3744 if (*dm_state)
3745 return 0;
3746
3747 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3748 if (IS_ERR(priv_state))
3749 return PTR_ERR(priv_state);
3750
3751 *dm_state = to_dm_atomic_state(priv_state);
3752
3753 return 0;
3754 }
3755
3756 static struct dm_atomic_state *
3757 dm_atomic_get_new_state(struct drm_atomic_state *state)
3758 {
3759 struct drm_device *dev = state->dev;
3760 struct amdgpu_device *adev = drm_to_adev(dev);
3761 struct amdgpu_display_manager *dm = &adev->dm;
3762 struct drm_private_obj *obj;
3763 struct drm_private_state *new_obj_state;
3764 int i;
3765
3766 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3767 if (obj->funcs == dm->atomic_obj.funcs)
3768 return to_dm_atomic_state(new_obj_state);
3769 }
3770
3771 return NULL;
3772 }
3773
3774 static struct drm_private_state *
3775 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3776 {
3777 struct dm_atomic_state *old_state, *new_state;
3778
3779 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3780 if (!new_state)
3781 return NULL;
3782
3783 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3784
3785 old_state = to_dm_atomic_state(obj->state);
3786
3787 if (old_state && old_state->context)
3788 new_state->context = dc_copy_state(old_state->context);
3789
3790 if (!new_state->context) {
3791 kfree(new_state);
3792 return NULL;
3793 }
3794
3795 return &new_state->base;
3796 }
3797
3798 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3799 struct drm_private_state *state)
3800 {
3801 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3802
3803 if (dm_state && dm_state->context)
3804 dc_release_state(dm_state->context);
3805
3806 kfree(dm_state);
3807 }
3808
3809 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3810 .atomic_duplicate_state = dm_atomic_duplicate_state,
3811 .atomic_destroy_state = dm_atomic_destroy_state,
3812 };
3813
3814 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3815 {
3816 struct dm_atomic_state *state;
3817 int r;
3818
3819 adev->mode_info.mode_config_initialized = true;
3820
3821 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3822 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3823
3824 adev_to_drm(adev)->mode_config.max_width = 16384;
3825 adev_to_drm(adev)->mode_config.max_height = 16384;
3826
3827 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3828 if (adev->asic_type == CHIP_HAWAII)
3829
3830 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3831 else
3832 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3833
3834 adev_to_drm(adev)->mode_config.async_page_flip = true;
3835
3836 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3837
3838 state = kzalloc(sizeof(*state), GFP_KERNEL);
3839 if (!state)
3840 return -ENOMEM;
3841
3842 state->context = dc_create_state(adev->dm.dc);
3843 if (!state->context) {
3844 kfree(state);
3845 return -ENOMEM;
3846 }
3847
3848 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3849
3850 drm_atomic_private_obj_init(adev_to_drm(adev),
3851 &adev->dm.atomic_obj,
3852 &state->base,
3853 &dm_atomic_state_funcs);
3854
3855 r = amdgpu_display_modeset_create_props(adev);
3856 if (r) {
3857 dc_release_state(state->context);
3858 kfree(state);
3859 return r;
3860 }
3861
3862 r = amdgpu_dm_audio_init(adev);
3863 if (r) {
3864 dc_release_state(state->context);
3865 kfree(state);
3866 return r;
3867 }
3868
3869 return 0;
3870 }
3871
3872 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3873 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3874 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3875
3876 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3877 int bl_idx)
3878 {
3879 #if defined(CONFIG_ACPI)
3880 struct amdgpu_dm_backlight_caps caps;
3881
3882 memset(&caps, 0, sizeof(caps));
3883
3884 if (dm->backlight_caps[bl_idx].caps_valid)
3885 return;
3886
3887 amdgpu_acpi_get_backlight_caps(&caps);
3888 if (caps.caps_valid) {
3889 dm->backlight_caps[bl_idx].caps_valid = true;
3890 if (caps.aux_support)
3891 return;
3892 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3893 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3894 } else {
3895 dm->backlight_caps[bl_idx].min_input_signal =
3896 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3897 dm->backlight_caps[bl_idx].max_input_signal =
3898 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3899 }
3900 #else
3901 if (dm->backlight_caps[bl_idx].aux_support)
3902 return;
3903
3904 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3905 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3906 #endif
3907 }
3908
3909 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3910 unsigned *min, unsigned *max)
3911 {
3912 if (!caps)
3913 return 0;
3914
3915 if (caps->aux_support) {
3916
3917 *max = 1000 * caps->aux_max_input_signal;
3918 *min = 1000 * caps->aux_min_input_signal;
3919 } else {
3920
3921 *max = 0x101 * caps->max_input_signal;
3922 *min = 0x101 * caps->min_input_signal;
3923 }
3924 return 1;
3925 }
3926
3927 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3928 uint32_t brightness)
3929 {
3930 unsigned min, max;
3931
3932 if (!get_brightness_range(caps, &min, &max))
3933 return brightness;
3934
3935
3936 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3937 AMDGPU_MAX_BL_LEVEL);
3938 }
3939
3940 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3941 uint32_t brightness)
3942 {
3943 unsigned min, max;
3944
3945 if (!get_brightness_range(caps, &min, &max))
3946 return brightness;
3947
3948 if (brightness < min)
3949 return 0;
3950
3951 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3952 max - min);
3953 }
3954
3955 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3956 int bl_idx,
3957 u32 user_brightness)
3958 {
3959 struct amdgpu_dm_backlight_caps caps;
3960 struct dc_link *link;
3961 u32 brightness;
3962 bool rc;
3963
3964 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3965 caps = dm->backlight_caps[bl_idx];
3966
3967 dm->brightness[bl_idx] = user_brightness;
3968
3969 if (bl_idx == 0)
3970 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3971 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3972 link = (struct dc_link *)dm->backlight_link[bl_idx];
3973
3974
3975 if (caps.aux_support) {
3976 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3977 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3978 if (!rc)
3979 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3980 } else {
3981 rc = dc_link_set_backlight_level(link, brightness, 0);
3982 if (!rc)
3983 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3984 }
3985
3986 if (rc)
3987 dm->actual_brightness[bl_idx] = user_brightness;
3988 }
3989
3990 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3991 {
3992 struct amdgpu_display_manager *dm = bl_get_data(bd);
3993 int i;
3994
3995 for (i = 0; i < dm->num_of_edps; i++) {
3996 if (bd == dm->backlight_dev[i])
3997 break;
3998 }
3999 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4000 i = 0;
4001 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4002
4003 return 0;
4004 }
4005
4006 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4007 int bl_idx)
4008 {
4009 struct amdgpu_dm_backlight_caps caps;
4010 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4011
4012 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4013 caps = dm->backlight_caps[bl_idx];
4014
4015 if (caps.aux_support) {
4016 u32 avg, peak;
4017 bool rc;
4018
4019 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4020 if (!rc)
4021 return dm->brightness[bl_idx];
4022 return convert_brightness_to_user(&caps, avg);
4023 } else {
4024 int ret = dc_link_get_backlight_level(link);
4025
4026 if (ret == DC_ERROR_UNEXPECTED)
4027 return dm->brightness[bl_idx];
4028 return convert_brightness_to_user(&caps, ret);
4029 }
4030 }
4031
4032 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4033 {
4034 struct amdgpu_display_manager *dm = bl_get_data(bd);
4035 int i;
4036
4037 for (i = 0; i < dm->num_of_edps; i++) {
4038 if (bd == dm->backlight_dev[i])
4039 break;
4040 }
4041 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4042 i = 0;
4043 return amdgpu_dm_backlight_get_level(dm, i);
4044 }
4045
4046 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4047 .options = BL_CORE_SUSPENDRESUME,
4048 .get_brightness = amdgpu_dm_backlight_get_brightness,
4049 .update_status = amdgpu_dm_backlight_update_status,
4050 };
4051
4052 static void
4053 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4054 {
4055 char bl_name[16];
4056 struct backlight_properties props = { 0 };
4057
4058 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4059 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4060
4061 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4062 props.brightness = AMDGPU_MAX_BL_LEVEL;
4063 props.type = BACKLIGHT_RAW;
4064
4065 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4066 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4067
4068 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4069 adev_to_drm(dm->adev)->dev,
4070 dm,
4071 &amdgpu_dm_backlight_ops,
4072 &props);
4073
4074 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4075 DRM_ERROR("DM: Backlight registration failed!\n");
4076 else
4077 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4078 }
4079
4080 static int initialize_plane(struct amdgpu_display_manager *dm,
4081 struct amdgpu_mode_info *mode_info, int plane_id,
4082 enum drm_plane_type plane_type,
4083 const struct dc_plane_cap *plane_cap)
4084 {
4085 struct drm_plane *plane;
4086 unsigned long possible_crtcs;
4087 int ret = 0;
4088
4089 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4090 if (!plane) {
4091 DRM_ERROR("KMS: Failed to allocate plane\n");
4092 return -ENOMEM;
4093 }
4094 plane->type = plane_type;
4095
4096
4097
4098
4099
4100
4101
4102 possible_crtcs = 1 << plane_id;
4103 if (plane_id >= dm->dc->caps.max_streams)
4104 possible_crtcs = 0xff;
4105
4106 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4107
4108 if (ret) {
4109 DRM_ERROR("KMS: Failed to initialize plane\n");
4110 kfree(plane);
4111 return ret;
4112 }
4113
4114 if (mode_info)
4115 mode_info->planes[plane_id] = plane;
4116
4117 return ret;
4118 }
4119
4120
4121 static void register_backlight_device(struct amdgpu_display_manager *dm,
4122 struct dc_link *link)
4123 {
4124 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4125 link->type != dc_connection_none) {
4126
4127
4128
4129
4130
4131 if (!dm->backlight_dev[dm->num_of_edps])
4132 amdgpu_dm_register_backlight_device(dm);
4133
4134 if (dm->backlight_dev[dm->num_of_edps]) {
4135 dm->backlight_link[dm->num_of_edps] = link;
4136 dm->num_of_edps++;
4137 }
4138 }
4139 }
4140
4141 static void amdgpu_set_panel_orientation(struct drm_connector *connector);
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4152 {
4153 struct amdgpu_display_manager *dm = &adev->dm;
4154 int32_t i;
4155 struct amdgpu_dm_connector *aconnector = NULL;
4156 struct amdgpu_encoder *aencoder = NULL;
4157 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4158 uint32_t link_cnt;
4159 int32_t primary_planes;
4160 enum dc_connection_type new_connection_type = dc_connection_none;
4161 const struct dc_plane_cap *plane;
4162 bool psr_feature_enabled = false;
4163
4164 dm->display_indexes_num = dm->dc->caps.max_streams;
4165
4166 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4167
4168 link_cnt = dm->dc->caps.max_links;
4169 if (amdgpu_dm_mode_config_init(dm->adev)) {
4170 DRM_ERROR("DM: Failed to initialize mode config\n");
4171 return -EINVAL;
4172 }
4173
4174
4175 primary_planes = dm->dc->caps.max_streams;
4176 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4177
4178
4179
4180
4181
4182 for (i = (primary_planes - 1); i >= 0; i--) {
4183 plane = &dm->dc->caps.planes[i];
4184
4185 if (initialize_plane(dm, mode_info, i,
4186 DRM_PLANE_TYPE_PRIMARY, plane)) {
4187 DRM_ERROR("KMS: Failed to initialize primary plane\n");
4188 goto fail;
4189 }
4190 }
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4202 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4203
4204
4205 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
4206 break;
4207
4208 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4209 continue;
4210
4211 if (!plane->blends_with_above || !plane->blends_with_below)
4212 continue;
4213
4214 if (!plane->pixel_format_support.argb8888)
4215 continue;
4216
4217 if (initialize_plane(dm, NULL, primary_planes + i,
4218 DRM_PLANE_TYPE_OVERLAY, plane)) {
4219 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4220 goto fail;
4221 }
4222
4223
4224 break;
4225 }
4226
4227 for (i = 0; i < dm->dc->caps.max_streams; i++)
4228 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4229 DRM_ERROR("KMS: Failed to initialize crtc\n");
4230 goto fail;
4231 }
4232
4233
4234 switch (adev->ip_versions[DCE_HWIP][0]) {
4235 case IP_VERSION(3, 0, 0):
4236 case IP_VERSION(3, 1, 2):
4237 case IP_VERSION(3, 1, 3):
4238 case IP_VERSION(3, 1, 4):
4239 case IP_VERSION(3, 1, 5):
4240 case IP_VERSION(3, 1, 6):
4241 case IP_VERSION(3, 2, 0):
4242 case IP_VERSION(3, 2, 1):
4243 case IP_VERSION(2, 1, 0):
4244 if (register_outbox_irq_handlers(dm->adev)) {
4245 DRM_ERROR("DM: Failed to initialize IRQ\n");
4246 goto fail;
4247 }
4248 break;
4249 default:
4250 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4251 adev->ip_versions[DCE_HWIP][0]);
4252 }
4253
4254
4255 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4256 switch (adev->ip_versions[DCE_HWIP][0]) {
4257 case IP_VERSION(3, 1, 2):
4258 case IP_VERSION(3, 1, 3):
4259 case IP_VERSION(3, 1, 4):
4260 case IP_VERSION(3, 1, 5):
4261 case IP_VERSION(3, 1, 6):
4262 case IP_VERSION(3, 2, 0):
4263 case IP_VERSION(3, 2, 1):
4264 psr_feature_enabled = true;
4265 break;
4266 default:
4267 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4268 break;
4269 }
4270 }
4271
4272
4273 for (i = 0; i < link_cnt; i++) {
4274 struct dc_link *link = NULL;
4275
4276 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4277 DRM_ERROR(
4278 "KMS: Cannot support more than %d display indexes\n",
4279 AMDGPU_DM_MAX_DISPLAY_INDEX);
4280 continue;
4281 }
4282
4283 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4284 if (!aconnector)
4285 goto fail;
4286
4287 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4288 if (!aencoder)
4289 goto fail;
4290
4291 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4292 DRM_ERROR("KMS: Failed to initialize encoder\n");
4293 goto fail;
4294 }
4295
4296 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4297 DRM_ERROR("KMS: Failed to initialize connector\n");
4298 goto fail;
4299 }
4300
4301 link = dc_get_link_at_index(dm->dc, i);
4302
4303 if (!dc_link_detect_sink(link, &new_connection_type))
4304 DRM_ERROR("KMS: Failed to detect connector\n");
4305
4306 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4307 emulated_link_detect(link);
4308 amdgpu_dm_update_connector_after_detect(aconnector);
4309 } else {
4310 bool ret = false;
4311
4312 mutex_lock(&dm->dc_lock);
4313 ret = dc_link_detect(link, DETECT_REASON_BOOT);
4314 mutex_unlock(&dm->dc_lock);
4315
4316 if (ret) {
4317 amdgpu_dm_update_connector_after_detect(aconnector);
4318 register_backlight_device(dm, link);
4319
4320 if (dm->num_of_edps)
4321 update_connector_ext_caps(aconnector);
4322
4323 if (psr_feature_enabled)
4324 amdgpu_dm_set_psr_caps(link);
4325
4326
4327
4328
4329 if (link->psr_settings.psr_feature_enabled)
4330 adev_to_drm(adev)->vblank_disable_immediate = false;
4331 }
4332 }
4333 amdgpu_set_panel_orientation(&aconnector->base);
4334 }
4335
4336
4337 switch (adev->asic_type) {
4338 #if defined(CONFIG_DRM_AMD_DC_SI)
4339 case CHIP_TAHITI:
4340 case CHIP_PITCAIRN:
4341 case CHIP_VERDE:
4342 case CHIP_OLAND:
4343 if (dce60_register_irq_handlers(dm->adev)) {
4344 DRM_ERROR("DM: Failed to initialize IRQ\n");
4345 goto fail;
4346 }
4347 break;
4348 #endif
4349 case CHIP_BONAIRE:
4350 case CHIP_HAWAII:
4351 case CHIP_KAVERI:
4352 case CHIP_KABINI:
4353 case CHIP_MULLINS:
4354 case CHIP_TONGA:
4355 case CHIP_FIJI:
4356 case CHIP_CARRIZO:
4357 case CHIP_STONEY:
4358 case CHIP_POLARIS11:
4359 case CHIP_POLARIS10:
4360 case CHIP_POLARIS12:
4361 case CHIP_VEGAM:
4362 case CHIP_VEGA10:
4363 case CHIP_VEGA12:
4364 case CHIP_VEGA20:
4365 if (dce110_register_irq_handlers(dm->adev)) {
4366 DRM_ERROR("DM: Failed to initialize IRQ\n");
4367 goto fail;
4368 }
4369 break;
4370 default:
4371 switch (adev->ip_versions[DCE_HWIP][0]) {
4372 case IP_VERSION(1, 0, 0):
4373 case IP_VERSION(1, 0, 1):
4374 case IP_VERSION(2, 0, 2):
4375 case IP_VERSION(2, 0, 3):
4376 case IP_VERSION(2, 0, 0):
4377 case IP_VERSION(2, 1, 0):
4378 case IP_VERSION(3, 0, 0):
4379 case IP_VERSION(3, 0, 2):
4380 case IP_VERSION(3, 0, 3):
4381 case IP_VERSION(3, 0, 1):
4382 case IP_VERSION(3, 1, 2):
4383 case IP_VERSION(3, 1, 3):
4384 case IP_VERSION(3, 1, 4):
4385 case IP_VERSION(3, 1, 5):
4386 case IP_VERSION(3, 1, 6):
4387 case IP_VERSION(3, 2, 0):
4388 case IP_VERSION(3, 2, 1):
4389 if (dcn10_register_irq_handlers(dm->adev)) {
4390 DRM_ERROR("DM: Failed to initialize IRQ\n");
4391 goto fail;
4392 }
4393 break;
4394 default:
4395 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4396 adev->ip_versions[DCE_HWIP][0]);
4397 goto fail;
4398 }
4399 break;
4400 }
4401
4402 return 0;
4403 fail:
4404 kfree(aencoder);
4405 kfree(aconnector);
4406
4407 return -EINVAL;
4408 }
4409
4410 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4411 {
4412 drm_atomic_private_obj_fini(&dm->atomic_obj);
4413 return;
4414 }
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427 static void dm_bandwidth_update(struct amdgpu_device *adev)
4428 {
4429
4430 }
4431
4432 static const struct amdgpu_display_funcs dm_display_funcs = {
4433 .bandwidth_update = dm_bandwidth_update,
4434 .vblank_get_counter = dm_vblank_get_counter,
4435 .backlight_set_level = NULL,
4436 .backlight_get_level = NULL,
4437 .hpd_sense = NULL,
4438 .hpd_set_polarity = NULL,
4439 .hpd_get_gpio_reg = NULL,
4440 .page_flip_get_scanoutpos =
4441 dm_crtc_get_scanoutpos,
4442 .add_encoder = NULL,
4443 .add_connector = NULL,
4444 };
4445
4446 #if defined(CONFIG_DEBUG_KERNEL_DC)
4447
4448 static ssize_t s3_debug_store(struct device *device,
4449 struct device_attribute *attr,
4450 const char *buf,
4451 size_t count)
4452 {
4453 int ret;
4454 int s3_state;
4455 struct drm_device *drm_dev = dev_get_drvdata(device);
4456 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4457
4458 ret = kstrtoint(buf, 0, &s3_state);
4459
4460 if (ret == 0) {
4461 if (s3_state) {
4462 dm_resume(adev);
4463 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4464 } else
4465 dm_suspend(adev);
4466 }
4467
4468 return ret == 0 ? count : 0;
4469 }
4470
4471 DEVICE_ATTR_WO(s3_debug);
4472
4473 #endif
4474
4475 static int dm_early_init(void *handle)
4476 {
4477 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4478
4479 switch (adev->asic_type) {
4480 #if defined(CONFIG_DRM_AMD_DC_SI)
4481 case CHIP_TAHITI:
4482 case CHIP_PITCAIRN:
4483 case CHIP_VERDE:
4484 adev->mode_info.num_crtc = 6;
4485 adev->mode_info.num_hpd = 6;
4486 adev->mode_info.num_dig = 6;
4487 break;
4488 case CHIP_OLAND:
4489 adev->mode_info.num_crtc = 2;
4490 adev->mode_info.num_hpd = 2;
4491 adev->mode_info.num_dig = 2;
4492 break;
4493 #endif
4494 case CHIP_BONAIRE:
4495 case CHIP_HAWAII:
4496 adev->mode_info.num_crtc = 6;
4497 adev->mode_info.num_hpd = 6;
4498 adev->mode_info.num_dig = 6;
4499 break;
4500 case CHIP_KAVERI:
4501 adev->mode_info.num_crtc = 4;
4502 adev->mode_info.num_hpd = 6;
4503 adev->mode_info.num_dig = 7;
4504 break;
4505 case CHIP_KABINI:
4506 case CHIP_MULLINS:
4507 adev->mode_info.num_crtc = 2;
4508 adev->mode_info.num_hpd = 6;
4509 adev->mode_info.num_dig = 6;
4510 break;
4511 case CHIP_FIJI:
4512 case CHIP_TONGA:
4513 adev->mode_info.num_crtc = 6;
4514 adev->mode_info.num_hpd = 6;
4515 adev->mode_info.num_dig = 7;
4516 break;
4517 case CHIP_CARRIZO:
4518 adev->mode_info.num_crtc = 3;
4519 adev->mode_info.num_hpd = 6;
4520 adev->mode_info.num_dig = 9;
4521 break;
4522 case CHIP_STONEY:
4523 adev->mode_info.num_crtc = 2;
4524 adev->mode_info.num_hpd = 6;
4525 adev->mode_info.num_dig = 9;
4526 break;
4527 case CHIP_POLARIS11:
4528 case CHIP_POLARIS12:
4529 adev->mode_info.num_crtc = 5;
4530 adev->mode_info.num_hpd = 5;
4531 adev->mode_info.num_dig = 5;
4532 break;
4533 case CHIP_POLARIS10:
4534 case CHIP_VEGAM:
4535 adev->mode_info.num_crtc = 6;
4536 adev->mode_info.num_hpd = 6;
4537 adev->mode_info.num_dig = 6;
4538 break;
4539 case CHIP_VEGA10:
4540 case CHIP_VEGA12:
4541 case CHIP_VEGA20:
4542 adev->mode_info.num_crtc = 6;
4543 adev->mode_info.num_hpd = 6;
4544 adev->mode_info.num_dig = 6;
4545 break;
4546 default:
4547
4548 switch (adev->ip_versions[DCE_HWIP][0]) {
4549 case IP_VERSION(2, 0, 2):
4550 case IP_VERSION(3, 0, 0):
4551 adev->mode_info.num_crtc = 6;
4552 adev->mode_info.num_hpd = 6;
4553 adev->mode_info.num_dig = 6;
4554 break;
4555 case IP_VERSION(2, 0, 0):
4556 case IP_VERSION(3, 0, 2):
4557 adev->mode_info.num_crtc = 5;
4558 adev->mode_info.num_hpd = 5;
4559 adev->mode_info.num_dig = 5;
4560 break;
4561 case IP_VERSION(2, 0, 3):
4562 case IP_VERSION(3, 0, 3):
4563 adev->mode_info.num_crtc = 2;
4564 adev->mode_info.num_hpd = 2;
4565 adev->mode_info.num_dig = 2;
4566 break;
4567 case IP_VERSION(1, 0, 0):
4568 case IP_VERSION(1, 0, 1):
4569 case IP_VERSION(3, 0, 1):
4570 case IP_VERSION(2, 1, 0):
4571 case IP_VERSION(3, 1, 2):
4572 case IP_VERSION(3, 1, 3):
4573 case IP_VERSION(3, 1, 4):
4574 case IP_VERSION(3, 1, 5):
4575 case IP_VERSION(3, 1, 6):
4576 case IP_VERSION(3, 2, 0):
4577 case IP_VERSION(3, 2, 1):
4578 adev->mode_info.num_crtc = 4;
4579 adev->mode_info.num_hpd = 4;
4580 adev->mode_info.num_dig = 4;
4581 break;
4582 default:
4583 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4584 adev->ip_versions[DCE_HWIP][0]);
4585 return -EINVAL;
4586 }
4587 break;
4588 }
4589
4590 amdgpu_dm_set_irq_funcs(adev);
4591
4592 if (adev->mode_info.funcs == NULL)
4593 adev->mode_info.funcs = &dm_display_funcs;
4594
4595
4596
4597
4598
4599
4600 #if defined(CONFIG_DEBUG_KERNEL_DC)
4601 device_create_file(
4602 adev_to_drm(adev)->dev,
4603 &dev_attr_s3_debug);
4604 #endif
4605
4606 return 0;
4607 }
4608
4609 static bool modereset_required(struct drm_crtc_state *crtc_state)
4610 {
4611 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4612 }
4613
4614 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4615 {
4616 drm_encoder_cleanup(encoder);
4617 kfree(encoder);
4618 }
4619
4620 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4621 .destroy = amdgpu_dm_encoder_destroy,
4622 };
4623
4624 static int
4625 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4626 const enum surface_pixel_format format,
4627 enum dc_color_space *color_space)
4628 {
4629 bool full_range;
4630
4631 *color_space = COLOR_SPACE_SRGB;
4632
4633
4634 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4635 return 0;
4636
4637 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4638
4639 switch (plane_state->color_encoding) {
4640 case DRM_COLOR_YCBCR_BT601:
4641 if (full_range)
4642 *color_space = COLOR_SPACE_YCBCR601;
4643 else
4644 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4645 break;
4646
4647 case DRM_COLOR_YCBCR_BT709:
4648 if (full_range)
4649 *color_space = COLOR_SPACE_YCBCR709;
4650 else
4651 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4652 break;
4653
4654 case DRM_COLOR_YCBCR_BT2020:
4655 if (full_range)
4656 *color_space = COLOR_SPACE_2020_YCBCR;
4657 else
4658 return -EINVAL;
4659 break;
4660
4661 default:
4662 return -EINVAL;
4663 }
4664
4665 return 0;
4666 }
4667
4668 static int
4669 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4670 const struct drm_plane_state *plane_state,
4671 const uint64_t tiling_flags,
4672 struct dc_plane_info *plane_info,
4673 struct dc_plane_address *address,
4674 bool tmz_surface,
4675 bool force_disable_dcc)
4676 {
4677 const struct drm_framebuffer *fb = plane_state->fb;
4678 const struct amdgpu_framebuffer *afb =
4679 to_amdgpu_framebuffer(plane_state->fb);
4680 int ret;
4681
4682 memset(plane_info, 0, sizeof(*plane_info));
4683
4684 switch (fb->format->format) {
4685 case DRM_FORMAT_C8:
4686 plane_info->format =
4687 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4688 break;
4689 case DRM_FORMAT_RGB565:
4690 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4691 break;
4692 case DRM_FORMAT_XRGB8888:
4693 case DRM_FORMAT_ARGB8888:
4694 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4695 break;
4696 case DRM_FORMAT_XRGB2101010:
4697 case DRM_FORMAT_ARGB2101010:
4698 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4699 break;
4700 case DRM_FORMAT_XBGR2101010:
4701 case DRM_FORMAT_ABGR2101010:
4702 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4703 break;
4704 case DRM_FORMAT_XBGR8888:
4705 case DRM_FORMAT_ABGR8888:
4706 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4707 break;
4708 case DRM_FORMAT_NV21:
4709 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4710 break;
4711 case DRM_FORMAT_NV12:
4712 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4713 break;
4714 case DRM_FORMAT_P010:
4715 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4716 break;
4717 case DRM_FORMAT_XRGB16161616F:
4718 case DRM_FORMAT_ARGB16161616F:
4719 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4720 break;
4721 case DRM_FORMAT_XBGR16161616F:
4722 case DRM_FORMAT_ABGR16161616F:
4723 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4724 break;
4725 case DRM_FORMAT_XRGB16161616:
4726 case DRM_FORMAT_ARGB16161616:
4727 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4728 break;
4729 case DRM_FORMAT_XBGR16161616:
4730 case DRM_FORMAT_ABGR16161616:
4731 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4732 break;
4733 default:
4734 DRM_ERROR(
4735 "Unsupported screen format %p4cc\n",
4736 &fb->format->format);
4737 return -EINVAL;
4738 }
4739
4740 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4741 case DRM_MODE_ROTATE_0:
4742 plane_info->rotation = ROTATION_ANGLE_0;
4743 break;
4744 case DRM_MODE_ROTATE_90:
4745 plane_info->rotation = ROTATION_ANGLE_90;
4746 break;
4747 case DRM_MODE_ROTATE_180:
4748 plane_info->rotation = ROTATION_ANGLE_180;
4749 break;
4750 case DRM_MODE_ROTATE_270:
4751 plane_info->rotation = ROTATION_ANGLE_270;
4752 break;
4753 default:
4754 plane_info->rotation = ROTATION_ANGLE_0;
4755 break;
4756 }
4757
4758
4759 plane_info->visible = true;
4760 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4761
4762 plane_info->layer_index = plane_state->normalized_zpos;
4763
4764 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4765 &plane_info->color_space);
4766 if (ret)
4767 return ret;
4768
4769 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4770 plane_info->rotation, tiling_flags,
4771 &plane_info->tiling_info,
4772 &plane_info->plane_size,
4773 &plane_info->dcc, address,
4774 tmz_surface, force_disable_dcc);
4775 if (ret)
4776 return ret;
4777
4778 fill_blending_from_plane_state(
4779 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
4780 &plane_info->global_alpha, &plane_info->global_alpha_value);
4781
4782 return 0;
4783 }
4784
4785 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4786 struct dc_plane_state *dc_plane_state,
4787 struct drm_plane_state *plane_state,
4788 struct drm_crtc_state *crtc_state)
4789 {
4790 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4791 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4792 struct dc_scaling_info scaling_info;
4793 struct dc_plane_info plane_info;
4794 int ret;
4795 bool force_disable_dcc = false;
4796
4797 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
4798 if (ret)
4799 return ret;
4800
4801 dc_plane_state->src_rect = scaling_info.src_rect;
4802 dc_plane_state->dst_rect = scaling_info.dst_rect;
4803 dc_plane_state->clip_rect = scaling_info.clip_rect;
4804 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4805
4806 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4807 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4808 afb->tiling_flags,
4809 &plane_info,
4810 &dc_plane_state->address,
4811 afb->tmz_surface,
4812 force_disable_dcc);
4813 if (ret)
4814 return ret;
4815
4816 dc_plane_state->format = plane_info.format;
4817 dc_plane_state->color_space = plane_info.color_space;
4818 dc_plane_state->format = plane_info.format;
4819 dc_plane_state->plane_size = plane_info.plane_size;
4820 dc_plane_state->rotation = plane_info.rotation;
4821 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4822 dc_plane_state->stereo_format = plane_info.stereo_format;
4823 dc_plane_state->tiling_info = plane_info.tiling_info;
4824 dc_plane_state->visible = plane_info.visible;
4825 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4826 dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
4827 dc_plane_state->global_alpha = plane_info.global_alpha;
4828 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4829 dc_plane_state->dcc = plane_info.dcc;
4830 dc_plane_state->layer_index = plane_info.layer_index;
4831 dc_plane_state->flip_int_enabled = true;
4832
4833
4834
4835
4836
4837 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4838 if (ret)
4839 return ret;
4840
4841 return 0;
4842 }
4843
4844
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855
4856
4857
4858
4859
4860
4861
4862
4863
4864
4865
4866
4867
4868
4869 static void fill_dc_dirty_rects(struct drm_plane *plane,
4870 struct drm_plane_state *old_plane_state,
4871 struct drm_plane_state *new_plane_state,
4872 struct drm_crtc_state *crtc_state,
4873 struct dc_flip_addrs *flip_addrs)
4874 {
4875 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4876 struct rect *dirty_rects = flip_addrs->dirty_rects;
4877 uint32_t num_clips;
4878 bool bb_changed;
4879 bool fb_changed;
4880 uint32_t i = 0;
4881
4882 flip_addrs->dirty_rect_count = 0;
4883
4884
4885
4886
4887
4888 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4889 return;
4890
4891
4892
4893
4894
4895 if (!dm_crtc_state->mpo_requested) {
4896 dirty_rects[0].x = 0;
4897 dirty_rects[0].y = 0;
4898 dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
4899 dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
4900 flip_addrs->dirty_rect_count = 1;
4901 DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
4902 new_plane_state->plane->base.id,
4903 dm_crtc_state->base.mode.crtc_hdisplay,
4904 dm_crtc_state->base.mode.crtc_vdisplay);
4905 return;
4906 }
4907
4908
4909
4910
4911
4912
4913
4914
4915 num_clips = drm_plane_get_damage_clips_count(new_plane_state);
4916 fb_changed = old_plane_state->fb->base.id !=
4917 new_plane_state->fb->base.id;
4918 bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
4919 old_plane_state->crtc_y != new_plane_state->crtc_y ||
4920 old_plane_state->crtc_w != new_plane_state->crtc_w ||
4921 old_plane_state->crtc_h != new_plane_state->crtc_h);
4922
4923 DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
4924 new_plane_state->plane->base.id,
4925 bb_changed, fb_changed, num_clips);
4926
4927 if (num_clips || fb_changed || bb_changed) {
4928 dirty_rects[i].x = new_plane_state->crtc_x;
4929 dirty_rects[i].y = new_plane_state->crtc_y;
4930 dirty_rects[i].width = new_plane_state->crtc_w;
4931 dirty_rects[i].height = new_plane_state->crtc_h;
4932 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
4933 new_plane_state->plane->base.id,
4934 dirty_rects[i].x, dirty_rects[i].y,
4935 dirty_rects[i].width, dirty_rects[i].height);
4936 i += 1;
4937 }
4938
4939
4940 if (bb_changed) {
4941 dirty_rects[i].x = old_plane_state->crtc_x;
4942 dirty_rects[i].y = old_plane_state->crtc_y;
4943 dirty_rects[i].width = old_plane_state->crtc_w;
4944 dirty_rects[i].height = old_plane_state->crtc_h;
4945 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
4946 old_plane_state->plane->base.id,
4947 dirty_rects[i].x, dirty_rects[i].y,
4948 dirty_rects[i].width, dirty_rects[i].height);
4949 i += 1;
4950 }
4951
4952 flip_addrs->dirty_rect_count = i;
4953 }
4954
4955 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4956 const struct dm_connector_state *dm_state,
4957 struct dc_stream_state *stream)
4958 {
4959 enum amdgpu_rmx_type rmx_type;
4960
4961 struct rect src = { 0 };
4962 struct rect dst = { 0 };
4963
4964
4965 if (!mode)
4966 return;
4967
4968
4969 src.width = mode->hdisplay;
4970 src.height = mode->vdisplay;
4971 dst.width = stream->timing.h_addressable;
4972 dst.height = stream->timing.v_addressable;
4973
4974 if (dm_state) {
4975 rmx_type = dm_state->scaling;
4976 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4977 if (src.width * dst.height <
4978 src.height * dst.width) {
4979
4980 dst.width = src.width *
4981 dst.height / src.height;
4982 } else {
4983
4984 dst.height = src.height *
4985 dst.width / src.width;
4986 }
4987 } else if (rmx_type == RMX_CENTER) {
4988 dst = src;
4989 }
4990
4991 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4992 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4993
4994 if (dm_state->underscan_enable) {
4995 dst.x += dm_state->underscan_hborder / 2;
4996 dst.y += dm_state->underscan_vborder / 2;
4997 dst.width -= dm_state->underscan_hborder;
4998 dst.height -= dm_state->underscan_vborder;
4999 }
5000 }
5001
5002 stream->src = src;
5003 stream->dst = dst;
5004
5005 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5006 dst.x, dst.y, dst.width, dst.height);
5007
5008 }
5009
5010 static enum dc_color_depth
5011 convert_color_depth_from_display_info(const struct drm_connector *connector,
5012 bool is_y420, int requested_bpc)
5013 {
5014 uint8_t bpc;
5015
5016 if (is_y420) {
5017 bpc = 8;
5018
5019
5020 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5021 bpc = 16;
5022 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5023 bpc = 12;
5024 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5025 bpc = 10;
5026 } else {
5027 bpc = (uint8_t)connector->display_info.bpc;
5028
5029 bpc = bpc ? bpc : 8;
5030 }
5031
5032 if (requested_bpc > 0) {
5033
5034
5035
5036
5037
5038
5039
5040
5041 bpc = min_t(u8, bpc, requested_bpc);
5042
5043
5044 bpc = bpc - (bpc & 1);
5045 }
5046
5047 switch (bpc) {
5048 case 0:
5049
5050
5051
5052
5053
5054 return COLOR_DEPTH_888;
5055 case 6:
5056 return COLOR_DEPTH_666;
5057 case 8:
5058 return COLOR_DEPTH_888;
5059 case 10:
5060 return COLOR_DEPTH_101010;
5061 case 12:
5062 return COLOR_DEPTH_121212;
5063 case 14:
5064 return COLOR_DEPTH_141414;
5065 case 16:
5066 return COLOR_DEPTH_161616;
5067 default:
5068 return COLOR_DEPTH_UNDEFINED;
5069 }
5070 }
5071
5072 static enum dc_aspect_ratio
5073 get_aspect_ratio(const struct drm_display_mode *mode_in)
5074 {
5075
5076 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5077 }
5078
5079 static enum dc_color_space
5080 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5081 {
5082 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5083
5084 switch (dc_crtc_timing->pixel_encoding) {
5085 case PIXEL_ENCODING_YCBCR422:
5086 case PIXEL_ENCODING_YCBCR444:
5087 case PIXEL_ENCODING_YCBCR420:
5088 {
5089
5090
5091
5092
5093
5094 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5095 if (dc_crtc_timing->flags.Y_ONLY)
5096 color_space =
5097 COLOR_SPACE_YCBCR709_LIMITED;
5098 else
5099 color_space = COLOR_SPACE_YCBCR709;
5100 } else {
5101 if (dc_crtc_timing->flags.Y_ONLY)
5102 color_space =
5103 COLOR_SPACE_YCBCR601_LIMITED;
5104 else
5105 color_space = COLOR_SPACE_YCBCR601;
5106 }
5107
5108 }
5109 break;
5110 case PIXEL_ENCODING_RGB:
5111 color_space = COLOR_SPACE_SRGB;
5112 break;
5113
5114 default:
5115 WARN_ON(1);
5116 break;
5117 }
5118
5119 return color_space;
5120 }
5121
5122 static bool adjust_colour_depth_from_display_info(
5123 struct dc_crtc_timing *timing_out,
5124 const struct drm_display_info *info)
5125 {
5126 enum dc_color_depth depth = timing_out->display_color_depth;
5127 int normalized_clk;
5128 do {
5129 normalized_clk = timing_out->pix_clk_100hz / 10;
5130
5131 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5132 normalized_clk /= 2;
5133
5134 switch (depth) {
5135 case COLOR_DEPTH_888:
5136 break;
5137 case COLOR_DEPTH_101010:
5138 normalized_clk = (normalized_clk * 30) / 24;
5139 break;
5140 case COLOR_DEPTH_121212:
5141 normalized_clk = (normalized_clk * 36) / 24;
5142 break;
5143 case COLOR_DEPTH_161616:
5144 normalized_clk = (normalized_clk * 48) / 24;
5145 break;
5146 default:
5147
5148 return false;
5149 }
5150 if (normalized_clk <= info->max_tmds_clock) {
5151 timing_out->display_color_depth = depth;
5152 return true;
5153 }
5154 } while (--depth > COLOR_DEPTH_666);
5155 return false;
5156 }
5157
5158 static void fill_stream_properties_from_drm_display_mode(
5159 struct dc_stream_state *stream,
5160 const struct drm_display_mode *mode_in,
5161 const struct drm_connector *connector,
5162 const struct drm_connector_state *connector_state,
5163 const struct dc_stream_state *old_stream,
5164 int requested_bpc)
5165 {
5166 struct dc_crtc_timing *timing_out = &stream->timing;
5167 const struct drm_display_info *info = &connector->display_info;
5168 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5169 struct hdmi_vendor_infoframe hv_frame;
5170 struct hdmi_avi_infoframe avi_frame;
5171
5172 memset(&hv_frame, 0, sizeof(hv_frame));
5173 memset(&avi_frame, 0, sizeof(avi_frame));
5174
5175 timing_out->h_border_left = 0;
5176 timing_out->h_border_right = 0;
5177 timing_out->v_border_top = 0;
5178 timing_out->v_border_bottom = 0;
5179
5180 if (drm_mode_is_420_only(info, mode_in)
5181 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5182 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5183 else if (drm_mode_is_420_also(info, mode_in)
5184 && aconnector->force_yuv420_output)
5185 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5186 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5187 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5188 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5189 else
5190 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5191
5192 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5193 timing_out->display_color_depth = convert_color_depth_from_display_info(
5194 connector,
5195 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5196 requested_bpc);
5197 timing_out->scan_type = SCANNING_TYPE_NODATA;
5198 timing_out->hdmi_vic = 0;
5199
5200 if (old_stream) {
5201 timing_out->vic = old_stream->timing.vic;
5202 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5203 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5204 } else {
5205 timing_out->vic = drm_match_cea_mode(mode_in);
5206 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5207 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5208 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5209 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5210 }
5211
5212 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5213 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5214 timing_out->vic = avi_frame.video_code;
5215 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5216 timing_out->hdmi_vic = hv_frame.vic;
5217 }
5218
5219 if (is_freesync_video_mode(mode_in, aconnector)) {
5220 timing_out->h_addressable = mode_in->hdisplay;
5221 timing_out->h_total = mode_in->htotal;
5222 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5223 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5224 timing_out->v_total = mode_in->vtotal;
5225 timing_out->v_addressable = mode_in->vdisplay;
5226 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5227 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5228 timing_out->pix_clk_100hz = mode_in->clock * 10;
5229 } else {
5230 timing_out->h_addressable = mode_in->crtc_hdisplay;
5231 timing_out->h_total = mode_in->crtc_htotal;
5232 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5233 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5234 timing_out->v_total = mode_in->crtc_vtotal;
5235 timing_out->v_addressable = mode_in->crtc_vdisplay;
5236 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5237 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5238 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5239 }
5240
5241 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5242
5243 stream->output_color_space = get_output_color_space(timing_out);
5244
5245 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5246 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5247 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5248 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5249 drm_mode_is_420_also(info, mode_in) &&
5250 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5251 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5252 adjust_colour_depth_from_display_info(timing_out, info);
5253 }
5254 }
5255 }
5256
5257 static void fill_audio_info(struct audio_info *audio_info,
5258 const struct drm_connector *drm_connector,
5259 const struct dc_sink *dc_sink)
5260 {
5261 int i = 0;
5262 int cea_revision = 0;
5263 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5264
5265 audio_info->manufacture_id = edid_caps->manufacturer_id;
5266 audio_info->product_id = edid_caps->product_id;
5267
5268 cea_revision = drm_connector->display_info.cea_rev;
5269
5270 strscpy(audio_info->display_name,
5271 edid_caps->display_name,
5272 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5273
5274 if (cea_revision >= 3) {
5275 audio_info->mode_count = edid_caps->audio_mode_count;
5276
5277 for (i = 0; i < audio_info->mode_count; ++i) {
5278 audio_info->modes[i].format_code =
5279 (enum audio_format_code)
5280 (edid_caps->audio_modes[i].format_code);
5281 audio_info->modes[i].channel_count =
5282 edid_caps->audio_modes[i].channel_count;
5283 audio_info->modes[i].sample_rates.all =
5284 edid_caps->audio_modes[i].sample_rate;
5285 audio_info->modes[i].sample_size =
5286 edid_caps->audio_modes[i].sample_size;
5287 }
5288 }
5289
5290 audio_info->flags.all = edid_caps->speaker_flags;
5291
5292
5293 if (drm_connector->latency_present[0]) {
5294 audio_info->video_latency = drm_connector->video_latency[0];
5295 audio_info->audio_latency = drm_connector->audio_latency[0];
5296 }
5297
5298
5299
5300 }
5301
5302 static void
5303 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5304 struct drm_display_mode *dst_mode)
5305 {
5306 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5307 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5308 dst_mode->crtc_clock = src_mode->crtc_clock;
5309 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5310 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5311 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5312 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5313 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5314 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5315 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5316 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5317 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5318 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5319 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5320 }
5321
5322 static void
5323 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5324 const struct drm_display_mode *native_mode,
5325 bool scale_enabled)
5326 {
5327 if (scale_enabled) {
5328 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5329 } else if (native_mode->clock == drm_mode->clock &&
5330 native_mode->htotal == drm_mode->htotal &&
5331 native_mode->vtotal == drm_mode->vtotal) {
5332 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5333 } else {
5334
5335 }
5336 }
5337
5338 static struct dc_sink *
5339 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5340 {
5341 struct dc_sink_init_data sink_init_data = { 0 };
5342 struct dc_sink *sink = NULL;
5343 sink_init_data.link = aconnector->dc_link;
5344 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5345
5346 sink = dc_sink_create(&sink_init_data);
5347 if (!sink) {
5348 DRM_ERROR("Failed to create sink!\n");
5349 return NULL;
5350 }
5351 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5352
5353 return sink;
5354 }
5355
5356 static void set_multisync_trigger_params(
5357 struct dc_stream_state *stream)
5358 {
5359 struct dc_stream_state *master = NULL;
5360
5361 if (stream->triggered_crtc_reset.enabled) {
5362 master = stream->triggered_crtc_reset.event_source;
5363 stream->triggered_crtc_reset.event =
5364 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5365 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5366 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5367 }
5368 }
5369
5370 static void set_master_stream(struct dc_stream_state *stream_set[],
5371 int stream_count)
5372 {
5373 int j, highest_rfr = 0, master_stream = 0;
5374
5375 for (j = 0; j < stream_count; j++) {
5376 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5377 int refresh_rate = 0;
5378
5379 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5380 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5381 if (refresh_rate > highest_rfr) {
5382 highest_rfr = refresh_rate;
5383 master_stream = j;
5384 }
5385 }
5386 }
5387 for (j = 0; j < stream_count; j++) {
5388 if (stream_set[j])
5389 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5390 }
5391 }
5392
5393 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5394 {
5395 int i = 0;
5396 struct dc_stream_state *stream;
5397
5398 if (context->stream_count < 2)
5399 return;
5400 for (i = 0; i < context->stream_count ; i++) {
5401 if (!context->streams[i])
5402 continue;
5403
5404
5405
5406
5407
5408 }
5409
5410 set_master_stream(context->streams, context->stream_count);
5411
5412 for (i = 0; i < context->stream_count ; i++) {
5413 stream = context->streams[i];
5414
5415 if (!stream)
5416 continue;
5417
5418 set_multisync_trigger_params(stream);
5419 }
5420 }
5421
5422
5423
5424
5425
5426
5427
5428
5429
5430
5431
5432
5433
5434
5435
5436
5437
5438
5439
5440
5441
5442
5443
5444
5445
5446
5447
5448
5449
5450
5451
5452 static struct drm_display_mode *
5453 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5454 bool use_probed_modes)
5455 {
5456 struct drm_display_mode *m, *m_pref = NULL;
5457 u16 current_refresh, highest_refresh;
5458 struct list_head *list_head = use_probed_modes ?
5459 &aconnector->base.probed_modes :
5460 &aconnector->base.modes;
5461
5462 if (aconnector->freesync_vid_base.clock != 0)
5463 return &aconnector->freesync_vid_base;
5464
5465
5466 list_for_each_entry (m, list_head, head) {
5467 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5468 m_pref = m;
5469 break;
5470 }
5471 }
5472
5473 if (!m_pref) {
5474
5475 m_pref = list_first_entry_or_null(
5476 &aconnector->base.modes, struct drm_display_mode, head);
5477 if (!m_pref) {
5478 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5479 return NULL;
5480 }
5481 }
5482
5483 highest_refresh = drm_mode_vrefresh(m_pref);
5484
5485
5486
5487
5488
5489
5490 list_for_each_entry (m, list_head, head) {
5491 current_refresh = drm_mode_vrefresh(m);
5492
5493 if (m->hdisplay == m_pref->hdisplay &&
5494 m->vdisplay == m_pref->vdisplay &&
5495 highest_refresh < current_refresh) {
5496 highest_refresh = current_refresh;
5497 m_pref = m;
5498 }
5499 }
5500
5501 drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
5502 return m_pref;
5503 }
5504
5505 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5506 struct amdgpu_dm_connector *aconnector)
5507 {
5508 struct drm_display_mode *high_mode;
5509 int timing_diff;
5510
5511 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5512 if (!high_mode || !mode)
5513 return false;
5514
5515 timing_diff = high_mode->vtotal - mode->vtotal;
5516
5517 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5518 high_mode->hdisplay != mode->hdisplay ||
5519 high_mode->vdisplay != mode->vdisplay ||
5520 high_mode->hsync_start != mode->hsync_start ||
5521 high_mode->hsync_end != mode->hsync_end ||
5522 high_mode->htotal != mode->htotal ||
5523 high_mode->hskew != mode->hskew ||
5524 high_mode->vscan != mode->vscan ||
5525 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5526 high_mode->vsync_end - mode->vsync_end != timing_diff)
5527 return false;
5528 else
5529 return true;
5530 }
5531
5532 #if defined(CONFIG_DRM_AMD_DC_DCN)
5533 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5534 struct dc_sink *sink, struct dc_stream_state *stream,
5535 struct dsc_dec_dpcd_caps *dsc_caps)
5536 {
5537 stream->timing.flags.DSC = 0;
5538 dsc_caps->is_dsc_supported = false;
5539
5540 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
5541 sink->sink_signal == SIGNAL_TYPE_EDP)) {
5542 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
5543 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
5544 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5545 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5546 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5547 dsc_caps);
5548 }
5549 }
5550
5551
5552 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
5553 struct dc_sink *sink, struct dc_stream_state *stream,
5554 struct dsc_dec_dpcd_caps *dsc_caps,
5555 uint32_t max_dsc_target_bpp_limit_override)
5556 {
5557 const struct dc_link_settings *verified_link_cap = NULL;
5558 uint32_t link_bw_in_kbps;
5559 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
5560 struct dc *dc = sink->ctx->dc;
5561 struct dc_dsc_bw_range bw_range = {0};
5562 struct dc_dsc_config dsc_cfg = {0};
5563
5564 verified_link_cap = dc_link_get_link_cap(stream->link);
5565 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
5566 edp_min_bpp_x16 = 8 * 16;
5567 edp_max_bpp_x16 = 8 * 16;
5568
5569 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
5570 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
5571
5572 if (edp_max_bpp_x16 < edp_min_bpp_x16)
5573 edp_min_bpp_x16 = edp_max_bpp_x16;
5574
5575 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
5576 dc->debug.dsc_min_slice_height_override,
5577 edp_min_bpp_x16, edp_max_bpp_x16,
5578 dsc_caps,
5579 &stream->timing,
5580 &bw_range)) {
5581
5582 if (bw_range.max_kbps < link_bw_in_kbps) {
5583 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5584 dsc_caps,
5585 dc->debug.dsc_min_slice_height_override,
5586 max_dsc_target_bpp_limit_override,
5587 0,
5588 &stream->timing,
5589 &dsc_cfg)) {
5590 stream->timing.dsc_cfg = dsc_cfg;
5591 stream->timing.flags.DSC = 1;
5592 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
5593 }
5594 return;
5595 }
5596 }
5597
5598 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5599 dsc_caps,
5600 dc->debug.dsc_min_slice_height_override,
5601 max_dsc_target_bpp_limit_override,
5602 link_bw_in_kbps,
5603 &stream->timing,
5604 &dsc_cfg)) {
5605 stream->timing.dsc_cfg = dsc_cfg;
5606 stream->timing.flags.DSC = 1;
5607 }
5608 }
5609
5610
5611 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5612 struct dc_sink *sink, struct dc_stream_state *stream,
5613 struct dsc_dec_dpcd_caps *dsc_caps)
5614 {
5615 struct drm_connector *drm_connector = &aconnector->base;
5616 uint32_t link_bandwidth_kbps;
5617 uint32_t max_dsc_target_bpp_limit_override = 0;
5618 struct dc *dc = sink->ctx->dc;
5619 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
5620 uint32_t dsc_max_supported_bw_in_kbps;
5621
5622 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5623 dc_link_get_link_cap(aconnector->dc_link));
5624 if (stream->link && stream->link->local_sink)
5625 max_dsc_target_bpp_limit_override =
5626 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
5627
5628
5629 dc_dsc_policy_set_enable_dsc_when_not_needed(
5630 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5631
5632 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
5633 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
5634
5635 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
5636
5637 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5638 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
5639 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5640 dsc_caps,
5641 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5642 max_dsc_target_bpp_limit_override,
5643 link_bandwidth_kbps,
5644 &stream->timing,
5645 &stream->timing.dsc_cfg)) {
5646 stream->timing.flags.DSC = 1;
5647 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5648 }
5649 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
5650 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
5651 max_supported_bw_in_kbps = link_bandwidth_kbps;
5652 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
5653
5654 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
5655 max_supported_bw_in_kbps > 0 &&
5656 dsc_max_supported_bw_in_kbps > 0)
5657 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5658 dsc_caps,
5659 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5660 max_dsc_target_bpp_limit_override,
5661 dsc_max_supported_bw_in_kbps,
5662 &stream->timing,
5663 &stream->timing.dsc_cfg)) {
5664 stream->timing.flags.DSC = 1;
5665 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
5666 __func__, drm_connector->name);
5667 }
5668 }
5669 }
5670
5671
5672 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5673 stream->timing.flags.DSC = 1;
5674
5675 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5676 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5677
5678 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5679 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5680
5681 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5682 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5683 }
5684 #endif
5685
5686 static struct dc_stream_state *
5687 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5688 const struct drm_display_mode *drm_mode,
5689 const struct dm_connector_state *dm_state,
5690 const struct dc_stream_state *old_stream,
5691 int requested_bpc)
5692 {
5693 struct drm_display_mode *preferred_mode = NULL;
5694 struct drm_connector *drm_connector;
5695 const struct drm_connector_state *con_state =
5696 dm_state ? &dm_state->base : NULL;
5697 struct dc_stream_state *stream = NULL;
5698 struct drm_display_mode mode = *drm_mode;
5699 struct drm_display_mode saved_mode;
5700 struct drm_display_mode *freesync_mode = NULL;
5701 bool native_mode_found = false;
5702 bool recalculate_timing = false;
5703 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5704 int mode_refresh;
5705 int preferred_refresh = 0;
5706 #if defined(CONFIG_DRM_AMD_DC_DCN)
5707 struct dsc_dec_dpcd_caps dsc_caps;
5708 #endif
5709
5710 struct dc_sink *sink = NULL;
5711
5712 memset(&saved_mode, 0, sizeof(saved_mode));
5713
5714 if (aconnector == NULL) {
5715 DRM_ERROR("aconnector is NULL!\n");
5716 return stream;
5717 }
5718
5719 drm_connector = &aconnector->base;
5720
5721 if (!aconnector->dc_sink) {
5722 sink = create_fake_sink(aconnector);
5723 if (!sink)
5724 return stream;
5725 } else {
5726 sink = aconnector->dc_sink;
5727 dc_sink_retain(sink);
5728 }
5729
5730 stream = dc_create_stream_for_sink(sink);
5731
5732 if (stream == NULL) {
5733 DRM_ERROR("Failed to create stream for sink!\n");
5734 goto finish;
5735 }
5736
5737 stream->dm_stream_context = aconnector;
5738
5739 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5740 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5741
5742 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5743
5744 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5745 native_mode_found = true;
5746 break;
5747 }
5748 }
5749 if (!native_mode_found)
5750 preferred_mode = list_first_entry_or_null(
5751 &aconnector->base.modes,
5752 struct drm_display_mode,
5753 head);
5754
5755 mode_refresh = drm_mode_vrefresh(&mode);
5756
5757 if (preferred_mode == NULL) {
5758
5759
5760
5761
5762
5763
5764 DRM_DEBUG_DRIVER("No preferred mode found\n");
5765 } else {
5766 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
5767 if (recalculate_timing) {
5768 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5769 drm_mode_copy(&saved_mode, &mode);
5770 drm_mode_copy(&mode, freesync_mode);
5771 } else {
5772 decide_crtc_timing_for_drm_display_mode(
5773 &mode, preferred_mode, scale);
5774
5775 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5776 }
5777 }
5778
5779 if (recalculate_timing)
5780 drm_mode_set_crtcinfo(&saved_mode, 0);
5781 else if (!dm_state)
5782 drm_mode_set_crtcinfo(&mode, 0);
5783
5784
5785
5786
5787
5788 if (!scale || mode_refresh != preferred_refresh)
5789 fill_stream_properties_from_drm_display_mode(
5790 stream, &mode, &aconnector->base, con_state, NULL,
5791 requested_bpc);
5792 else
5793 fill_stream_properties_from_drm_display_mode(
5794 stream, &mode, &aconnector->base, con_state, old_stream,
5795 requested_bpc);
5796
5797 #if defined(CONFIG_DRM_AMD_DC_DCN)
5798
5799 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5800 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5801 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5802 #endif
5803
5804 update_stream_scaling_settings(&mode, dm_state, stream);
5805
5806 fill_audio_info(
5807 &stream->audio_info,
5808 drm_connector,
5809 sink);
5810
5811 update_stream_signal(stream, sink);
5812
5813 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5814 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5815
5816 if (stream->link->psr_settings.psr_feature_enabled) {
5817
5818
5819
5820
5821 stream->use_vsc_sdp_for_colorimetry = false;
5822 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5823 stream->use_vsc_sdp_for_colorimetry =
5824 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5825 } else {
5826 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5827 stream->use_vsc_sdp_for_colorimetry = true;
5828 }
5829 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
5830 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
5831
5832 }
5833 finish:
5834 dc_sink_release(sink);
5835
5836 return stream;
5837 }
5838
5839 static enum drm_connector_status
5840 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5841 {
5842 bool connected;
5843 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5844
5845
5846
5847
5848
5849
5850
5851
5852 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5853 !aconnector->fake_enable)
5854 connected = (aconnector->dc_sink != NULL);
5855 else
5856 connected = (aconnector->base.force == DRM_FORCE_ON ||
5857 aconnector->base.force == DRM_FORCE_ON_DIGITAL);
5858
5859 update_subconnector_property(aconnector);
5860
5861 return (connected ? connector_status_connected :
5862 connector_status_disconnected);
5863 }
5864
5865 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5866 struct drm_connector_state *connector_state,
5867 struct drm_property *property,
5868 uint64_t val)
5869 {
5870 struct drm_device *dev = connector->dev;
5871 struct amdgpu_device *adev = drm_to_adev(dev);
5872 struct dm_connector_state *dm_old_state =
5873 to_dm_connector_state(connector->state);
5874 struct dm_connector_state *dm_new_state =
5875 to_dm_connector_state(connector_state);
5876
5877 int ret = -EINVAL;
5878
5879 if (property == dev->mode_config.scaling_mode_property) {
5880 enum amdgpu_rmx_type rmx_type;
5881
5882 switch (val) {
5883 case DRM_MODE_SCALE_CENTER:
5884 rmx_type = RMX_CENTER;
5885 break;
5886 case DRM_MODE_SCALE_ASPECT:
5887 rmx_type = RMX_ASPECT;
5888 break;
5889 case DRM_MODE_SCALE_FULLSCREEN:
5890 rmx_type = RMX_FULL;
5891 break;
5892 case DRM_MODE_SCALE_NONE:
5893 default:
5894 rmx_type = RMX_OFF;
5895 break;
5896 }
5897
5898 if (dm_old_state->scaling == rmx_type)
5899 return 0;
5900
5901 dm_new_state->scaling = rmx_type;
5902 ret = 0;
5903 } else if (property == adev->mode_info.underscan_hborder_property) {
5904 dm_new_state->underscan_hborder = val;
5905 ret = 0;
5906 } else if (property == adev->mode_info.underscan_vborder_property) {
5907 dm_new_state->underscan_vborder = val;
5908 ret = 0;
5909 } else if (property == adev->mode_info.underscan_property) {
5910 dm_new_state->underscan_enable = val;
5911 ret = 0;
5912 } else if (property == adev->mode_info.abm_level_property) {
5913 dm_new_state->abm_level = val;
5914 ret = 0;
5915 }
5916
5917 return ret;
5918 }
5919
5920 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5921 const struct drm_connector_state *state,
5922 struct drm_property *property,
5923 uint64_t *val)
5924 {
5925 struct drm_device *dev = connector->dev;
5926 struct amdgpu_device *adev = drm_to_adev(dev);
5927 struct dm_connector_state *dm_state =
5928 to_dm_connector_state(state);
5929 int ret = -EINVAL;
5930
5931 if (property == dev->mode_config.scaling_mode_property) {
5932 switch (dm_state->scaling) {
5933 case RMX_CENTER:
5934 *val = DRM_MODE_SCALE_CENTER;
5935 break;
5936 case RMX_ASPECT:
5937 *val = DRM_MODE_SCALE_ASPECT;
5938 break;
5939 case RMX_FULL:
5940 *val = DRM_MODE_SCALE_FULLSCREEN;
5941 break;
5942 case RMX_OFF:
5943 default:
5944 *val = DRM_MODE_SCALE_NONE;
5945 break;
5946 }
5947 ret = 0;
5948 } else if (property == adev->mode_info.underscan_hborder_property) {
5949 *val = dm_state->underscan_hborder;
5950 ret = 0;
5951 } else if (property == adev->mode_info.underscan_vborder_property) {
5952 *val = dm_state->underscan_vborder;
5953 ret = 0;
5954 } else if (property == adev->mode_info.underscan_property) {
5955 *val = dm_state->underscan_enable;
5956 ret = 0;
5957 } else if (property == adev->mode_info.abm_level_property) {
5958 *val = dm_state->abm_level;
5959 ret = 0;
5960 }
5961
5962 return ret;
5963 }
5964
5965 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5966 {
5967 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5968
5969 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5970 }
5971
5972 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5973 {
5974 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5975 const struct dc_link *link = aconnector->dc_link;
5976 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5977 struct amdgpu_display_manager *dm = &adev->dm;
5978 int i;
5979
5980
5981
5982
5983
5984 if (aconnector->mst_mgr.dev)
5985 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5986
5987 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5988 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5989 for (i = 0; i < dm->num_of_edps; i++) {
5990 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
5991 backlight_device_unregister(dm->backlight_dev[i]);
5992 dm->backlight_dev[i] = NULL;
5993 }
5994 }
5995 #endif
5996
5997 if (aconnector->dc_em_sink)
5998 dc_sink_release(aconnector->dc_em_sink);
5999 aconnector->dc_em_sink = NULL;
6000 if (aconnector->dc_sink)
6001 dc_sink_release(aconnector->dc_sink);
6002 aconnector->dc_sink = NULL;
6003
6004 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6005 drm_connector_unregister(connector);
6006 drm_connector_cleanup(connector);
6007 if (aconnector->i2c) {
6008 i2c_del_adapter(&aconnector->i2c->base);
6009 kfree(aconnector->i2c);
6010 }
6011 kfree(aconnector->dm_dp_aux.aux.name);
6012
6013 kfree(connector);
6014 }
6015
6016 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6017 {
6018 struct dm_connector_state *state =
6019 to_dm_connector_state(connector->state);
6020
6021 if (connector->state)
6022 __drm_atomic_helper_connector_destroy_state(connector->state);
6023
6024 kfree(state);
6025
6026 state = kzalloc(sizeof(*state), GFP_KERNEL);
6027
6028 if (state) {
6029 state->scaling = RMX_OFF;
6030 state->underscan_enable = false;
6031 state->underscan_hborder = 0;
6032 state->underscan_vborder = 0;
6033 state->base.max_requested_bpc = 8;
6034 state->vcpi_slots = 0;
6035 state->pbn = 0;
6036
6037 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6038 state->abm_level = amdgpu_dm_abm_level;
6039
6040 __drm_atomic_helper_connector_reset(connector, &state->base);
6041 }
6042 }
6043
6044 struct drm_connector_state *
6045 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6046 {
6047 struct dm_connector_state *state =
6048 to_dm_connector_state(connector->state);
6049
6050 struct dm_connector_state *new_state =
6051 kmemdup(state, sizeof(*state), GFP_KERNEL);
6052
6053 if (!new_state)
6054 return NULL;
6055
6056 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6057
6058 new_state->freesync_capable = state->freesync_capable;
6059 new_state->abm_level = state->abm_level;
6060 new_state->scaling = state->scaling;
6061 new_state->underscan_enable = state->underscan_enable;
6062 new_state->underscan_hborder = state->underscan_hborder;
6063 new_state->underscan_vborder = state->underscan_vborder;
6064 new_state->vcpi_slots = state->vcpi_slots;
6065 new_state->pbn = state->pbn;
6066 return &new_state->base;
6067 }
6068
6069 static int
6070 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6071 {
6072 struct amdgpu_dm_connector *amdgpu_dm_connector =
6073 to_amdgpu_dm_connector(connector);
6074 int r;
6075
6076 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6077 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6078 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6079 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6080 if (r)
6081 return r;
6082 }
6083
6084 #if defined(CONFIG_DEBUG_FS)
6085 connector_debugfs_init(amdgpu_dm_connector);
6086 #endif
6087
6088 return 0;
6089 }
6090
6091 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6092 .reset = amdgpu_dm_connector_funcs_reset,
6093 .detect = amdgpu_dm_connector_detect,
6094 .fill_modes = drm_helper_probe_single_connector_modes,
6095 .destroy = amdgpu_dm_connector_destroy,
6096 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6097 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6098 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6099 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6100 .late_register = amdgpu_dm_connector_late_register,
6101 .early_unregister = amdgpu_dm_connector_unregister
6102 };
6103
6104 static int get_modes(struct drm_connector *connector)
6105 {
6106 return amdgpu_dm_connector_get_modes(connector);
6107 }
6108
6109 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6110 {
6111 struct dc_sink_init_data init_params = {
6112 .link = aconnector->dc_link,
6113 .sink_signal = SIGNAL_TYPE_VIRTUAL
6114 };
6115 struct edid *edid;
6116
6117 if (!aconnector->base.edid_blob_ptr) {
6118 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6119 aconnector->base.name);
6120
6121 aconnector->base.force = DRM_FORCE_OFF;
6122 aconnector->base.override_edid = false;
6123 return;
6124 }
6125
6126 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6127
6128 aconnector->edid = edid;
6129
6130 aconnector->dc_em_sink = dc_link_add_remote_sink(
6131 aconnector->dc_link,
6132 (uint8_t *)edid,
6133 (edid->extensions + 1) * EDID_LENGTH,
6134 &init_params);
6135
6136 if (aconnector->base.force == DRM_FORCE_ON) {
6137 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6138 aconnector->dc_link->local_sink :
6139 aconnector->dc_em_sink;
6140 dc_sink_retain(aconnector->dc_sink);
6141 }
6142 }
6143
6144 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6145 {
6146 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6147
6148
6149
6150
6151
6152 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6153 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6154 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6155 }
6156
6157
6158 aconnector->base.override_edid = true;
6159 create_eml_sink(aconnector);
6160 }
6161
6162 struct dc_stream_state *
6163 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6164 const struct drm_display_mode *drm_mode,
6165 const struct dm_connector_state *dm_state,
6166 const struct dc_stream_state *old_stream)
6167 {
6168 struct drm_connector *connector = &aconnector->base;
6169 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6170 struct dc_stream_state *stream;
6171 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6172 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6173 enum dc_status dc_result = DC_OK;
6174
6175 do {
6176 stream = create_stream_for_sink(aconnector, drm_mode,
6177 dm_state, old_stream,
6178 requested_bpc);
6179 if (stream == NULL) {
6180 DRM_ERROR("Failed to create stream for sink!\n");
6181 break;
6182 }
6183
6184 dc_result = dc_validate_stream(adev->dm.dc, stream);
6185 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
6186 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
6187
6188 if (dc_result != DC_OK) {
6189 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6190 drm_mode->hdisplay,
6191 drm_mode->vdisplay,
6192 drm_mode->clock,
6193 dc_result,
6194 dc_status_to_str(dc_result));
6195
6196 dc_stream_release(stream);
6197 stream = NULL;
6198 requested_bpc -= 2;
6199 }
6200
6201 } while (stream == NULL && requested_bpc >= 6);
6202
6203 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6204 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6205
6206 aconnector->force_yuv420_output = true;
6207 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6208 dm_state, old_stream);
6209 aconnector->force_yuv420_output = false;
6210 }
6211
6212 return stream;
6213 }
6214
6215 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6216 struct drm_display_mode *mode)
6217 {
6218 int result = MODE_ERROR;
6219 struct dc_sink *dc_sink;
6220
6221 struct dc_stream_state *stream;
6222 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6223
6224 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6225 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6226 return result;
6227
6228
6229
6230
6231
6232 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6233 !aconnector->dc_em_sink)
6234 handle_edid_mgmt(aconnector);
6235
6236 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6237
6238 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6239 aconnector->base.force != DRM_FORCE_ON) {
6240 DRM_ERROR("dc_sink is NULL!\n");
6241 goto fail;
6242 }
6243
6244 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6245 if (stream) {
6246 dc_stream_release(stream);
6247 result = MODE_OK;
6248 }
6249
6250 fail:
6251
6252 return result;
6253 }
6254
6255 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6256 struct dc_info_packet *out)
6257 {
6258 struct hdmi_drm_infoframe frame;
6259 unsigned char buf[30];
6260 ssize_t len;
6261 int ret, i;
6262
6263 memset(out, 0, sizeof(*out));
6264
6265 if (!state->hdr_output_metadata)
6266 return 0;
6267
6268 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6269 if (ret)
6270 return ret;
6271
6272 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6273 if (len < 0)
6274 return (int)len;
6275
6276
6277 if (len != 30)
6278 return -EINVAL;
6279
6280
6281 switch (state->connector->connector_type) {
6282 case DRM_MODE_CONNECTOR_HDMIA:
6283 out->hb0 = 0x87;
6284 out->hb1 = 0x01;
6285 out->hb2 = 0x1A;
6286 out->sb[0] = buf[3];
6287 i = 1;
6288 break;
6289
6290 case DRM_MODE_CONNECTOR_DisplayPort:
6291 case DRM_MODE_CONNECTOR_eDP:
6292 out->hb0 = 0x00;
6293 out->hb1 = 0x87;
6294 out->hb2 = 0x1D;
6295 out->hb3 = (0x13 << 2);
6296 out->sb[0] = 0x01;
6297 out->sb[1] = 0x1A;
6298 i = 2;
6299 break;
6300
6301 default:
6302 return -EINVAL;
6303 }
6304
6305 memcpy(&out->sb[i], &buf[4], 26);
6306 out->valid = true;
6307
6308 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6309 sizeof(out->sb), false);
6310
6311 return 0;
6312 }
6313
6314 static int
6315 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6316 struct drm_atomic_state *state)
6317 {
6318 struct drm_connector_state *new_con_state =
6319 drm_atomic_get_new_connector_state(state, conn);
6320 struct drm_connector_state *old_con_state =
6321 drm_atomic_get_old_connector_state(state, conn);
6322 struct drm_crtc *crtc = new_con_state->crtc;
6323 struct drm_crtc_state *new_crtc_state;
6324 int ret;
6325
6326 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6327
6328 if (!crtc)
6329 return 0;
6330
6331 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6332 struct dc_info_packet hdr_infopacket;
6333
6334 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6335 if (ret)
6336 return ret;
6337
6338 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6339 if (IS_ERR(new_crtc_state))
6340 return PTR_ERR(new_crtc_state);
6341
6342
6343
6344
6345
6346
6347
6348
6349
6350
6351
6352
6353 new_crtc_state->mode_changed =
6354 !old_con_state->hdr_output_metadata ||
6355 !new_con_state->hdr_output_metadata;
6356 }
6357
6358 return 0;
6359 }
6360
6361 static const struct drm_connector_helper_funcs
6362 amdgpu_dm_connector_helper_funcs = {
6363
6364
6365
6366
6367
6368
6369 .get_modes = get_modes,
6370 .mode_valid = amdgpu_dm_connector_mode_valid,
6371 .atomic_check = amdgpu_dm_connector_atomic_check,
6372 };
6373
6374 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6375 {
6376
6377 }
6378
6379 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6380 {
6381 switch (display_color_depth) {
6382 case COLOR_DEPTH_666:
6383 return 6;
6384 case COLOR_DEPTH_888:
6385 return 8;
6386 case COLOR_DEPTH_101010:
6387 return 10;
6388 case COLOR_DEPTH_121212:
6389 return 12;
6390 case COLOR_DEPTH_141414:
6391 return 14;
6392 case COLOR_DEPTH_161616:
6393 return 16;
6394 default:
6395 break;
6396 }
6397 return 0;
6398 }
6399
6400 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6401 struct drm_crtc_state *crtc_state,
6402 struct drm_connector_state *conn_state)
6403 {
6404 struct drm_atomic_state *state = crtc_state->state;
6405 struct drm_connector *connector = conn_state->connector;
6406 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6407 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6408 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6409 struct drm_dp_mst_topology_mgr *mst_mgr;
6410 struct drm_dp_mst_port *mst_port;
6411 enum dc_color_depth color_depth;
6412 int clock, bpp = 0;
6413 bool is_y420 = false;
6414
6415 if (!aconnector->port || !aconnector->dc_sink)
6416 return 0;
6417
6418 mst_port = aconnector->port;
6419 mst_mgr = &aconnector->mst_port->mst_mgr;
6420
6421 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6422 return 0;
6423
6424 if (!state->duplicated) {
6425 int max_bpc = conn_state->max_requested_bpc;
6426 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6427 aconnector->force_yuv420_output;
6428 color_depth = convert_color_depth_from_display_info(connector,
6429 is_y420,
6430 max_bpc);
6431 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6432 clock = adjusted_mode->clock;
6433 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6434 }
6435 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6436 mst_mgr,
6437 mst_port,
6438 dm_new_connector_state->pbn,
6439 dm_mst_get_pbn_divider(aconnector->dc_link));
6440 if (dm_new_connector_state->vcpi_slots < 0) {
6441 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6442 return dm_new_connector_state->vcpi_slots;
6443 }
6444 return 0;
6445 }
6446
6447 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6448 .disable = dm_encoder_helper_disable,
6449 .atomic_check = dm_encoder_helper_atomic_check
6450 };
6451
6452 #if defined(CONFIG_DRM_AMD_DC_DCN)
6453 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6454 struct dc_state *dc_state,
6455 struct dsc_mst_fairness_vars *vars)
6456 {
6457 struct dc_stream_state *stream = NULL;
6458 struct drm_connector *connector;
6459 struct drm_connector_state *new_con_state;
6460 struct amdgpu_dm_connector *aconnector;
6461 struct dm_connector_state *dm_conn_state;
6462 int i, j;
6463 int vcpi, pbn_div, pbn, slot_num = 0;
6464
6465 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6466
6467 aconnector = to_amdgpu_dm_connector(connector);
6468
6469 if (!aconnector->port)
6470 continue;
6471
6472 if (!new_con_state || !new_con_state->crtc)
6473 continue;
6474
6475 dm_conn_state = to_dm_connector_state(new_con_state);
6476
6477 for (j = 0; j < dc_state->stream_count; j++) {
6478 stream = dc_state->streams[j];
6479 if (!stream)
6480 continue;
6481
6482 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6483 break;
6484
6485 stream = NULL;
6486 }
6487
6488 if (!stream)
6489 continue;
6490
6491 pbn_div = dm_mst_get_pbn_divider(stream->link);
6492
6493 for (j = 0; j < dc_state->stream_count; j++) {
6494 if (vars[j].aconnector == aconnector) {
6495 pbn = vars[j].pbn;
6496 break;
6497 }
6498 }
6499
6500 if (j == dc_state->stream_count)
6501 continue;
6502
6503 slot_num = DIV_ROUND_UP(pbn, pbn_div);
6504
6505 if (stream->timing.flags.DSC != 1) {
6506 dm_conn_state->pbn = pbn;
6507 dm_conn_state->vcpi_slots = slot_num;
6508
6509 drm_dp_mst_atomic_enable_dsc(state,
6510 aconnector->port,
6511 dm_conn_state->pbn,
6512 0,
6513 false);
6514 continue;
6515 }
6516
6517 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6518 aconnector->port,
6519 pbn, pbn_div,
6520 true);
6521 if (vcpi < 0)
6522 return vcpi;
6523
6524 dm_conn_state->pbn = pbn;
6525 dm_conn_state->vcpi_slots = vcpi;
6526 }
6527 return 0;
6528 }
6529 #endif
6530
6531 static int to_drm_connector_type(enum signal_type st)
6532 {
6533 switch (st) {
6534 case SIGNAL_TYPE_HDMI_TYPE_A:
6535 return DRM_MODE_CONNECTOR_HDMIA;
6536 case SIGNAL_TYPE_EDP:
6537 return DRM_MODE_CONNECTOR_eDP;
6538 case SIGNAL_TYPE_LVDS:
6539 return DRM_MODE_CONNECTOR_LVDS;
6540 case SIGNAL_TYPE_RGB:
6541 return DRM_MODE_CONNECTOR_VGA;
6542 case SIGNAL_TYPE_DISPLAY_PORT:
6543 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6544 return DRM_MODE_CONNECTOR_DisplayPort;
6545 case SIGNAL_TYPE_DVI_DUAL_LINK:
6546 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6547 return DRM_MODE_CONNECTOR_DVID;
6548 case SIGNAL_TYPE_VIRTUAL:
6549 return DRM_MODE_CONNECTOR_VIRTUAL;
6550
6551 default:
6552 return DRM_MODE_CONNECTOR_Unknown;
6553 }
6554 }
6555
6556 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6557 {
6558 struct drm_encoder *encoder;
6559
6560
6561 drm_connector_for_each_possible_encoder(connector, encoder)
6562 return encoder;
6563
6564 return NULL;
6565 }
6566
6567 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6568 {
6569 struct drm_encoder *encoder;
6570 struct amdgpu_encoder *amdgpu_encoder;
6571
6572 encoder = amdgpu_dm_connector_to_encoder(connector);
6573
6574 if (encoder == NULL)
6575 return;
6576
6577 amdgpu_encoder = to_amdgpu_encoder(encoder);
6578
6579 amdgpu_encoder->native_mode.clock = 0;
6580
6581 if (!list_empty(&connector->probed_modes)) {
6582 struct drm_display_mode *preferred_mode = NULL;
6583
6584 list_for_each_entry(preferred_mode,
6585 &connector->probed_modes,
6586 head) {
6587 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6588 amdgpu_encoder->native_mode = *preferred_mode;
6589
6590 break;
6591 }
6592
6593 }
6594 }
6595
6596 static struct drm_display_mode *
6597 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6598 char *name,
6599 int hdisplay, int vdisplay)
6600 {
6601 struct drm_device *dev = encoder->dev;
6602 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6603 struct drm_display_mode *mode = NULL;
6604 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6605
6606 mode = drm_mode_duplicate(dev, native_mode);
6607
6608 if (mode == NULL)
6609 return NULL;
6610
6611 mode->hdisplay = hdisplay;
6612 mode->vdisplay = vdisplay;
6613 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6614 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6615
6616 return mode;
6617
6618 }
6619
6620 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6621 struct drm_connector *connector)
6622 {
6623 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6624 struct drm_display_mode *mode = NULL;
6625 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6626 struct amdgpu_dm_connector *amdgpu_dm_connector =
6627 to_amdgpu_dm_connector(connector);
6628 int i;
6629 int n;
6630 struct mode_size {
6631 char name[DRM_DISPLAY_MODE_LEN];
6632 int w;
6633 int h;
6634 } common_modes[] = {
6635 { "640x480", 640, 480},
6636 { "800x600", 800, 600},
6637 { "1024x768", 1024, 768},
6638 { "1280x720", 1280, 720},
6639 { "1280x800", 1280, 800},
6640 {"1280x1024", 1280, 1024},
6641 { "1440x900", 1440, 900},
6642 {"1680x1050", 1680, 1050},
6643 {"1600x1200", 1600, 1200},
6644 {"1920x1080", 1920, 1080},
6645 {"1920x1200", 1920, 1200}
6646 };
6647
6648 n = ARRAY_SIZE(common_modes);
6649
6650 for (i = 0; i < n; i++) {
6651 struct drm_display_mode *curmode = NULL;
6652 bool mode_existed = false;
6653
6654 if (common_modes[i].w > native_mode->hdisplay ||
6655 common_modes[i].h > native_mode->vdisplay ||
6656 (common_modes[i].w == native_mode->hdisplay &&
6657 common_modes[i].h == native_mode->vdisplay))
6658 continue;
6659
6660 list_for_each_entry(curmode, &connector->probed_modes, head) {
6661 if (common_modes[i].w == curmode->hdisplay &&
6662 common_modes[i].h == curmode->vdisplay) {
6663 mode_existed = true;
6664 break;
6665 }
6666 }
6667
6668 if (mode_existed)
6669 continue;
6670
6671 mode = amdgpu_dm_create_common_mode(encoder,
6672 common_modes[i].name, common_modes[i].w,
6673 common_modes[i].h);
6674 if (!mode)
6675 continue;
6676
6677 drm_mode_probed_add(connector, mode);
6678 amdgpu_dm_connector->num_modes++;
6679 }
6680 }
6681
6682 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
6683 {
6684 struct drm_encoder *encoder;
6685 struct amdgpu_encoder *amdgpu_encoder;
6686 const struct drm_display_mode *native_mode;
6687
6688 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
6689 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
6690 return;
6691
6692 mutex_lock(&connector->dev->mode_config.mutex);
6693 amdgpu_dm_connector_get_modes(connector);
6694 mutex_unlock(&connector->dev->mode_config.mutex);
6695
6696 encoder = amdgpu_dm_connector_to_encoder(connector);
6697 if (!encoder)
6698 return;
6699
6700 amdgpu_encoder = to_amdgpu_encoder(encoder);
6701
6702 native_mode = &amdgpu_encoder->native_mode;
6703 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
6704 return;
6705
6706 drm_connector_set_panel_orientation_with_quirk(connector,
6707 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
6708 native_mode->hdisplay,
6709 native_mode->vdisplay);
6710 }
6711
6712 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6713 struct edid *edid)
6714 {
6715 struct amdgpu_dm_connector *amdgpu_dm_connector =
6716 to_amdgpu_dm_connector(connector);
6717
6718 if (edid) {
6719
6720 INIT_LIST_HEAD(&connector->probed_modes);
6721 amdgpu_dm_connector->num_modes =
6722 drm_add_edid_modes(connector, edid);
6723
6724
6725
6726
6727
6728
6729
6730
6731
6732 drm_mode_sort(&connector->probed_modes);
6733 amdgpu_dm_get_native_mode(connector);
6734
6735
6736
6737
6738
6739 amdgpu_dm_update_freesync_caps(connector, edid);
6740 } else {
6741 amdgpu_dm_connector->num_modes = 0;
6742 }
6743 }
6744
6745 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
6746 struct drm_display_mode *mode)
6747 {
6748 struct drm_display_mode *m;
6749
6750 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
6751 if (drm_mode_equal(m, mode))
6752 return true;
6753 }
6754
6755 return false;
6756 }
6757
6758 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
6759 {
6760 const struct drm_display_mode *m;
6761 struct drm_display_mode *new_mode;
6762 uint i;
6763 uint32_t new_modes_count = 0;
6764
6765
6766
6767
6768
6769
6770
6771
6772
6773
6774
6775
6776
6777 static const uint32_t common_rates[] = {
6778 23976, 24000, 25000, 29970, 30000,
6779 48000, 50000, 60000, 72000, 96000, 120000
6780 };
6781
6782
6783
6784
6785
6786
6787
6788 m = get_highest_refresh_rate_mode(aconnector, true);
6789 if (!m)
6790 return 0;
6791
6792 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
6793 uint64_t target_vtotal, target_vtotal_diff;
6794 uint64_t num, den;
6795
6796 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
6797 continue;
6798
6799 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
6800 common_rates[i] > aconnector->max_vfreq * 1000)
6801 continue;
6802
6803 num = (unsigned long long)m->clock * 1000 * 1000;
6804 den = common_rates[i] * (unsigned long long)m->htotal;
6805 target_vtotal = div_u64(num, den);
6806 target_vtotal_diff = target_vtotal - m->vtotal;
6807
6808
6809 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
6810 m->vsync_end + target_vtotal_diff < m->vsync_start ||
6811 m->vtotal + target_vtotal_diff < m->vsync_end)
6812 continue;
6813
6814 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
6815 if (!new_mode)
6816 goto out;
6817
6818 new_mode->vtotal += (u16)target_vtotal_diff;
6819 new_mode->vsync_start += (u16)target_vtotal_diff;
6820 new_mode->vsync_end += (u16)target_vtotal_diff;
6821 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6822 new_mode->type |= DRM_MODE_TYPE_DRIVER;
6823
6824 if (!is_duplicate_mode(aconnector, new_mode)) {
6825 drm_mode_probed_add(&aconnector->base, new_mode);
6826 new_modes_count += 1;
6827 } else
6828 drm_mode_destroy(aconnector->base.dev, new_mode);
6829 }
6830 out:
6831 return new_modes_count;
6832 }
6833
6834 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
6835 struct edid *edid)
6836 {
6837 struct amdgpu_dm_connector *amdgpu_dm_connector =
6838 to_amdgpu_dm_connector(connector);
6839
6840 if (!edid)
6841 return;
6842
6843 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
6844 amdgpu_dm_connector->num_modes +=
6845 add_fs_modes(amdgpu_dm_connector);
6846 }
6847
6848 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6849 {
6850 struct amdgpu_dm_connector *amdgpu_dm_connector =
6851 to_amdgpu_dm_connector(connector);
6852 struct drm_encoder *encoder;
6853 struct edid *edid = amdgpu_dm_connector->edid;
6854
6855 encoder = amdgpu_dm_connector_to_encoder(connector);
6856
6857 if (!drm_edid_is_valid(edid)) {
6858 amdgpu_dm_connector->num_modes =
6859 drm_add_modes_noedid(connector, 640, 480);
6860 } else {
6861 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6862 amdgpu_dm_connector_add_common_modes(encoder, connector);
6863 amdgpu_dm_connector_add_freesync_modes(connector, edid);
6864 }
6865 amdgpu_dm_fbc_init(connector);
6866
6867 return amdgpu_dm_connector->num_modes;
6868 }
6869
6870 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6871 struct amdgpu_dm_connector *aconnector,
6872 int connector_type,
6873 struct dc_link *link,
6874 int link_index)
6875 {
6876 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6877
6878
6879
6880
6881
6882 if (aconnector->base.funcs->reset)
6883 aconnector->base.funcs->reset(&aconnector->base);
6884
6885 aconnector->connector_id = link_index;
6886 aconnector->dc_link = link;
6887 aconnector->base.interlace_allowed = false;
6888 aconnector->base.doublescan_allowed = false;
6889 aconnector->base.stereo_allowed = false;
6890 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6891 aconnector->hpd.hpd = AMDGPU_HPD_NONE;
6892 aconnector->audio_inst = -1;
6893 mutex_init(&aconnector->hpd_lock);
6894
6895
6896
6897
6898
6899 switch (connector_type) {
6900 case DRM_MODE_CONNECTOR_HDMIA:
6901 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6902 aconnector->base.ycbcr_420_allowed =
6903 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6904 break;
6905 case DRM_MODE_CONNECTOR_DisplayPort:
6906 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6907 link->link_enc = link_enc_cfg_get_link_enc(link);
6908 ASSERT(link->link_enc);
6909 if (link->link_enc)
6910 aconnector->base.ycbcr_420_allowed =
6911 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6912 break;
6913 case DRM_MODE_CONNECTOR_DVID:
6914 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6915 break;
6916 default:
6917 break;
6918 }
6919
6920 drm_object_attach_property(&aconnector->base.base,
6921 dm->ddev->mode_config.scaling_mode_property,
6922 DRM_MODE_SCALE_NONE);
6923
6924 drm_object_attach_property(&aconnector->base.base,
6925 adev->mode_info.underscan_property,
6926 UNDERSCAN_OFF);
6927 drm_object_attach_property(&aconnector->base.base,
6928 adev->mode_info.underscan_hborder_property,
6929 0);
6930 drm_object_attach_property(&aconnector->base.base,
6931 adev->mode_info.underscan_vborder_property,
6932 0);
6933
6934 if (!aconnector->mst_port)
6935 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6936
6937
6938 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6939 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6940
6941 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6942 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6943 drm_object_attach_property(&aconnector->base.base,
6944 adev->mode_info.abm_level_property, 0);
6945 }
6946
6947 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6948 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6949 connector_type == DRM_MODE_CONNECTOR_eDP) {
6950 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
6951
6952 if (!aconnector->mst_port)
6953 drm_connector_attach_vrr_capable_property(&aconnector->base);
6954
6955 #ifdef CONFIG_DRM_AMD_DC_HDCP
6956 if (adev->dm.hdcp_workqueue)
6957 drm_connector_attach_content_protection_property(&aconnector->base, true);
6958 #endif
6959 }
6960 }
6961
6962 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6963 struct i2c_msg *msgs, int num)
6964 {
6965 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6966 struct ddc_service *ddc_service = i2c->ddc_service;
6967 struct i2c_command cmd;
6968 int i;
6969 int result = -EIO;
6970
6971 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6972
6973 if (!cmd.payloads)
6974 return result;
6975
6976 cmd.number_of_payloads = num;
6977 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6978 cmd.speed = 100;
6979
6980 for (i = 0; i < num; i++) {
6981 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6982 cmd.payloads[i].address = msgs[i].addr;
6983 cmd.payloads[i].length = msgs[i].len;
6984 cmd.payloads[i].data = msgs[i].buf;
6985 }
6986
6987 if (dc_submit_i2c(
6988 ddc_service->ctx->dc,
6989 ddc_service->link->link_index,
6990 &cmd))
6991 result = num;
6992
6993 kfree(cmd.payloads);
6994 return result;
6995 }
6996
6997 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6998 {
6999 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7000 }
7001
7002 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7003 .master_xfer = amdgpu_dm_i2c_xfer,
7004 .functionality = amdgpu_dm_i2c_func,
7005 };
7006
7007 static struct amdgpu_i2c_adapter *
7008 create_i2c(struct ddc_service *ddc_service,
7009 int link_index,
7010 int *res)
7011 {
7012 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7013 struct amdgpu_i2c_adapter *i2c;
7014
7015 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7016 if (!i2c)
7017 return NULL;
7018 i2c->base.owner = THIS_MODULE;
7019 i2c->base.class = I2C_CLASS_DDC;
7020 i2c->base.dev.parent = &adev->pdev->dev;
7021 i2c->base.algo = &amdgpu_dm_i2c_algo;
7022 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7023 i2c_set_adapdata(&i2c->base, i2c);
7024 i2c->ddc_service = ddc_service;
7025
7026 return i2c;
7027 }
7028
7029
7030
7031
7032
7033
7034 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7035 struct amdgpu_dm_connector *aconnector,
7036 uint32_t link_index,
7037 struct amdgpu_encoder *aencoder)
7038 {
7039 int res = 0;
7040 int connector_type;
7041 struct dc *dc = dm->dc;
7042 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7043 struct amdgpu_i2c_adapter *i2c;
7044
7045 link->priv = aconnector;
7046
7047 DRM_DEBUG_DRIVER("%s()\n", __func__);
7048
7049 i2c = create_i2c(link->ddc, link->link_index, &res);
7050 if (!i2c) {
7051 DRM_ERROR("Failed to create i2c adapter data\n");
7052 return -ENOMEM;
7053 }
7054
7055 aconnector->i2c = i2c;
7056 res = i2c_add_adapter(&i2c->base);
7057
7058 if (res) {
7059 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7060 goto out_free;
7061 }
7062
7063 connector_type = to_drm_connector_type(link->connector_signal);
7064
7065 res = drm_connector_init_with_ddc(
7066 dm->ddev,
7067 &aconnector->base,
7068 &amdgpu_dm_connector_funcs,
7069 connector_type,
7070 &i2c->base);
7071
7072 if (res) {
7073 DRM_ERROR("connector_init failed\n");
7074 aconnector->connector_id = -1;
7075 goto out_free;
7076 }
7077
7078 drm_connector_helper_add(
7079 &aconnector->base,
7080 &amdgpu_dm_connector_helper_funcs);
7081
7082 amdgpu_dm_connector_init_helper(
7083 dm,
7084 aconnector,
7085 connector_type,
7086 link,
7087 link_index);
7088
7089 drm_connector_attach_encoder(
7090 &aconnector->base, &aencoder->base);
7091
7092 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7093 || connector_type == DRM_MODE_CONNECTOR_eDP)
7094 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7095
7096 out_free:
7097 if (res) {
7098 kfree(i2c);
7099 aconnector->i2c = NULL;
7100 }
7101 return res;
7102 }
7103
7104 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7105 {
7106 switch (adev->mode_info.num_crtc) {
7107 case 1:
7108 return 0x1;
7109 case 2:
7110 return 0x3;
7111 case 3:
7112 return 0x7;
7113 case 4:
7114 return 0xf;
7115 case 5:
7116 return 0x1f;
7117 case 6:
7118 default:
7119 return 0x3f;
7120 }
7121 }
7122
7123 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7124 struct amdgpu_encoder *aencoder,
7125 uint32_t link_index)
7126 {
7127 struct amdgpu_device *adev = drm_to_adev(dev);
7128
7129 int res = drm_encoder_init(dev,
7130 &aencoder->base,
7131 &amdgpu_dm_encoder_funcs,
7132 DRM_MODE_ENCODER_TMDS,
7133 NULL);
7134
7135 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7136
7137 if (!res)
7138 aencoder->encoder_id = link_index;
7139 else
7140 aencoder->encoder_id = -1;
7141
7142 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7143
7144 return res;
7145 }
7146
7147 static void manage_dm_interrupts(struct amdgpu_device *adev,
7148 struct amdgpu_crtc *acrtc,
7149 bool enable)
7150 {
7151
7152
7153
7154
7155
7156
7157 int irq_type =
7158 amdgpu_display_crtc_idx_to_irq_type(
7159 adev,
7160 acrtc->crtc_id);
7161
7162 if (enable) {
7163 drm_crtc_vblank_on(&acrtc->base);
7164 amdgpu_irq_get(
7165 adev,
7166 &adev->pageflip_irq,
7167 irq_type);
7168 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7169 amdgpu_irq_get(
7170 adev,
7171 &adev->vline0_irq,
7172 irq_type);
7173 #endif
7174 } else {
7175 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7176 amdgpu_irq_put(
7177 adev,
7178 &adev->vline0_irq,
7179 irq_type);
7180 #endif
7181 amdgpu_irq_put(
7182 adev,
7183 &adev->pageflip_irq,
7184 irq_type);
7185 drm_crtc_vblank_off(&acrtc->base);
7186 }
7187 }
7188
7189 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7190 struct amdgpu_crtc *acrtc)
7191 {
7192 int irq_type =
7193 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7194
7195
7196
7197
7198
7199 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7200 }
7201
7202 static bool
7203 is_scaling_state_different(const struct dm_connector_state *dm_state,
7204 const struct dm_connector_state *old_dm_state)
7205 {
7206 if (dm_state->scaling != old_dm_state->scaling)
7207 return true;
7208 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7209 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7210 return true;
7211 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7212 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7213 return true;
7214 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7215 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7216 return true;
7217 return false;
7218 }
7219
7220 #ifdef CONFIG_DRM_AMD_DC_HDCP
7221 static bool is_content_protection_different(struct drm_connector_state *state,
7222 const struct drm_connector_state *old_state,
7223 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7224 {
7225 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7226 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7227
7228
7229 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7230 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7231 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7232 return true;
7233 }
7234
7235
7236
7237
7238
7239 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7240 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7241 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7242 return false;
7243 }
7244
7245
7246
7247
7248
7249 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7250 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7251 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7252
7253
7254
7255
7256
7257
7258
7259
7260
7261 if (!(old_state->crtc && old_state->crtc->enabled) &&
7262 state->crtc && state->crtc->enabled &&
7263 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7264 dm_con_state->update_hdcp = false;
7265 return true;
7266 }
7267
7268
7269
7270
7271
7272
7273
7274
7275
7276 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7277 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7278 dm_con_state->update_hdcp = false;
7279 return true;
7280 }
7281
7282
7283
7284
7285
7286
7287 if (old_state->content_protection == state->content_protection)
7288 return false;
7289
7290
7291
7292
7293
7294
7295 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7296 return true;
7297
7298
7299
7300
7301 return false;
7302 }
7303
7304 #endif
7305 static void remove_stream(struct amdgpu_device *adev,
7306 struct amdgpu_crtc *acrtc,
7307 struct dc_stream_state *stream)
7308 {
7309
7310
7311 acrtc->otg_inst = -1;
7312 acrtc->enabled = false;
7313 }
7314
7315 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7316 {
7317
7318 assert_spin_locked(&acrtc->base.dev->event_lock);
7319 WARN_ON(acrtc->event);
7320
7321 acrtc->event = acrtc->base.state->event;
7322
7323
7324 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7325
7326
7327 acrtc->base.state->event = NULL;
7328
7329 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7330 acrtc->crtc_id);
7331 }
7332
7333 static void update_freesync_state_on_stream(
7334 struct amdgpu_display_manager *dm,
7335 struct dm_crtc_state *new_crtc_state,
7336 struct dc_stream_state *new_stream,
7337 struct dc_plane_state *surface,
7338 u32 flip_timestamp_in_us)
7339 {
7340 struct mod_vrr_params vrr_params;
7341 struct dc_info_packet vrr_infopacket = {0};
7342 struct amdgpu_device *adev = dm->adev;
7343 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7344 unsigned long flags;
7345 bool pack_sdp_v1_3 = false;
7346
7347 if (!new_stream)
7348 return;
7349
7350
7351
7352
7353
7354
7355 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7356 return;
7357
7358 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7359 vrr_params = acrtc->dm_irq_params.vrr_params;
7360
7361 if (surface) {
7362 mod_freesync_handle_preflip(
7363 dm->freesync_module,
7364 surface,
7365 new_stream,
7366 flip_timestamp_in_us,
7367 &vrr_params);
7368
7369 if (adev->family < AMDGPU_FAMILY_AI &&
7370 amdgpu_dm_vrr_active(new_crtc_state)) {
7371 mod_freesync_handle_v_update(dm->freesync_module,
7372 new_stream, &vrr_params);
7373
7374
7375 dc_stream_adjust_vmin_vmax(dm->dc,
7376 new_crtc_state->stream,
7377 &vrr_params.adjust);
7378 }
7379 }
7380
7381 mod_freesync_build_vrr_infopacket(
7382 dm->freesync_module,
7383 new_stream,
7384 &vrr_params,
7385 PACKET_TYPE_VRR,
7386 TRANSFER_FUNC_UNKNOWN,
7387 &vrr_infopacket,
7388 pack_sdp_v1_3);
7389
7390 new_crtc_state->freesync_timing_changed |=
7391 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7392 &vrr_params.adjust,
7393 sizeof(vrr_params.adjust)) != 0);
7394
7395 new_crtc_state->freesync_vrr_info_changed |=
7396 (memcmp(&new_crtc_state->vrr_infopacket,
7397 &vrr_infopacket,
7398 sizeof(vrr_infopacket)) != 0);
7399
7400 acrtc->dm_irq_params.vrr_params = vrr_params;
7401 new_crtc_state->vrr_infopacket = vrr_infopacket;
7402
7403 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7404 new_stream->vrr_infopacket = vrr_infopacket;
7405
7406 if (new_crtc_state->freesync_vrr_info_changed)
7407 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7408 new_crtc_state->base.crtc->base.id,
7409 (int)new_crtc_state->base.vrr_enabled,
7410 (int)vrr_params.state);
7411
7412 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7413 }
7414
7415 static void update_stream_irq_parameters(
7416 struct amdgpu_display_manager *dm,
7417 struct dm_crtc_state *new_crtc_state)
7418 {
7419 struct dc_stream_state *new_stream = new_crtc_state->stream;
7420 struct mod_vrr_params vrr_params;
7421 struct mod_freesync_config config = new_crtc_state->freesync_config;
7422 struct amdgpu_device *adev = dm->adev;
7423 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7424 unsigned long flags;
7425
7426 if (!new_stream)
7427 return;
7428
7429
7430
7431
7432
7433 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7434 return;
7435
7436 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7437 vrr_params = acrtc->dm_irq_params.vrr_params;
7438
7439 if (new_crtc_state->vrr_supported &&
7440 config.min_refresh_in_uhz &&
7441 config.max_refresh_in_uhz) {
7442
7443
7444
7445
7446 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7447 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7448 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7449 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7450 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7451 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7452 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7453 } else {
7454 config.state = new_crtc_state->base.vrr_enabled ?
7455 VRR_STATE_ACTIVE_VARIABLE :
7456 VRR_STATE_INACTIVE;
7457 }
7458 } else {
7459 config.state = VRR_STATE_UNSUPPORTED;
7460 }
7461
7462 mod_freesync_build_vrr_params(dm->freesync_module,
7463 new_stream,
7464 &config, &vrr_params);
7465
7466 new_crtc_state->freesync_timing_changed |=
7467 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7468 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7469
7470 new_crtc_state->freesync_config = config;
7471
7472 acrtc->dm_irq_params.freesync_config = config;
7473 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7474 acrtc->dm_irq_params.vrr_params = vrr_params;
7475 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7476 }
7477
7478 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7479 struct dm_crtc_state *new_state)
7480 {
7481 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7482 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7483
7484 if (!old_vrr_active && new_vrr_active) {
7485
7486
7487
7488
7489
7490
7491
7492
7493 dm_set_vupdate_irq(new_state->base.crtc, true);
7494 drm_crtc_vblank_get(new_state->base.crtc);
7495 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7496 __func__, new_state->base.crtc->base.id);
7497 } else if (old_vrr_active && !new_vrr_active) {
7498
7499
7500
7501 dm_set_vupdate_irq(new_state->base.crtc, false);
7502 drm_crtc_vblank_put(new_state->base.crtc);
7503 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7504 __func__, new_state->base.crtc->base.id);
7505 }
7506 }
7507
7508 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7509 {
7510 struct drm_plane *plane;
7511 struct drm_plane_state *old_plane_state;
7512 int i;
7513
7514
7515
7516
7517
7518 for_each_old_plane_in_state(state, plane, old_plane_state, i)
7519 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7520 handle_cursor_update(plane, old_plane_state);
7521 }
7522
7523 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7524 struct dc_state *dc_state,
7525 struct drm_device *dev,
7526 struct amdgpu_display_manager *dm,
7527 struct drm_crtc *pcrtc,
7528 bool wait_for_vblank)
7529 {
7530 uint32_t i;
7531 uint64_t timestamp_ns;
7532 struct drm_plane *plane;
7533 struct drm_plane_state *old_plane_state, *new_plane_state;
7534 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7535 struct drm_crtc_state *new_pcrtc_state =
7536 drm_atomic_get_new_crtc_state(state, pcrtc);
7537 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7538 struct dm_crtc_state *dm_old_crtc_state =
7539 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7540 int planes_count = 0, vpos, hpos;
7541 unsigned long flags;
7542 uint32_t target_vblank, last_flip_vblank;
7543 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7544 bool cursor_update = false;
7545 bool pflip_present = false;
7546 struct {
7547 struct dc_surface_update surface_updates[MAX_SURFACES];
7548 struct dc_plane_info plane_infos[MAX_SURFACES];
7549 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7550 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7551 struct dc_stream_update stream_update;
7552 } *bundle;
7553
7554 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7555
7556 if (!bundle) {
7557 dm_error("Failed to allocate update bundle\n");
7558 goto cleanup;
7559 }
7560
7561
7562
7563
7564
7565
7566 if (acrtc_state->active_planes == 0)
7567 amdgpu_dm_commit_cursors(state);
7568
7569
7570 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7571 struct drm_crtc *crtc = new_plane_state->crtc;
7572 struct drm_crtc_state *new_crtc_state;
7573 struct drm_framebuffer *fb = new_plane_state->fb;
7574 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7575 bool plane_needs_flip;
7576 struct dc_plane_state *dc_plane;
7577 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7578
7579
7580 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7581 if ((fb && crtc == pcrtc) ||
7582 (old_plane_state->fb && old_plane_state->crtc == pcrtc))
7583 cursor_update = true;
7584
7585 continue;
7586 }
7587
7588 if (!fb || !crtc || pcrtc != crtc)
7589 continue;
7590
7591 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7592 if (!new_crtc_state->active)
7593 continue;
7594
7595 dc_plane = dm_new_plane_state->dc_state;
7596
7597 bundle->surface_updates[planes_count].surface = dc_plane;
7598 if (new_pcrtc_state->color_mgmt_changed) {
7599 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7600 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7601 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7602 }
7603
7604 fill_dc_scaling_info(dm->adev, new_plane_state,
7605 &bundle->scaling_infos[planes_count]);
7606
7607 bundle->surface_updates[planes_count].scaling_info =
7608 &bundle->scaling_infos[planes_count];
7609
7610 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7611
7612 pflip_present = pflip_present || plane_needs_flip;
7613
7614 if (!plane_needs_flip) {
7615 planes_count += 1;
7616 continue;
7617 }
7618
7619 fill_dc_plane_info_and_addr(
7620 dm->adev, new_plane_state,
7621 afb->tiling_flags,
7622 &bundle->plane_infos[planes_count],
7623 &bundle->flip_addrs[planes_count].address,
7624 afb->tmz_surface, false);
7625
7626 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
7627 new_plane_state->plane->index,
7628 bundle->plane_infos[planes_count].dcc.enable);
7629
7630 bundle->surface_updates[planes_count].plane_info =
7631 &bundle->plane_infos[planes_count];
7632
7633 fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
7634 new_crtc_state,
7635 &bundle->flip_addrs[planes_count]);
7636
7637
7638
7639
7640
7641 bundle->flip_addrs[planes_count].flip_immediate =
7642 crtc->state->async_flip &&
7643 acrtc_state->update_type == UPDATE_TYPE_FAST;
7644
7645 timestamp_ns = ktime_get_ns();
7646 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7647 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7648 bundle->surface_updates[planes_count].surface = dc_plane;
7649
7650 if (!bundle->surface_updates[planes_count].surface) {
7651 DRM_ERROR("No surface for CRTC: id=%d\n",
7652 acrtc_attach->crtc_id);
7653 continue;
7654 }
7655
7656 if (plane == pcrtc->primary)
7657 update_freesync_state_on_stream(
7658 dm,
7659 acrtc_state,
7660 acrtc_state->stream,
7661 dc_plane,
7662 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7663
7664 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
7665 __func__,
7666 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7667 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7668
7669 planes_count += 1;
7670
7671 }
7672
7673 if (pflip_present) {
7674 if (!vrr_active) {
7675
7676
7677
7678
7679
7680
7681 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7682 }
7683 else {
7684
7685
7686
7687
7688
7689
7690
7691
7692 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7693 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7694 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7695 }
7696
7697 target_vblank = last_flip_vblank + wait_for_vblank;
7698
7699
7700
7701
7702
7703 while ((acrtc_attach->enabled &&
7704 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7705 0, &vpos, &hpos, NULL,
7706 NULL, &pcrtc->hwmode)
7707 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7708 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7709 (int)(target_vblank -
7710 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7711 usleep_range(1000, 1100);
7712 }
7713
7714
7715
7716
7717
7718
7719
7720
7721
7722 if (acrtc_attach->base.state->event &&
7723 acrtc_state->active_planes > 0) {
7724 drm_crtc_vblank_get(pcrtc);
7725
7726 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7727
7728 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7729 prepare_flip_isr(acrtc_attach);
7730
7731 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7732 }
7733
7734 if (acrtc_state->stream) {
7735 if (acrtc_state->freesync_vrr_info_changed)
7736 bundle->stream_update.vrr_infopacket =
7737 &acrtc_state->stream->vrr_infopacket;
7738 }
7739 } else if (cursor_update && acrtc_state->active_planes > 0 &&
7740 acrtc_attach->base.state->event) {
7741 drm_crtc_vblank_get(pcrtc);
7742
7743 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7744
7745 acrtc_attach->event = acrtc_attach->base.state->event;
7746 acrtc_attach->base.state->event = NULL;
7747
7748 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7749 }
7750
7751
7752 if ((planes_count || acrtc_state->active_planes == 0) &&
7753 acrtc_state->stream) {
7754
7755
7756
7757
7758 if (dm->vblank_control_workqueue)
7759 flush_workqueue(dm->vblank_control_workqueue);
7760
7761 bundle->stream_update.stream = acrtc_state->stream;
7762 if (new_pcrtc_state->mode_changed) {
7763 bundle->stream_update.src = acrtc_state->stream->src;
7764 bundle->stream_update.dst = acrtc_state->stream->dst;
7765 }
7766
7767 if (new_pcrtc_state->color_mgmt_changed) {
7768
7769
7770
7771
7772 bundle->stream_update.gamut_remap =
7773 &acrtc_state->stream->gamut_remap_matrix;
7774 bundle->stream_update.output_csc_transform =
7775 &acrtc_state->stream->csc_color_matrix;
7776 bundle->stream_update.out_transfer_func =
7777 acrtc_state->stream->out_transfer_func;
7778 }
7779
7780 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7781 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7782 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7783
7784
7785
7786
7787
7788
7789 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
7790 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7791 dc_stream_adjust_vmin_vmax(
7792 dm->dc, acrtc_state->stream,
7793 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7794 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7795 }
7796 mutex_lock(&dm->dc_lock);
7797 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7798 acrtc_state->stream->link->psr_settings.psr_allow_active)
7799 amdgpu_dm_psr_disable(acrtc_state->stream);
7800
7801 dc_commit_updates_for_stream(dm->dc,
7802 bundle->surface_updates,
7803 planes_count,
7804 acrtc_state->stream,
7805 &bundle->stream_update,
7806 dc_state);
7807
7808
7809
7810
7811
7812
7813
7814
7815
7816
7817
7818
7819
7820
7821 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7822 dm_update_pflip_irq_state(drm_to_adev(dev),
7823 acrtc_attach);
7824
7825 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7826 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7827 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7828 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7829
7830
7831 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
7832 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
7833 struct amdgpu_dm_connector *aconn =
7834 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
7835
7836 if (aconn->psr_skip_count > 0)
7837 aconn->psr_skip_count--;
7838
7839
7840 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
7841
7842
7843
7844
7845
7846
7847
7848
7849 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
7850 acrtc_attach->dm_irq_params.allow_psr_entry &&
7851 !acrtc_state->stream->link->psr_settings.psr_allow_active)
7852 amdgpu_dm_psr_enable(acrtc_state->stream);
7853 } else {
7854 acrtc_attach->dm_irq_params.allow_psr_entry = false;
7855 }
7856
7857 mutex_unlock(&dm->dc_lock);
7858 }
7859
7860
7861
7862
7863
7864
7865 if (acrtc_state->active_planes)
7866 amdgpu_dm_commit_cursors(state);
7867
7868 cleanup:
7869 kfree(bundle);
7870 }
7871
7872 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7873 struct drm_atomic_state *state)
7874 {
7875 struct amdgpu_device *adev = drm_to_adev(dev);
7876 struct amdgpu_dm_connector *aconnector;
7877 struct drm_connector *connector;
7878 struct drm_connector_state *old_con_state, *new_con_state;
7879 struct drm_crtc_state *new_crtc_state;
7880 struct dm_crtc_state *new_dm_crtc_state;
7881 const struct dc_stream_status *status;
7882 int i, inst;
7883
7884
7885 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7886 if (old_con_state->crtc != new_con_state->crtc) {
7887
7888 goto notify;
7889 }
7890
7891 if (!new_con_state->crtc)
7892 continue;
7893
7894 new_crtc_state = drm_atomic_get_new_crtc_state(
7895 state, new_con_state->crtc);
7896
7897 if (!new_crtc_state)
7898 continue;
7899
7900 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7901 continue;
7902
7903 notify:
7904 aconnector = to_amdgpu_dm_connector(connector);
7905
7906 mutex_lock(&adev->dm.audio_lock);
7907 inst = aconnector->audio_inst;
7908 aconnector->audio_inst = -1;
7909 mutex_unlock(&adev->dm.audio_lock);
7910
7911 amdgpu_dm_audio_eld_notify(adev, inst);
7912 }
7913
7914
7915 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7916 if (!new_con_state->crtc)
7917 continue;
7918
7919 new_crtc_state = drm_atomic_get_new_crtc_state(
7920 state, new_con_state->crtc);
7921
7922 if (!new_crtc_state)
7923 continue;
7924
7925 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7926 continue;
7927
7928 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7929 if (!new_dm_crtc_state->stream)
7930 continue;
7931
7932 status = dc_stream_get_status(new_dm_crtc_state->stream);
7933 if (!status)
7934 continue;
7935
7936 aconnector = to_amdgpu_dm_connector(connector);
7937
7938 mutex_lock(&adev->dm.audio_lock);
7939 inst = status->audio_inst;
7940 aconnector->audio_inst = inst;
7941 mutex_unlock(&adev->dm.audio_lock);
7942
7943 amdgpu_dm_audio_eld_notify(adev, inst);
7944 }
7945 }
7946
7947
7948
7949
7950
7951
7952
7953
7954
7955 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7956 struct dc_stream_state *stream_state)
7957 {
7958 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7959 }
7960
7961
7962
7963
7964
7965
7966
7967
7968
7969 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7970 {
7971 struct drm_device *dev = state->dev;
7972 struct amdgpu_device *adev = drm_to_adev(dev);
7973 struct amdgpu_display_manager *dm = &adev->dm;
7974 struct dm_atomic_state *dm_state;
7975 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7976 uint32_t i, j;
7977 struct drm_crtc *crtc;
7978 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7979 unsigned long flags;
7980 bool wait_for_vblank = true;
7981 struct drm_connector *connector;
7982 struct drm_connector_state *old_con_state, *new_con_state;
7983 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7984 int crtc_disable_count = 0;
7985 bool mode_set_reset_required = false;
7986 int r;
7987
7988 trace_amdgpu_dm_atomic_commit_tail_begin(state);
7989
7990 r = drm_atomic_helper_wait_for_fences(dev, state, false);
7991 if (unlikely(r))
7992 DRM_ERROR("Waiting for fences timed out!");
7993
7994 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7995
7996 dm_state = dm_atomic_get_new_state(state);
7997 if (dm_state && dm_state->context) {
7998 dc_state = dm_state->context;
7999 } else {
8000
8001 dc_state_temp = dc_create_state(dm->dc);
8002 ASSERT(dc_state_temp);
8003 dc_state = dc_state_temp;
8004 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8005 }
8006
8007 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8008 new_crtc_state, i) {
8009 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8010
8011 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8012
8013 if (old_crtc_state->active &&
8014 (!new_crtc_state->active ||
8015 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8016 manage_dm_interrupts(adev, acrtc, false);
8017 dc_stream_release(dm_old_crtc_state->stream);
8018 }
8019 }
8020
8021 drm_atomic_helper_calc_timestamping_constants(state);
8022
8023
8024 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8025 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8026
8027 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8028 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8029
8030 drm_dbg_state(state->dev,
8031 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8032 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8033 "connectors_changed:%d\n",
8034 acrtc->crtc_id,
8035 new_crtc_state->enable,
8036 new_crtc_state->active,
8037 new_crtc_state->planes_changed,
8038 new_crtc_state->mode_changed,
8039 new_crtc_state->active_changed,
8040 new_crtc_state->connectors_changed);
8041
8042
8043 if (old_crtc_state->active && !new_crtc_state->active) {
8044 struct dc_cursor_position position;
8045
8046 memset(&position, 0, sizeof(position));
8047 mutex_lock(&dm->dc_lock);
8048 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8049 mutex_unlock(&dm->dc_lock);
8050 }
8051
8052
8053 if (dm_new_crtc_state->stream) {
8054 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8055 dm_new_crtc_state->stream);
8056 }
8057
8058
8059
8060
8061
8062 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8063
8064 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8065
8066 if (!dm_new_crtc_state->stream) {
8067
8068
8069
8070
8071
8072
8073
8074
8075
8076
8077
8078
8079
8080
8081
8082 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8083 __func__, acrtc->base.base.id);
8084 continue;
8085 }
8086
8087 if (dm_old_crtc_state->stream)
8088 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8089
8090 pm_runtime_get_noresume(dev->dev);
8091
8092 acrtc->enabled = true;
8093 acrtc->hw_mode = new_crtc_state->mode;
8094 crtc->hwmode = new_crtc_state->mode;
8095 mode_set_reset_required = true;
8096 } else if (modereset_required(new_crtc_state)) {
8097 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8098
8099 if (dm_old_crtc_state->stream)
8100 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8101
8102 mode_set_reset_required = true;
8103 }
8104 }
8105
8106 if (dc_state) {
8107
8108 if (mode_set_reset_required) {
8109 if (dm->vblank_control_workqueue)
8110 flush_workqueue(dm->vblank_control_workqueue);
8111
8112 amdgpu_dm_psr_disable_all(dm);
8113 }
8114
8115 dm_enable_per_frame_crtc_master_sync(dc_state);
8116 mutex_lock(&dm->dc_lock);
8117 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8118
8119
8120 if (dm->active_vblank_irq_count == 0)
8121 dc_allow_idle_optimizations(dm->dc, true);
8122 mutex_unlock(&dm->dc_lock);
8123 }
8124
8125 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8126 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8127
8128 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8129
8130 if (dm_new_crtc_state->stream != NULL) {
8131 const struct dc_stream_status *status =
8132 dc_stream_get_status(dm_new_crtc_state->stream);
8133
8134 if (!status)
8135 status = dc_stream_get_status_from_state(dc_state,
8136 dm_new_crtc_state->stream);
8137 if (!status)
8138 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8139 else
8140 acrtc->otg_inst = status->primary_otg_inst;
8141 }
8142 }
8143 #ifdef CONFIG_DRM_AMD_DC_HDCP
8144 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8145 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8146 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8147 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8148
8149 new_crtc_state = NULL;
8150
8151 if (acrtc)
8152 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8153
8154 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8155
8156 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8157 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8158 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8159 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8160 dm_new_con_state->update_hdcp = true;
8161 continue;
8162 }
8163
8164 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8165 hdcp_update_display(
8166 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8167 new_con_state->hdcp_content_type,
8168 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8169 }
8170 #endif
8171
8172
8173 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8174 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8175 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8176 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8177 struct dc_surface_update dummy_updates[MAX_SURFACES];
8178 struct dc_stream_update stream_update;
8179 struct dc_info_packet hdr_packet;
8180 struct dc_stream_status *status = NULL;
8181 bool abm_changed, hdr_changed, scaling_changed;
8182
8183 memset(&dummy_updates, 0, sizeof(dummy_updates));
8184 memset(&stream_update, 0, sizeof(stream_update));
8185
8186 if (acrtc) {
8187 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8188 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8189 }
8190
8191
8192 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8193 continue;
8194
8195 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8196 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8197
8198 scaling_changed = is_scaling_state_different(dm_new_con_state,
8199 dm_old_con_state);
8200
8201 abm_changed = dm_new_crtc_state->abm_level !=
8202 dm_old_crtc_state->abm_level;
8203
8204 hdr_changed =
8205 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8206
8207 if (!scaling_changed && !abm_changed && !hdr_changed)
8208 continue;
8209
8210 stream_update.stream = dm_new_crtc_state->stream;
8211 if (scaling_changed) {
8212 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8213 dm_new_con_state, dm_new_crtc_state->stream);
8214
8215 stream_update.src = dm_new_crtc_state->stream->src;
8216 stream_update.dst = dm_new_crtc_state->stream->dst;
8217 }
8218
8219 if (abm_changed) {
8220 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8221
8222 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8223 }
8224
8225 if (hdr_changed) {
8226 fill_hdr_info_packet(new_con_state, &hdr_packet);
8227 stream_update.hdr_static_metadata = &hdr_packet;
8228 }
8229
8230 status = dc_stream_get_status(dm_new_crtc_state->stream);
8231
8232 if (WARN_ON(!status))
8233 continue;
8234
8235 WARN_ON(!status->plane_count);
8236
8237
8238
8239
8240
8241
8242 for (j = 0; j < status->plane_count; j++)
8243 dummy_updates[j].surface = status->plane_states[0];
8244
8245
8246 mutex_lock(&dm->dc_lock);
8247 dc_commit_updates_for_stream(dm->dc,
8248 dummy_updates,
8249 status->plane_count,
8250 dm_new_crtc_state->stream,
8251 &stream_update,
8252 dc_state);
8253 mutex_unlock(&dm->dc_lock);
8254 }
8255
8256
8257 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8258 new_crtc_state, i) {
8259 if (old_crtc_state->active && !new_crtc_state->active)
8260 crtc_disable_count++;
8261
8262 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8263 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8264
8265
8266 update_stream_irq_parameters(dm, dm_new_crtc_state);
8267
8268
8269 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8270 dm_new_crtc_state);
8271 }
8272
8273
8274
8275
8276
8277
8278
8279 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8280 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8281 #ifdef CONFIG_DEBUG_FS
8282 bool configure_crc = false;
8283 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8284 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8285 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
8286 #endif
8287 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8288 cur_crc_src = acrtc->dm_irq_params.crc_src;
8289 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8290 #endif
8291 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8292
8293 if (new_crtc_state->active &&
8294 (!old_crtc_state->active ||
8295 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8296 dc_stream_retain(dm_new_crtc_state->stream);
8297 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8298 manage_dm_interrupts(adev, acrtc, true);
8299
8300 #ifdef CONFIG_DEBUG_FS
8301
8302
8303
8304
8305 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8306
8307 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8308 configure_crc = true;
8309 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8310 if (amdgpu_dm_crc_window_is_activated(crtc)) {
8311 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8312 acrtc->dm_irq_params.crc_window.update_win = true;
8313 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
8314 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
8315 crc_rd_wrk->crtc = crtc;
8316 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
8317 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8318 }
8319 #endif
8320 }
8321
8322 if (configure_crc)
8323 if (amdgpu_dm_crtc_configure_crc_source(
8324 crtc, dm_new_crtc_state, cur_crc_src))
8325 DRM_DEBUG_DRIVER("Failed to configure crc source");
8326 #endif
8327 }
8328 }
8329
8330 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8331 if (new_crtc_state->async_flip)
8332 wait_for_vblank = false;
8333
8334
8335 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8336 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8337
8338 if (dm_new_crtc_state->stream)
8339 amdgpu_dm_commit_planes(state, dc_state, dev,
8340 dm, crtc, wait_for_vblank);
8341 }
8342
8343
8344 amdgpu_dm_commit_audio(dev, state);
8345
8346
8347 for (i = 0; i < dm->num_of_edps; i++) {
8348 if (dm->backlight_dev[i] &&
8349 (dm->actual_brightness[i] != dm->brightness[i]))
8350 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
8351 }
8352
8353
8354
8355
8356
8357 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8358 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8359
8360 if (new_crtc_state->event)
8361 drm_send_event_locked(dev, &new_crtc_state->event->base);
8362
8363 new_crtc_state->event = NULL;
8364 }
8365 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8366
8367
8368 drm_atomic_helper_commit_hw_done(state);
8369
8370 if (wait_for_vblank)
8371 drm_atomic_helper_wait_for_flip_done(dev, state);
8372
8373 drm_atomic_helper_cleanup_planes(dev, state);
8374
8375
8376 if (!adev->mman.keep_stolen_vga_memory)
8377 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8378 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8379
8380
8381
8382
8383
8384
8385 for (i = 0; i < crtc_disable_count; i++)
8386 pm_runtime_put_autosuspend(dev->dev);
8387 pm_runtime_mark_last_busy(dev->dev);
8388
8389 if (dc_state_temp)
8390 dc_release_state(dc_state_temp);
8391 }
8392
8393
8394 static int dm_force_atomic_commit(struct drm_connector *connector)
8395 {
8396 int ret = 0;
8397 struct drm_device *ddev = connector->dev;
8398 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8399 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8400 struct drm_plane *plane = disconnected_acrtc->base.primary;
8401 struct drm_connector_state *conn_state;
8402 struct drm_crtc_state *crtc_state;
8403 struct drm_plane_state *plane_state;
8404
8405 if (!state)
8406 return -ENOMEM;
8407
8408 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8409
8410
8411
8412
8413
8414
8415 conn_state = drm_atomic_get_connector_state(state, connector);
8416
8417 ret = PTR_ERR_OR_ZERO(conn_state);
8418 if (ret)
8419 goto out;
8420
8421
8422 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8423
8424 ret = PTR_ERR_OR_ZERO(crtc_state);
8425 if (ret)
8426 goto out;
8427
8428
8429 crtc_state->mode_changed = true;
8430
8431
8432 plane_state = drm_atomic_get_plane_state(state, plane);
8433
8434 ret = PTR_ERR_OR_ZERO(plane_state);
8435 if (ret)
8436 goto out;
8437
8438
8439 ret = drm_atomic_commit(state);
8440
8441 out:
8442 drm_atomic_state_put(state);
8443 if (ret)
8444 DRM_ERROR("Restoring old state failed with %i\n", ret);
8445
8446 return ret;
8447 }
8448
8449
8450
8451
8452
8453
8454 void dm_restore_drm_connector_state(struct drm_device *dev,
8455 struct drm_connector *connector)
8456 {
8457 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8458 struct amdgpu_crtc *disconnected_acrtc;
8459 struct dm_crtc_state *acrtc_state;
8460
8461 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8462 return;
8463
8464 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8465 if (!disconnected_acrtc)
8466 return;
8467
8468 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8469 if (!acrtc_state->stream)
8470 return;
8471
8472
8473
8474
8475
8476
8477 if (acrtc_state->stream->sink != aconnector->dc_sink)
8478 dm_force_atomic_commit(&aconnector->base);
8479 }
8480
8481
8482
8483
8484
8485 static int do_aquire_global_lock(struct drm_device *dev,
8486 struct drm_atomic_state *state)
8487 {
8488 struct drm_crtc *crtc;
8489 struct drm_crtc_commit *commit;
8490 long ret;
8491
8492
8493
8494
8495
8496
8497 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8498 if (ret)
8499 return ret;
8500
8501 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8502 spin_lock(&crtc->commit_lock);
8503 commit = list_first_entry_or_null(&crtc->commit_list,
8504 struct drm_crtc_commit, commit_entry);
8505 if (commit)
8506 drm_crtc_commit_get(commit);
8507 spin_unlock(&crtc->commit_lock);
8508
8509 if (!commit)
8510 continue;
8511
8512
8513
8514
8515
8516 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8517
8518 if (ret > 0)
8519 ret = wait_for_completion_interruptible_timeout(
8520 &commit->flip_done, 10*HZ);
8521
8522 if (ret == 0)
8523 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8524 "timed out\n", crtc->base.id, crtc->name);
8525
8526 drm_crtc_commit_put(commit);
8527 }
8528
8529 return ret < 0 ? ret : 0;
8530 }
8531
8532 static void get_freesync_config_for_crtc(
8533 struct dm_crtc_state *new_crtc_state,
8534 struct dm_connector_state *new_con_state)
8535 {
8536 struct mod_freesync_config config = {0};
8537 struct amdgpu_dm_connector *aconnector =
8538 to_amdgpu_dm_connector(new_con_state->base.connector);
8539 struct drm_display_mode *mode = &new_crtc_state->base.mode;
8540 int vrefresh = drm_mode_vrefresh(mode);
8541 bool fs_vid_mode = false;
8542
8543 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8544 vrefresh >= aconnector->min_vfreq &&
8545 vrefresh <= aconnector->max_vfreq;
8546
8547 if (new_crtc_state->vrr_supported) {
8548 new_crtc_state->stream->ignore_msa_timing_param = true;
8549 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
8550
8551 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
8552 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
8553 config.vsif_supported = true;
8554 config.btr = true;
8555
8556 if (fs_vid_mode) {
8557 config.state = VRR_STATE_ACTIVE_FIXED;
8558 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
8559 goto out;
8560 } else if (new_crtc_state->base.vrr_enabled) {
8561 config.state = VRR_STATE_ACTIVE_VARIABLE;
8562 } else {
8563 config.state = VRR_STATE_INACTIVE;
8564 }
8565 }
8566 out:
8567 new_crtc_state->freesync_config = config;
8568 }
8569
8570 static void reset_freesync_config_for_crtc(
8571 struct dm_crtc_state *new_crtc_state)
8572 {
8573 new_crtc_state->vrr_supported = false;
8574
8575 memset(&new_crtc_state->vrr_infopacket, 0,
8576 sizeof(new_crtc_state->vrr_infopacket));
8577 }
8578
8579 static bool
8580 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
8581 struct drm_crtc_state *new_crtc_state)
8582 {
8583 const struct drm_display_mode *old_mode, *new_mode;
8584
8585 if (!old_crtc_state || !new_crtc_state)
8586 return false;
8587
8588 old_mode = &old_crtc_state->mode;
8589 new_mode = &new_crtc_state->mode;
8590
8591 if (old_mode->clock == new_mode->clock &&
8592 old_mode->hdisplay == new_mode->hdisplay &&
8593 old_mode->vdisplay == new_mode->vdisplay &&
8594 old_mode->htotal == new_mode->htotal &&
8595 old_mode->vtotal != new_mode->vtotal &&
8596 old_mode->hsync_start == new_mode->hsync_start &&
8597 old_mode->vsync_start != new_mode->vsync_start &&
8598 old_mode->hsync_end == new_mode->hsync_end &&
8599 old_mode->vsync_end != new_mode->vsync_end &&
8600 old_mode->hskew == new_mode->hskew &&
8601 old_mode->vscan == new_mode->vscan &&
8602 (old_mode->vsync_end - old_mode->vsync_start) ==
8603 (new_mode->vsync_end - new_mode->vsync_start))
8604 return true;
8605
8606 return false;
8607 }
8608
8609 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
8610 uint64_t num, den, res;
8611 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
8612
8613 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
8614
8615 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
8616 den = (unsigned long long)new_crtc_state->mode.htotal *
8617 (unsigned long long)new_crtc_state->mode.vtotal;
8618
8619 res = div_u64(num, den);
8620 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
8621 }
8622
8623 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8624 struct drm_atomic_state *state,
8625 struct drm_crtc *crtc,
8626 struct drm_crtc_state *old_crtc_state,
8627 struct drm_crtc_state *new_crtc_state,
8628 bool enable,
8629 bool *lock_and_validation_needed)
8630 {
8631 struct dm_atomic_state *dm_state = NULL;
8632 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8633 struct dc_stream_state *new_stream;
8634 int ret = 0;
8635
8636
8637
8638
8639
8640 struct amdgpu_crtc *acrtc = NULL;
8641 struct amdgpu_dm_connector *aconnector = NULL;
8642 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8643 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8644
8645 new_stream = NULL;
8646
8647 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8648 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8649 acrtc = to_amdgpu_crtc(crtc);
8650 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8651
8652
8653 if (aconnector && enable) {
8654
8655 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8656 &aconnector->base);
8657 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8658 &aconnector->base);
8659
8660 if (IS_ERR(drm_new_conn_state)) {
8661 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8662 goto fail;
8663 }
8664
8665 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8666 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8667
8668 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8669 goto skip_modeset;
8670
8671 new_stream = create_validate_stream_for_sink(aconnector,
8672 &new_crtc_state->mode,
8673 dm_new_conn_state,
8674 dm_old_crtc_state->stream);
8675
8676
8677
8678
8679
8680
8681
8682
8683 if (!new_stream) {
8684 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8685 __func__, acrtc->base.base.id);
8686 ret = -ENOMEM;
8687 goto fail;
8688 }
8689
8690
8691
8692
8693
8694 new_stream->triggered_crtc_reset.enabled =
8695 dm->force_timing_sync;
8696
8697 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8698
8699 ret = fill_hdr_info_packet(drm_new_conn_state,
8700 &new_stream->hdr_static_metadata);
8701 if (ret)
8702 goto fail;
8703
8704
8705
8706
8707
8708
8709
8710
8711
8712
8713 if (dm_new_crtc_state->stream &&
8714 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
8715 goto skip_modeset;
8716
8717 if (dm_new_crtc_state->stream &&
8718 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8719 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8720 new_crtc_state->mode_changed = false;
8721 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8722 new_crtc_state->mode_changed);
8723 }
8724 }
8725
8726
8727 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8728 goto skip_modeset;
8729
8730 drm_dbg_state(state->dev,
8731 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8732 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8733 "connectors_changed:%d\n",
8734 acrtc->crtc_id,
8735 new_crtc_state->enable,
8736 new_crtc_state->active,
8737 new_crtc_state->planes_changed,
8738 new_crtc_state->mode_changed,
8739 new_crtc_state->active_changed,
8740 new_crtc_state->connectors_changed);
8741
8742
8743 if (!enable) {
8744
8745 if (!dm_old_crtc_state->stream)
8746 goto skip_modeset;
8747
8748 if (dm_new_crtc_state->stream &&
8749 is_timing_unchanged_for_freesync(new_crtc_state,
8750 old_crtc_state)) {
8751 new_crtc_state->mode_changed = false;
8752 DRM_DEBUG_DRIVER(
8753 "Mode change not required for front porch change, "
8754 "setting mode_changed to %d",
8755 new_crtc_state->mode_changed);
8756
8757 set_freesync_fixed_config(dm_new_crtc_state);
8758
8759 goto skip_modeset;
8760 } else if (aconnector &&
8761 is_freesync_video_mode(&new_crtc_state->mode,
8762 aconnector)) {
8763 struct drm_display_mode *high_mode;
8764
8765 high_mode = get_highest_refresh_rate_mode(aconnector, false);
8766 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
8767 set_freesync_fixed_config(dm_new_crtc_state);
8768 }
8769 }
8770
8771 ret = dm_atomic_get_state(state, &dm_state);
8772 if (ret)
8773 goto fail;
8774
8775 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8776 crtc->base.id);
8777
8778
8779 if (dc_remove_stream_from_ctx(
8780 dm->dc,
8781 dm_state->context,
8782 dm_old_crtc_state->stream) != DC_OK) {
8783 ret = -EINVAL;
8784 goto fail;
8785 }
8786
8787 dc_stream_release(dm_old_crtc_state->stream);
8788 dm_new_crtc_state->stream = NULL;
8789
8790 reset_freesync_config_for_crtc(dm_new_crtc_state);
8791
8792 *lock_and_validation_needed = true;
8793
8794 } else {
8795
8796
8797
8798
8799
8800 if (!aconnector)
8801 goto skip_modeset;
8802
8803 if (modereset_required(new_crtc_state))
8804 goto skip_modeset;
8805
8806 if (modeset_required(new_crtc_state, new_stream,
8807 dm_old_crtc_state->stream)) {
8808
8809 WARN_ON(dm_new_crtc_state->stream);
8810
8811 ret = dm_atomic_get_state(state, &dm_state);
8812 if (ret)
8813 goto fail;
8814
8815 dm_new_crtc_state->stream = new_stream;
8816
8817 dc_stream_retain(new_stream);
8818
8819 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
8820 crtc->base.id);
8821
8822 if (dc_add_stream_to_ctx(
8823 dm->dc,
8824 dm_state->context,
8825 dm_new_crtc_state->stream) != DC_OK) {
8826 ret = -EINVAL;
8827 goto fail;
8828 }
8829
8830 *lock_and_validation_needed = true;
8831 }
8832 }
8833
8834 skip_modeset:
8835
8836 if (new_stream)
8837 dc_stream_release(new_stream);
8838
8839
8840
8841
8842
8843 if (!(enable && aconnector && new_crtc_state->active))
8844 return 0;
8845
8846
8847
8848
8849
8850
8851
8852
8853 BUG_ON(dm_new_crtc_state->stream == NULL);
8854
8855
8856 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8857 drm_atomic_crtc_needs_modeset(new_crtc_state))
8858 update_stream_scaling_settings(
8859 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8860
8861
8862 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8863
8864
8865
8866
8867
8868 if (dm_new_crtc_state->base.color_mgmt_changed ||
8869 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8870 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8871 if (ret)
8872 goto fail;
8873 }
8874
8875
8876 get_freesync_config_for_crtc(dm_new_crtc_state,
8877 dm_new_conn_state);
8878
8879 return ret;
8880
8881 fail:
8882 if (new_stream)
8883 dc_stream_release(new_stream);
8884 return ret;
8885 }
8886
8887 static bool should_reset_plane(struct drm_atomic_state *state,
8888 struct drm_plane *plane,
8889 struct drm_plane_state *old_plane_state,
8890 struct drm_plane_state *new_plane_state)
8891 {
8892 struct drm_plane *other;
8893 struct drm_plane_state *old_other_state, *new_other_state;
8894 struct drm_crtc_state *new_crtc_state;
8895 int i;
8896
8897
8898
8899
8900
8901
8902 if (state->allow_modeset)
8903 return true;
8904
8905
8906 if (old_plane_state->crtc != new_plane_state->crtc)
8907 return true;
8908
8909
8910 if (!new_plane_state->crtc)
8911 return false;
8912
8913 new_crtc_state =
8914 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8915
8916 if (!new_crtc_state)
8917 return true;
8918
8919
8920 if (new_crtc_state->color_mgmt_changed)
8921 return true;
8922
8923 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8924 return true;
8925
8926
8927
8928
8929
8930
8931
8932
8933
8934 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8935 struct amdgpu_framebuffer *old_afb, *new_afb;
8936 if (other->type == DRM_PLANE_TYPE_CURSOR)
8937 continue;
8938
8939 if (old_other_state->crtc != new_plane_state->crtc &&
8940 new_other_state->crtc != new_plane_state->crtc)
8941 continue;
8942
8943 if (old_other_state->crtc != new_other_state->crtc)
8944 return true;
8945
8946
8947 if (old_other_state->src_w != new_other_state->src_w ||
8948 old_other_state->src_h != new_other_state->src_h ||
8949 old_other_state->crtc_w != new_other_state->crtc_w ||
8950 old_other_state->crtc_h != new_other_state->crtc_h)
8951 return true;
8952
8953
8954 if (old_other_state->rotation != new_other_state->rotation)
8955 return true;
8956
8957
8958 if (old_other_state->pixel_blend_mode !=
8959 new_other_state->pixel_blend_mode)
8960 return true;
8961
8962
8963 if (old_other_state->alpha != new_other_state->alpha)
8964 return true;
8965
8966
8967 if (old_other_state->color_range != new_other_state->color_range ||
8968 old_other_state->color_encoding != new_other_state->color_encoding)
8969 return true;
8970
8971
8972 if (!old_other_state->fb || !new_other_state->fb)
8973 continue;
8974
8975
8976 if (old_other_state->fb->format != new_other_state->fb->format)
8977 return true;
8978
8979 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8980 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8981
8982
8983 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8984 old_afb->base.modifier != new_afb->base.modifier)
8985 return true;
8986 }
8987
8988 return false;
8989 }
8990
8991 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8992 struct drm_plane_state *new_plane_state,
8993 struct drm_framebuffer *fb)
8994 {
8995 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8996 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8997 unsigned int pitch;
8998 bool linear;
8999
9000 if (fb->width > new_acrtc->max_cursor_width ||
9001 fb->height > new_acrtc->max_cursor_height) {
9002 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9003 new_plane_state->fb->width,
9004 new_plane_state->fb->height);
9005 return -EINVAL;
9006 }
9007 if (new_plane_state->src_w != fb->width << 16 ||
9008 new_plane_state->src_h != fb->height << 16) {
9009 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9010 return -EINVAL;
9011 }
9012
9013
9014 pitch = fb->pitches[0] / fb->format->cpp[0];
9015
9016 if (fb->width != pitch) {
9017 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9018 fb->width, pitch);
9019 return -EINVAL;
9020 }
9021
9022 switch (pitch) {
9023 case 64:
9024 case 128:
9025 case 256:
9026
9027 break;
9028 default:
9029 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9030 return -EINVAL;
9031 }
9032
9033
9034
9035 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9036 if (adev->family < AMDGPU_FAMILY_AI) {
9037 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9038 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9039 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9040 } else {
9041 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9042 }
9043 if (!linear) {
9044 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9045 return -EINVAL;
9046 }
9047 }
9048
9049 return 0;
9050 }
9051
9052 static int dm_update_plane_state(struct dc *dc,
9053 struct drm_atomic_state *state,
9054 struct drm_plane *plane,
9055 struct drm_plane_state *old_plane_state,
9056 struct drm_plane_state *new_plane_state,
9057 bool enable,
9058 bool *lock_and_validation_needed)
9059 {
9060
9061 struct dm_atomic_state *dm_state = NULL;
9062 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9063 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9064 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9065 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9066 struct amdgpu_crtc *new_acrtc;
9067 bool needs_reset;
9068 int ret = 0;
9069
9070
9071 new_plane_crtc = new_plane_state->crtc;
9072 old_plane_crtc = old_plane_state->crtc;
9073 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9074 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9075
9076 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9077 if (!enable || !new_plane_crtc ||
9078 drm_atomic_plane_disabling(plane->state, new_plane_state))
9079 return 0;
9080
9081 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9082
9083 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9084 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9085 return -EINVAL;
9086 }
9087
9088 if (new_plane_state->fb) {
9089 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9090 new_plane_state->fb);
9091 if (ret)
9092 return ret;
9093 }
9094
9095 return 0;
9096 }
9097
9098 needs_reset = should_reset_plane(state, plane, old_plane_state,
9099 new_plane_state);
9100
9101
9102 if (!enable) {
9103 if (!needs_reset)
9104 return 0;
9105
9106 if (!old_plane_crtc)
9107 return 0;
9108
9109 old_crtc_state = drm_atomic_get_old_crtc_state(
9110 state, old_plane_crtc);
9111 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9112
9113 if (!dm_old_crtc_state->stream)
9114 return 0;
9115
9116 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9117 plane->base.id, old_plane_crtc->base.id);
9118
9119 ret = dm_atomic_get_state(state, &dm_state);
9120 if (ret)
9121 return ret;
9122
9123 if (!dc_remove_plane_from_context(
9124 dc,
9125 dm_old_crtc_state->stream,
9126 dm_old_plane_state->dc_state,
9127 dm_state->context)) {
9128
9129 return -EINVAL;
9130 }
9131
9132
9133 dc_plane_state_release(dm_old_plane_state->dc_state);
9134 dm_new_plane_state->dc_state = NULL;
9135
9136 *lock_and_validation_needed = true;
9137
9138 } else {
9139 struct dc_plane_state *dc_new_plane_state;
9140
9141 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9142 return 0;
9143
9144 if (!new_plane_crtc)
9145 return 0;
9146
9147 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9148 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9149
9150 if (!dm_new_crtc_state->stream)
9151 return 0;
9152
9153 if (!needs_reset)
9154 return 0;
9155
9156 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9157 if (ret)
9158 return ret;
9159
9160 WARN_ON(dm_new_plane_state->dc_state);
9161
9162 dc_new_plane_state = dc_create_plane_state(dc);
9163 if (!dc_new_plane_state)
9164 return -ENOMEM;
9165
9166 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9167 plane->base.id, new_plane_crtc->base.id);
9168
9169 ret = fill_dc_plane_attributes(
9170 drm_to_adev(new_plane_crtc->dev),
9171 dc_new_plane_state,
9172 new_plane_state,
9173 new_crtc_state);
9174 if (ret) {
9175 dc_plane_state_release(dc_new_plane_state);
9176 return ret;
9177 }
9178
9179 ret = dm_atomic_get_state(state, &dm_state);
9180 if (ret) {
9181 dc_plane_state_release(dc_new_plane_state);
9182 return ret;
9183 }
9184
9185
9186
9187
9188
9189
9190
9191
9192 if (!dc_add_plane_to_context(
9193 dc,
9194 dm_new_crtc_state->stream,
9195 dc_new_plane_state,
9196 dm_state->context)) {
9197
9198 dc_plane_state_release(dc_new_plane_state);
9199 return -EINVAL;
9200 }
9201
9202 dm_new_plane_state->dc_state = dc_new_plane_state;
9203
9204 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
9205
9206
9207
9208
9209 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9210
9211 *lock_and_validation_needed = true;
9212 }
9213
9214
9215 return ret;
9216 }
9217
9218 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
9219 int *src_w, int *src_h)
9220 {
9221 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
9222 case DRM_MODE_ROTATE_90:
9223 case DRM_MODE_ROTATE_270:
9224 *src_w = plane_state->src_h >> 16;
9225 *src_h = plane_state->src_w >> 16;
9226 break;
9227 case DRM_MODE_ROTATE_0:
9228 case DRM_MODE_ROTATE_180:
9229 default:
9230 *src_w = plane_state->src_w >> 16;
9231 *src_h = plane_state->src_h >> 16;
9232 break;
9233 }
9234 }
9235
9236 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9237 struct drm_crtc *crtc,
9238 struct drm_crtc_state *new_crtc_state)
9239 {
9240 struct drm_plane *cursor = crtc->cursor, *underlying;
9241 struct drm_plane_state *new_cursor_state, *new_underlying_state;
9242 int i;
9243 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
9244 int cursor_src_w, cursor_src_h;
9245 int underlying_src_w, underlying_src_h;
9246
9247
9248
9249
9250
9251
9252 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
9253 if (!new_cursor_state || !new_cursor_state->fb) {
9254 return 0;
9255 }
9256
9257 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
9258 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
9259 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
9260
9261 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
9262
9263 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
9264 continue;
9265
9266
9267 if (!new_underlying_state->fb)
9268 continue;
9269
9270 dm_get_oriented_plane_size(new_underlying_state,
9271 &underlying_src_w, &underlying_src_h);
9272 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
9273 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
9274
9275 if (cursor_scale_w != underlying_scale_w ||
9276 cursor_scale_h != underlying_scale_h) {
9277 drm_dbg_atomic(crtc->dev,
9278 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
9279 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
9280 return -EINVAL;
9281 }
9282
9283
9284 if (new_underlying_state->crtc_x <= 0 &&
9285 new_underlying_state->crtc_y <= 0 &&
9286 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
9287 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
9288 break;
9289 }
9290
9291 return 0;
9292 }
9293
9294 #if defined(CONFIG_DRM_AMD_DC_DCN)
9295 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9296 {
9297 struct drm_connector *connector;
9298 struct drm_connector_state *conn_state, *old_conn_state;
9299 struct amdgpu_dm_connector *aconnector = NULL;
9300 int i;
9301 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
9302 if (!conn_state->crtc)
9303 conn_state = old_conn_state;
9304
9305 if (conn_state->crtc != crtc)
9306 continue;
9307
9308 aconnector = to_amdgpu_dm_connector(connector);
9309 if (!aconnector->port || !aconnector->mst_port)
9310 aconnector = NULL;
9311 else
9312 break;
9313 }
9314
9315 if (!aconnector)
9316 return 0;
9317
9318 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9319 }
9320 #endif
9321
9322
9323
9324
9325
9326
9327
9328
9329
9330
9331
9332
9333
9334
9335
9336
9337
9338
9339
9340
9341
9342
9343
9344
9345
9346 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9347 struct drm_atomic_state *state)
9348 {
9349 struct amdgpu_device *adev = drm_to_adev(dev);
9350 struct dm_atomic_state *dm_state = NULL;
9351 struct dc *dc = adev->dm.dc;
9352 struct drm_connector *connector;
9353 struct drm_connector_state *old_con_state, *new_con_state;
9354 struct drm_crtc *crtc;
9355 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9356 struct drm_plane *plane;
9357 struct drm_plane_state *old_plane_state, *new_plane_state;
9358 enum dc_status status;
9359 int ret, i;
9360 bool lock_and_validation_needed = false;
9361 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9362 #if defined(CONFIG_DRM_AMD_DC_DCN)
9363 struct dsc_mst_fairness_vars vars[MAX_PIPES];
9364 struct drm_dp_mst_topology_state *mst_state;
9365 struct drm_dp_mst_topology_mgr *mgr;
9366 #endif
9367
9368 trace_amdgpu_dm_atomic_check_begin(state);
9369
9370 ret = drm_atomic_helper_check_modeset(dev, state);
9371 if (ret) {
9372 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
9373 goto fail;
9374 }
9375
9376
9377 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9378 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9379 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9380
9381
9382 if (!old_con_state->crtc && !new_con_state->crtc)
9383 continue;
9384
9385 if (!new_con_state->crtc)
9386 continue;
9387
9388 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9389 if (IS_ERR(new_crtc_state)) {
9390 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
9391 ret = PTR_ERR(new_crtc_state);
9392 goto fail;
9393 }
9394
9395 if (dm_old_con_state->abm_level !=
9396 dm_new_con_state->abm_level)
9397 new_crtc_state->connectors_changed = true;
9398 }
9399
9400 #if defined(CONFIG_DRM_AMD_DC_DCN)
9401 if (dc_resource_is_dsc_encoding_supported(dc)) {
9402 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9403 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9404 ret = add_affected_mst_dsc_crtcs(state, crtc);
9405 if (ret) {
9406 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
9407 goto fail;
9408 }
9409 }
9410 }
9411 if (!pre_validate_dsc(state, &dm_state, vars)) {
9412 ret = -EINVAL;
9413 goto fail;
9414 }
9415 }
9416 #endif
9417 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9418 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9419
9420 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9421 !new_crtc_state->color_mgmt_changed &&
9422 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9423 dm_old_crtc_state->dsc_force_changed == false)
9424 continue;
9425
9426 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
9427 if (ret) {
9428 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
9429 goto fail;
9430 }
9431
9432 if (!new_crtc_state->enable)
9433 continue;
9434
9435 ret = drm_atomic_add_affected_connectors(state, crtc);
9436 if (ret) {
9437 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
9438 goto fail;
9439 }
9440
9441 ret = drm_atomic_add_affected_planes(state, crtc);
9442 if (ret) {
9443 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
9444 goto fail;
9445 }
9446
9447 if (dm_old_crtc_state->dsc_force_changed)
9448 new_crtc_state->mode_changed = true;
9449 }
9450
9451
9452
9453
9454
9455
9456 drm_for_each_crtc(crtc, dev) {
9457 bool modified = false;
9458
9459 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9460 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9461 continue;
9462
9463 if (new_plane_state->crtc == crtc ||
9464 old_plane_state->crtc == crtc) {
9465 modified = true;
9466 break;
9467 }
9468 }
9469
9470 if (!modified)
9471 continue;
9472
9473 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9474 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9475 continue;
9476
9477 new_plane_state =
9478 drm_atomic_get_plane_state(state, plane);
9479
9480 if (IS_ERR(new_plane_state)) {
9481 ret = PTR_ERR(new_plane_state);
9482 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
9483 goto fail;
9484 }
9485 }
9486 }
9487
9488
9489
9490
9491
9492
9493
9494 drm_atomic_normalize_zpos(dev, state);
9495
9496
9497 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9498 ret = dm_update_plane_state(dc, state, plane,
9499 old_plane_state,
9500 new_plane_state,
9501 false,
9502 &lock_and_validation_needed);
9503 if (ret) {
9504 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9505 goto fail;
9506 }
9507 }
9508
9509
9510 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9511 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9512 old_crtc_state,
9513 new_crtc_state,
9514 false,
9515 &lock_and_validation_needed);
9516 if (ret) {
9517 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
9518 goto fail;
9519 }
9520 }
9521
9522
9523 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9524 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9525 old_crtc_state,
9526 new_crtc_state,
9527 true,
9528 &lock_and_validation_needed);
9529 if (ret) {
9530 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
9531 goto fail;
9532 }
9533 }
9534
9535
9536 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9537 ret = dm_update_plane_state(dc, state, plane,
9538 old_plane_state,
9539 new_plane_state,
9540 true,
9541 &lock_and_validation_needed);
9542 if (ret) {
9543 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9544 goto fail;
9545 }
9546 }
9547
9548
9549 ret = drm_atomic_helper_check_planes(dev, state);
9550 if (ret) {
9551 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
9552 goto fail;
9553 }
9554
9555 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9556 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9557 if (dm_new_crtc_state->mpo_requested)
9558 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
9559 }
9560
9561
9562 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9563 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9564 if (ret) {
9565 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
9566 goto fail;
9567 }
9568 }
9569
9570 if (state->legacy_cursor_update) {
9571
9572
9573
9574
9575
9576 state->async_update =
9577 !drm_atomic_helper_async_check(dev, state);
9578
9579
9580
9581
9582
9583
9584
9585
9586 if (state->async_update)
9587 return 0;
9588 }
9589
9590
9591
9592
9593
9594
9595 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9596 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9597 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9598 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9599
9600
9601 if (!acrtc || drm_atomic_crtc_needs_modeset(
9602 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9603 continue;
9604
9605
9606 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9607 continue;
9608
9609 lock_and_validation_needed = true;
9610 }
9611
9612 #if defined(CONFIG_DRM_AMD_DC_DCN)
9613
9614 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
9615 struct amdgpu_dm_connector *aconnector;
9616 struct drm_connector *connector;
9617 struct drm_connector_list_iter iter;
9618 u8 link_coding_cap;
9619
9620 if (!mgr->mst_state )
9621 continue;
9622
9623 drm_connector_list_iter_begin(dev, &iter);
9624 drm_for_each_connector_iter(connector, &iter) {
9625 int id = connector->index;
9626
9627 if (id == mst_state->mgr->conn_base_id) {
9628 aconnector = to_amdgpu_dm_connector(connector);
9629 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
9630 drm_dp_mst_update_slots(mst_state, link_coding_cap);
9631
9632 break;
9633 }
9634 }
9635 drm_connector_list_iter_end(&iter);
9636
9637 }
9638 #endif
9639
9640
9641
9642
9643
9644
9645
9646
9647
9648
9649
9650
9651
9652 if (lock_and_validation_needed) {
9653 ret = dm_atomic_get_state(state, &dm_state);
9654 if (ret) {
9655 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
9656 goto fail;
9657 }
9658
9659 ret = do_aquire_global_lock(dev, state);
9660 if (ret) {
9661 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
9662 goto fail;
9663 }
9664
9665 #if defined(CONFIG_DRM_AMD_DC_DCN)
9666 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
9667 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
9668 ret = -EINVAL;
9669 goto fail;
9670 }
9671
9672 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
9673 if (ret) {
9674 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
9675 goto fail;
9676 }
9677 #endif
9678
9679
9680
9681
9682
9683
9684
9685 ret = drm_dp_mst_atomic_check(state);
9686 if (ret) {
9687 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
9688 goto fail;
9689 }
9690 status = dc_validate_global_state(dc, dm_state->context, true);
9691 if (status != DC_OK) {
9692 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
9693 dc_status_to_str(status), status);
9694 ret = -EINVAL;
9695 goto fail;
9696 }
9697 } else {
9698
9699
9700
9701
9702
9703
9704
9705
9706
9707
9708
9709
9710
9711
9712 for (i = 0; i < state->num_private_objs; i++) {
9713 struct drm_private_obj *obj = state->private_objs[i].ptr;
9714
9715 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9716 int j = state->num_private_objs-1;
9717
9718 dm_atomic_destroy_state(obj,
9719 state->private_objs[i].state);
9720
9721
9722
9723
9724
9725 if (i != j)
9726 state->private_objs[i] =
9727 state->private_objs[j];
9728
9729 state->private_objs[j].ptr = NULL;
9730 state->private_objs[j].state = NULL;
9731 state->private_objs[j].old_state = NULL;
9732 state->private_objs[j].new_state = NULL;
9733
9734 state->num_private_objs = j;
9735 break;
9736 }
9737 }
9738 }
9739
9740
9741 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9742 struct dm_crtc_state *dm_new_crtc_state =
9743 to_dm_crtc_state(new_crtc_state);
9744
9745 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9746 UPDATE_TYPE_FULL :
9747 UPDATE_TYPE_FAST;
9748 }
9749
9750
9751 WARN_ON(ret);
9752
9753 trace_amdgpu_dm_atomic_check_finish(state, ret);
9754
9755 return ret;
9756
9757 fail:
9758 if (ret == -EDEADLK)
9759 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9760 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9761 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9762 else
9763 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9764
9765 trace_amdgpu_dm_atomic_check_finish(state, ret);
9766
9767 return ret;
9768 }
9769
9770 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9771 struct amdgpu_dm_connector *amdgpu_dm_connector)
9772 {
9773 uint8_t dpcd_data;
9774 bool capable = false;
9775
9776 if (amdgpu_dm_connector->dc_link &&
9777 dm_helpers_dp_read_dpcd(
9778 NULL,
9779 amdgpu_dm_connector->dc_link,
9780 DP_DOWN_STREAM_PORT_COUNT,
9781 &dpcd_data,
9782 sizeof(dpcd_data))) {
9783 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9784 }
9785
9786 return capable;
9787 }
9788
9789 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
9790 unsigned int offset,
9791 unsigned int total_length,
9792 uint8_t *data,
9793 unsigned int length,
9794 struct amdgpu_hdmi_vsdb_info *vsdb)
9795 {
9796 bool res;
9797 union dmub_rb_cmd cmd;
9798 struct dmub_cmd_send_edid_cea *input;
9799 struct dmub_cmd_edid_cea_output *output;
9800
9801 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
9802 return false;
9803
9804 memset(&cmd, 0, sizeof(cmd));
9805
9806 input = &cmd.edid_cea.data.input;
9807
9808 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
9809 cmd.edid_cea.header.sub_type = 0;
9810 cmd.edid_cea.header.payload_bytes =
9811 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
9812 input->offset = offset;
9813 input->length = length;
9814 input->cea_total_length = total_length;
9815 memcpy(input->payload, data, length);
9816
9817 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
9818 if (!res) {
9819 DRM_ERROR("EDID CEA parser failed\n");
9820 return false;
9821 }
9822
9823 output = &cmd.edid_cea.data.output;
9824
9825 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
9826 if (!output->ack.success) {
9827 DRM_ERROR("EDID CEA ack failed at offset %d\n",
9828 output->ack.offset);
9829 }
9830 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
9831 if (!output->amd_vsdb.vsdb_found)
9832 return false;
9833
9834 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
9835 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
9836 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
9837 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
9838 } else {
9839 DRM_WARN("Unknown EDID CEA parser results\n");
9840 return false;
9841 }
9842
9843 return true;
9844 }
9845
9846 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
9847 uint8_t *edid_ext, int len,
9848 struct amdgpu_hdmi_vsdb_info *vsdb_info)
9849 {
9850 int i;
9851
9852
9853 for (i = 0; i < len; i += 8) {
9854 bool res;
9855 int offset;
9856
9857
9858 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
9859 return false;
9860
9861 if (i+8 == len) {
9862
9863 int version, min_rate, max_rate;
9864
9865 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
9866 if (res) {
9867
9868 vsdb_info->freesync_supported = 1;
9869 vsdb_info->amd_vsdb_version = version;
9870 vsdb_info->min_refresh_rate_hz = min_rate;
9871 vsdb_info->max_refresh_rate_hz = max_rate;
9872 return true;
9873 }
9874
9875 return false;
9876 }
9877
9878
9879 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
9880 if (!res)
9881 return false;
9882 }
9883
9884 return false;
9885 }
9886
9887 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
9888 uint8_t *edid_ext, int len,
9889 struct amdgpu_hdmi_vsdb_info *vsdb_info)
9890 {
9891 int i;
9892
9893
9894 for (i = 0; i < len; i += 8) {
9895
9896 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
9897 return false;
9898 }
9899
9900 return vsdb_info->freesync_supported;
9901 }
9902
9903 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
9904 uint8_t *edid_ext, int len,
9905 struct amdgpu_hdmi_vsdb_info *vsdb_info)
9906 {
9907 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
9908
9909 if (adev->dm.dmub_srv)
9910 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
9911 else
9912 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
9913 }
9914
9915 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
9916 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
9917 {
9918 uint8_t *edid_ext = NULL;
9919 int i;
9920 bool valid_vsdb_found = false;
9921
9922
9923
9924 if (edid == NULL || edid->extensions == 0)
9925 return -ENODEV;
9926
9927
9928 for (i = 0; i < edid->extensions; i++) {
9929 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
9930 if (edid_ext[0] == CEA_EXT)
9931 break;
9932 }
9933
9934 if (i == edid->extensions)
9935 return -ENODEV;
9936
9937
9938 if (edid_ext[0] != CEA_EXT)
9939 return -ENODEV;
9940
9941 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
9942
9943 return valid_vsdb_found ? i : -ENODEV;
9944 }
9945
9946 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9947 struct edid *edid)
9948 {
9949 int i = 0;
9950 struct detailed_timing *timing;
9951 struct detailed_non_pixel *data;
9952 struct detailed_data_monitor_range *range;
9953 struct amdgpu_dm_connector *amdgpu_dm_connector =
9954 to_amdgpu_dm_connector(connector);
9955 struct dm_connector_state *dm_con_state = NULL;
9956 struct dc_sink *sink;
9957
9958 struct drm_device *dev = connector->dev;
9959 struct amdgpu_device *adev = drm_to_adev(dev);
9960 bool freesync_capable = false;
9961 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
9962
9963 if (!connector->state) {
9964 DRM_ERROR("%s - Connector has no state", __func__);
9965 goto update;
9966 }
9967
9968 sink = amdgpu_dm_connector->dc_sink ?
9969 amdgpu_dm_connector->dc_sink :
9970 amdgpu_dm_connector->dc_em_sink;
9971
9972 if (!edid || !sink) {
9973 dm_con_state = to_dm_connector_state(connector->state);
9974
9975 amdgpu_dm_connector->min_vfreq = 0;
9976 amdgpu_dm_connector->max_vfreq = 0;
9977 amdgpu_dm_connector->pixel_clock_mhz = 0;
9978 connector->display_info.monitor_range.min_vfreq = 0;
9979 connector->display_info.monitor_range.max_vfreq = 0;
9980 freesync_capable = false;
9981
9982 goto update;
9983 }
9984
9985 dm_con_state = to_dm_connector_state(connector->state);
9986
9987 if (!adev->dm.freesync_module)
9988 goto update;
9989
9990
9991 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9992 || sink->sink_signal == SIGNAL_TYPE_EDP) {
9993 bool edid_check_required = false;
9994
9995 if (edid) {
9996 edid_check_required = is_dp_capable_without_timing_msa(
9997 adev->dm.dc,
9998 amdgpu_dm_connector);
9999 }
10000
10001 if (edid_check_required == true && (edid->version > 1 ||
10002 (edid->version == 1 && edid->revision > 1))) {
10003 for (i = 0; i < 4; i++) {
10004
10005 timing = &edid->detailed_timings[i];
10006 data = &timing->data.other_data;
10007 range = &data->data.range;
10008
10009
10010
10011 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10012 continue;
10013
10014
10015
10016
10017
10018
10019 if (range->flags != 1)
10020 continue;
10021
10022 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10023 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10024 amdgpu_dm_connector->pixel_clock_mhz =
10025 range->pixel_clock_mhz * 10;
10026
10027 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10028 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10029
10030 break;
10031 }
10032
10033 if (amdgpu_dm_connector->max_vfreq -
10034 amdgpu_dm_connector->min_vfreq > 10) {
10035
10036 freesync_capable = true;
10037 }
10038 }
10039 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10040 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10041 if (i >= 0 && vsdb_info.freesync_supported) {
10042 timing = &edid->detailed_timings[i];
10043 data = &timing->data.other_data;
10044
10045 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10046 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10047 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10048 freesync_capable = true;
10049
10050 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10051 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10052 }
10053 }
10054
10055 update:
10056 if (dm_con_state)
10057 dm_con_state->freesync_capable = freesync_capable;
10058
10059 if (connector->vrr_capable_property)
10060 drm_connector_set_vrr_capable_property(connector,
10061 freesync_capable);
10062 }
10063
10064 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10065 {
10066 struct amdgpu_device *adev = drm_to_adev(dev);
10067 struct dc *dc = adev->dm.dc;
10068 int i;
10069
10070 mutex_lock(&adev->dm.dc_lock);
10071 if (dc->current_state) {
10072 for (i = 0; i < dc->current_state->stream_count; ++i)
10073 dc->current_state->streams[i]
10074 ->triggered_crtc_reset.enabled =
10075 adev->dm.force_timing_sync;
10076
10077 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10078 dc_trigger_sync(dc, dc->current_state);
10079 }
10080 mutex_unlock(&adev->dm.dc_lock);
10081 }
10082
10083 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10084 uint32_t value, const char *func_name)
10085 {
10086 #ifdef DM_CHECK_ADDR_0
10087 if (address == 0) {
10088 DC_ERR("invalid register write. address = 0");
10089 return;
10090 }
10091 #endif
10092 cgs_write_register(ctx->cgs_device, address, value);
10093 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10094 }
10095
10096 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10097 const char *func_name)
10098 {
10099 uint32_t value;
10100 #ifdef DM_CHECK_ADDR_0
10101 if (address == 0) {
10102 DC_ERR("invalid register read; address = 0\n");
10103 return 0;
10104 }
10105 #endif
10106
10107 if (ctx->dmub_srv &&
10108 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10109 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10110 ASSERT(false);
10111 return 0;
10112 }
10113
10114 value = cgs_read_register(ctx->cgs_device, address);
10115
10116 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10117
10118 return value;
10119 }
10120
10121 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
10122 struct dc_context *ctx,
10123 uint8_t status_type,
10124 uint32_t *operation_result)
10125 {
10126 struct amdgpu_device *adev = ctx->driver_context;
10127 int return_status = -1;
10128 struct dmub_notification *p_notify = adev->dm.dmub_notify;
10129
10130 if (is_cmd_aux) {
10131 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
10132 return_status = p_notify->aux_reply.length;
10133 *operation_result = p_notify->result;
10134 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
10135 *operation_result = AUX_RET_ERROR_TIMEOUT;
10136 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
10137 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
10138 } else {
10139 *operation_result = AUX_RET_ERROR_UNKNOWN;
10140 }
10141 } else {
10142 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
10143 return_status = 0;
10144 *operation_result = p_notify->sc_status;
10145 } else {
10146 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
10147 }
10148 }
10149
10150 return return_status;
10151 }
10152
10153 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
10154 unsigned int link_index, void *cmd_payload, void *operation_result)
10155 {
10156 struct amdgpu_device *adev = ctx->driver_context;
10157 int ret = 0;
10158
10159 if (is_cmd_aux) {
10160 dc_process_dmub_aux_transfer_async(ctx->dc,
10161 link_index, (struct aux_payload *)cmd_payload);
10162 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
10163 (struct set_config_cmd_payload *)cmd_payload,
10164 adev->dm.dmub_notify)) {
10165 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10166 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
10167 (uint32_t *)operation_result);
10168 }
10169
10170 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
10171 if (ret == 0) {
10172 DRM_ERROR("wait_for_completion_timeout timeout!");
10173 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10174 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
10175 (uint32_t *)operation_result);
10176 }
10177
10178 if (is_cmd_aux) {
10179 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10180 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
10181
10182 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
10183 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10184 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
10185 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10186 adev->dm.dmub_notify->aux_reply.length);
10187 }
10188 }
10189 }
10190
10191 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10192 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
10193 (uint32_t *)operation_result);
10194 }
10195
10196
10197
10198
10199
10200
10201
10202
10203 bool check_seamless_boot_capability(struct amdgpu_device *adev)
10204 {
10205 switch (adev->asic_type) {
10206 case CHIP_VANGOGH:
10207 if (!adev->mman.keep_stolen_vga_memory)
10208 return true;
10209 break;
10210 default:
10211 break;
10212 }
10213
10214 return false;
10215 }