0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026 #include <drm/drm_vblank.h>
0027 #include <drm/drm_atomic_helper.h>
0028
0029 #include "dc.h"
0030 #include "amdgpu.h"
0031 #include "amdgpu_dm_psr.h"
0032 #include "amdgpu_dm_crtc.h"
0033 #include "amdgpu_dm_plane.h"
0034 #include "amdgpu_dm_trace.h"
0035 #include "amdgpu_dm_debugfs.h"
0036
/*
 * dm_crtc_handle_vblank() - vblank interrupt bottom half for one CRTC.
 * @acrtc: the amdgpu CRTC that raised the vblank interrupt.
 *
 * Forwards the vblank to DRM core and, if userspace has a pending event on
 * this CRTC, delivers it — unless a page flip is still in flight, in which
 * case the flip completion handler owns the event and will send it.
 */
void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
	struct drm_crtc *crtc = &acrtc->base;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	/* Update DRM core vblank counter/timestamp bookkeeping. */
	drm_crtc_handle_vblank(crtc);

	spin_lock_irqsave(&dev->event_lock, flags);

	/*
	 * Deliver the pending event only when no flip is in the SUBMITTED
	 * state; otherwise the pflip IRQ handler sends it on completion.
	 */
	if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		drm_crtc_send_vblank_event(crtc, acrtc->event);
		/* Balance the vblank reference taken when the event was armed. */
		drm_crtc_vblank_put(crtc);
		acrtc->event = NULL;
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
0056
0057 bool modeset_required(struct drm_crtc_state *crtc_state,
0058 struct dc_stream_state *new_stream,
0059 struct dc_stream_state *old_stream)
0060 {
0061 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
0062 }
0063
0064 bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
0065
0066 {
0067 return acrtc->dm_irq_params.freesync_config.state ==
0068 VRR_STATE_ACTIVE_VARIABLE ||
0069 acrtc->dm_irq_params.freesync_config.state ==
0070 VRR_STATE_ACTIVE_FIXED;
0071 }
0072
0073 int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
0074 {
0075 enum dc_irq_source irq_source;
0076 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
0077 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
0078 int rc;
0079
0080 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
0081
0082 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
0083
0084 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
0085 acrtc->crtc_id, enable ? "en" : "dis", rc);
0086 return rc;
0087 }
0088
0089 bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
0090 {
0091 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
0092 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
0093 }
0094
/*
 * vblank_control_worker() - deferred vblank enable/disable side effects.
 * @work: embedded work item inside a struct vblank_control_work.
 *
 * Runs on dm->vblank_control_workqueue. Maintains the global count of CRTCs
 * with vblank enabled, gates DC idle optimizations (MALL) on that count, and
 * drives PSR entry/exit for the affected stream. Frees the work item and
 * drops the stream reference taken by dm_set_vblank().
 */
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	/* Idle optimizations are only allowed when no CRTC needs vblanks. */
	dc_allow_idle_optimizations(
		dm->dc, dm->active_vblank_irq_count == 0 ? true : false);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			/*
			 * Vblank is being enabled: kick pre-SU-capable panels
			 * out of PSR so scanout timing is predictable.
			 * PSR-SU (and newer) can stay active across vblanks.
			 */
			if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
			    vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			/* Vblank off and PSR entry permitted: re-enter PSR. */
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	/* Drop the reference dm_set_vblank() took before queueing us. */
	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}
0141
/*
 * dm_set_vblank() - enable or disable vblank interrupts on a CRTC.
 * @crtc: the DRM CRTC.
 * @enable: true to enable, false to disable.
 *
 * Toggles the VUPDATE irq (needed alongside vblank when VRR is active),
 * then the VBLANK irq itself, and finally queues deferred work to adjust
 * idle optimizations and PSR state. May be called from atomic context,
 * hence GFP_ATOMIC for the work allocation.
 *
 * Return: 0 on success, -EBUSY if DC rejects an irq change, -ENOMEM if the
 * deferred-work allocation fails.
 */
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct vblank_control_work *work;
	int rc = 0;

	if (enable) {
		/* VUPDATE is only needed together with vblank when VRR runs. */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* Disabling vblank never needs VUPDATE; turn it off too. */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;

	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
		return -EBUSY;

	/* During GPU reset, skip the deferred PSR/MALL bookkeeping. */
	if (amdgpu_in_reset(adev))
		return 0;

	if (dm->vblank_control_workqueue) {
		/* Atomic allocation: this path can run with irqs disabled. */
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, vblank_control_worker);
		work->dm = dm;
		work->acrtc = acrtc;
		work->enable = enable;

		/* Worker drops this reference via dc_stream_release(). */
		if (acrtc_state->stream) {
			dc_stream_retain(acrtc_state->stream);
			work->stream = acrtc_state->stream;
		}

		queue_work(dm->vblank_control_workqueue, &work->work);
	}

	return 0;
}
0192
/* DRM enable_vblank hook: thin wrapper around dm_set_vblank(). */
int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
0197
/* DRM disable_vblank hook: errors cannot be reported, so they are ignored. */
void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
0202
0203 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
0204 struct drm_crtc_state *state)
0205 {
0206 struct dm_crtc_state *cur = to_dm_crtc_state(state);
0207
0208
0209 if (cur->stream)
0210 dc_stream_release(cur->stream);
0211
0212
0213 __drm_atomic_helper_crtc_destroy_state(state);
0214
0215
0216 kfree(state);
0217 }
0218
0219 static struct drm_crtc_state *dm_crtc_duplicate_state(struct drm_crtc *crtc)
0220 {
0221 struct dm_crtc_state *state, *cur;
0222
0223 cur = to_dm_crtc_state(crtc->state);
0224
0225 if (WARN_ON(!crtc->state))
0226 return NULL;
0227
0228 state = kzalloc(sizeof(*state), GFP_KERNEL);
0229 if (!state)
0230 return NULL;
0231
0232 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
0233
0234 if (cur->stream) {
0235 state->stream = cur->stream;
0236 dc_stream_retain(state->stream);
0237 }
0238
0239 state->active_planes = cur->active_planes;
0240 state->vrr_infopacket = cur->vrr_infopacket;
0241 state->abm_level = cur->abm_level;
0242 state->vrr_supported = cur->vrr_supported;
0243 state->freesync_config = cur->freesync_config;
0244 state->cm_has_degamma = cur->cm_has_degamma;
0245 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
0246 state->crc_skip_count = cur->crc_skip_count;
0247 state->mpo_requested = cur->mpo_requested;
0248
0249
0250 return &state->base;
0251 }
0252
/* DRM destroy hook: unregister the CRTC and free the containing amdgpu_crtc
 * (acrtc->base is the first member, so freeing via crtc is valid). */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
0258
0259 static void dm_crtc_reset_state(struct drm_crtc *crtc)
0260 {
0261 struct dm_crtc_state *state;
0262
0263 if (crtc->state)
0264 dm_crtc_destroy_state(crtc, crtc->state);
0265
0266 state = kzalloc(sizeof(*state), GFP_KERNEL);
0267 if (WARN_ON(!state))
0268 return;
0269
0270 __drm_atomic_helper_crtc_reset(crtc, &state->base);
0271 }
0272
#ifdef CONFIG_DEBUG_FS
/* late_register hook: create per-CRTC debugfs entries (CRC, etc.). */
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
	crtc_debugfs_init(crtc);

	return 0;
}
#endif
0281
0282
/* CRTC vtable: DM-specific state lifecycle and vblank/CRC hooks, with the
 * generic atomic helpers for set_config and page_flip. */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DEBUG_FS)
	.late_register = amdgpu_dm_crtc_late_register,
#endif
};
0301
/* Intentional no-op: CRTC disable is handled through the atomic commit
 * path in DM, not through this legacy helper. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
0305
0306 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
0307 {
0308 struct drm_atomic_state *state = new_crtc_state->state;
0309 struct drm_plane *plane;
0310 int num_active = 0;
0311
0312 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
0313 struct drm_plane_state *new_plane_state;
0314
0315
0316 if (plane->type == DRM_PLANE_TYPE_CURSOR)
0317 continue;
0318
0319 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
0320
0321 if (!new_plane_state) {
0322
0323
0324
0325
0326
0327 num_active += 1;
0328 continue;
0329 }
0330
0331
0332 num_active += (new_plane_state->fb != NULL);
0333 }
0334
0335 return num_active;
0336 }
0337
0338 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
0339 struct drm_crtc_state *new_crtc_state)
0340 {
0341 struct dm_crtc_state *dm_new_crtc_state =
0342 to_dm_crtc_state(new_crtc_state);
0343
0344 dm_new_crtc_state->active_planes = 0;
0345
0346 if (!dm_new_crtc_state->stream)
0347 return;
0348
0349 dm_new_crtc_state->active_planes =
0350 count_crtc_active_planes(new_crtc_state);
0351 }
0352
/* Intentional pass-through: DC validates and adjusts modes itself, so the
 * DRM-level fixup accepts every mode unchanged. */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
0359
/*
 * dm_crtc_helper_atomic_check() - validate a proposed CRTC state.
 * @crtc: the CRTC being checked.
 * @state: the full atomic state the CRTC state belongs to.
 *
 * Refreshes the active-plane count, rejects inconsistent stream/modeset
 * combinations, enforces that an enabled CRTC has its primary plane
 * enabled, and finally asks DC to validate the stream.
 *
 * Return: 0 when the state is acceptable, -EINVAL otherwise.
 */
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	int ret = -EINVAL;

	trace_amdgpu_dm_crtc_atomic_check(crtc_state);

	dm_update_crtc_active_planes(crtc, crtc_state);

	/* A modeset with no stream attached is an inconsistent state. */
	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
			     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
		return ret;
	}

	/*
	 * Hardware cannot scan out without the primary plane; refuse to
	 * enable a CRTC whose plane mask lacks it.
	 */
	if (crtc_state->enable &&
	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
		return -EINVAL;
	}

	/* No stream means nothing further for DC to validate. */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
	return ret;
}
0401
/* CRTC helper vtable: atomic check plus mode/scanout-position plumbing. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
0408
0409 int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
0410 struct drm_plane *plane,
0411 uint32_t crtc_index)
0412 {
0413 struct amdgpu_crtc *acrtc = NULL;
0414 struct drm_plane *cursor_plane;
0415
0416 int res = -ENOMEM;
0417
0418 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
0419 if (!cursor_plane)
0420 goto fail;
0421
0422 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
0423 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
0424
0425 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
0426 if (!acrtc)
0427 goto fail;
0428
0429 res = drm_crtc_init_with_planes(
0430 dm->ddev,
0431 &acrtc->base,
0432 plane,
0433 cursor_plane,
0434 &amdgpu_dm_crtc_funcs, NULL);
0435
0436 if (res)
0437 goto fail;
0438
0439 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
0440
0441
0442 if (acrtc->base.funcs->reset)
0443 acrtc->base.funcs->reset(&acrtc->base);
0444
0445 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
0446 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
0447
0448 acrtc->crtc_id = crtc_index;
0449 acrtc->base.enabled = false;
0450 acrtc->otg_inst = -1;
0451
0452 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
0453 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
0454 true, MAX_COLOR_LUT_ENTRIES);
0455 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
0456
0457 return 0;
0458
0459 fail:
0460 kfree(acrtc);
0461 kfree(cursor_plane);
0462 return res;
0463 }
0464