Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2014 Red Hat
0004  * Author: Rob Clark <robdclark@gmail.com>
0005  */
0006 
0007 #include <drm/drm_atomic_uapi.h>
0008 #include <drm/drm_vblank.h>
0009 
0010 #include "msm_atomic_trace.h"
0011 #include "msm_drv.h"
0012 #include "msm_gem.h"
0013 #include "msm_kms.h"
0014 
0015 /*
0016  * Helpers to control vblanks while we flush.. basically just to ensure
0017  * that vblank accounting is switched on, so we get valid seqn/timestamp
0018  * on pageflip events (if requested)
0019  */
0020 
0021 static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
0022 {
0023     struct drm_crtc *crtc;
0024 
0025     for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
0026         if (!crtc->state->active)
0027             continue;
0028         drm_crtc_vblank_get(crtc);
0029     }
0030 }
0031 
0032 static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
0033 {
0034     struct drm_crtc *crtc;
0035 
0036     for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
0037         if (!crtc->state->active)
0038             continue;
0039         drm_crtc_vblank_put(crtc);
0040     }
0041 }
0042 
0043 static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
0044 {
0045     int crtc_index;
0046     struct drm_crtc *crtc;
0047 
0048     for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
0049         crtc_index = drm_crtc_index(crtc);
0050         mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
0051     }
0052 }
0053 
0054 static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
0055 {
0056     struct drm_crtc *crtc;
0057 
0058     for_each_crtc_mask_reverse(kms->dev, crtc, crtc_mask)
0059         mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]);
0060 }
0061 
/*
 * Flush a previously-deferred async update for a single crtc.  Runs
 * from the pending-timer worker (msm_atomic_pending_work()), shortly
 * before the vsync the update was deferred to.
 */
static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
    unsigned crtc_mask = BIT(crtc_idx);

    trace_msm_atomic_async_commit_start(crtc_mask);

    lock_crtcs(kms, crtc_mask);

    /*
     * Nothing to do if the pending bit was already cleared, i.e. the
     * update was folded into an intervening commit:
     */
    if (!(kms->pending_crtc_mask & crtc_mask)) {
        unlock_crtcs(kms, crtc_mask);
        goto out;
    }

    kms->pending_crtc_mask &= ~crtc_mask;

    kms->funcs->enable_commit(kms);

    /* Keep vblank accounting on for valid seqn/timestamp on events: */
    vblank_get(kms, crtc_mask);

    /*
     * Flush hardware updates:
     */
    trace_msm_atomic_flush_commit(crtc_mask);
    kms->funcs->flush_commit(kms, crtc_mask);

    /*
     * Wait for flush to complete:
     */
    trace_msm_atomic_wait_flush_start(crtc_mask);
    kms->funcs->wait_flush(kms, crtc_mask);
    trace_msm_atomic_wait_flush_finish(crtc_mask);

    vblank_put(kms, crtc_mask);

    kms->funcs->complete_commit(kms, crtc_mask);
    unlock_crtcs(kms, crtc_mask);
    kms->funcs->disable_commit(kms);

out:
    trace_msm_atomic_async_commit_finish(crtc_mask);
}
0103 
0104 static void msm_atomic_pending_work(struct kthread_work *work)
0105 {
0106     struct msm_pending_timer *timer = container_of(work,
0107             struct msm_pending_timer, work.work);
0108 
0109     msm_atomic_async_commit(timer->kms, timer->crtc_idx);
0110 }
0111 
0112 int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
0113         struct msm_kms *kms, int crtc_idx)
0114 {
0115     timer->kms = kms;
0116     timer->crtc_idx = crtc_idx;
0117 
0118     timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
0119     if (IS_ERR(timer->worker)) {
0120         int ret = PTR_ERR(timer->worker);
0121         timer->worker = NULL;
0122         return ret;
0123     }
0124     sched_set_fifo(timer->worker->task);
0125 
0126     msm_hrtimer_work_init(&timer->work, timer->worker,
0127                   msm_atomic_pending_work,
0128                   CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
0129 
0130     return 0;
0131 }
0132 
0133 void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer)
0134 {
0135     if (timer->worker)
0136         kthread_destroy_worker(timer->worker);
0137 }
0138 
0139 static bool can_do_async(struct drm_atomic_state *state,
0140         struct drm_crtc **async_crtc)
0141 {
0142     struct drm_connector_state *connector_state;
0143     struct drm_connector *connector;
0144     struct drm_crtc_state *crtc_state;
0145     struct drm_crtc *crtc;
0146     int i, num_crtcs = 0;
0147 
0148     if (!(state->legacy_cursor_update || state->async_update))
0149         return false;
0150 
0151     /* any connector change, means slow path: */
0152     for_each_new_connector_in_state(state, connector, connector_state, i)
0153         return false;
0154 
0155     for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
0156         if (drm_atomic_crtc_needs_modeset(crtc_state))
0157             return false;
0158         if (++num_crtcs > 1)
0159             return false;
0160         *async_crtc = crtc;
0161     }
0162 
0163     return true;
0164 }
0165 
0166 /* Get bitmask of crtcs that will need to be flushed.  The bitmask
0167  * can be used with for_each_crtc_mask() iterator, to iterate
0168  * effected crtcs without needing to preserve the atomic state.
0169  */
0170 static unsigned get_crtc_mask(struct drm_atomic_state *state)
0171 {
0172     struct drm_crtc_state *crtc_state;
0173     struct drm_crtc *crtc;
0174     unsigned i, mask = 0;
0175 
0176     for_each_new_crtc_in_state(state, crtc, crtc_state, i)
0177         mask |= drm_crtc_mask(crtc);
0178 
0179     return mask;
0180 }
0181 
/*
 * The atomic commit-tail for msm: push the new state to hardware and
 * either flush synchronously, or — for single-crtc async updates when
 * the kms backend can predict vsync time — defer the flush to a
 * pending-timer that fires shortly before vsync.
 */
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
    struct drm_device *dev = state->dev;
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_kms *kms = priv->kms;
    struct drm_crtc *async_crtc = NULL;
    unsigned crtc_mask = get_crtc_mask(state);
    /* async path requires a vsync_time hook to schedule the timer */
    bool async = kms->funcs->vsync_time &&
            can_do_async(state, &async_crtc);

    trace_msm_atomic_commit_tail_start(async, crtc_mask);

    kms->funcs->enable_commit(kms);

    /*
     * Ensure any previous (potentially async) commit has
     * completed:
     */
    lock_crtcs(kms, crtc_mask);
    trace_msm_atomic_wait_flush_start(crtc_mask);
    kms->funcs->wait_flush(kms, crtc_mask);
    trace_msm_atomic_wait_flush_finish(crtc_mask);

    /*
     * Now that there is no in-progress flush, prepare the
     * current update:
     */
    kms->funcs->prepare_commit(kms, state);

    /*
     * Push atomic updates down to hardware:
     */
    drm_atomic_helper_commit_modeset_disables(dev, state);
    drm_atomic_helper_commit_planes(dev, state, 0);
    drm_atomic_helper_commit_modeset_enables(dev, state);

    if (async) {
        struct msm_pending_timer *timer =
            &kms->pending_timers[drm_crtc_index(async_crtc)];

        /* async updates are limited to single-crtc updates: */
        WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));

        /*
         * Start timer if we don't already have an update pending
         * on this crtc:
         */
        if (!(kms->pending_crtc_mask & crtc_mask)) {
            ktime_t vsync_time, wakeup_time;

            kms->pending_crtc_mask |= crtc_mask;

            /* wake 1ms before the predicted vsync to flush */
            vsync_time = kms->funcs->vsync_time(kms, async_crtc);
            wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));

            msm_hrtimer_queue_work(&timer->work, wakeup_time,
                    HRTIMER_MODE_ABS);
        }

        kms->funcs->disable_commit(kms);
        unlock_crtcs(kms, crtc_mask);
        /*
         * At this point, from drm core's perspective, we
         * are done with the atomic update, so we can just
         * go ahead and signal that it is done:
         */
        drm_atomic_helper_commit_hw_done(state);
        drm_atomic_helper_cleanup_planes(dev, state);

        trace_msm_atomic_commit_tail_finish(async, crtc_mask);

        return;
    }

    /*
     * If there is any async flush pending on updated crtcs, fold
     * them into the current flush.
     */
    kms->pending_crtc_mask &= ~crtc_mask;

    /* vblank accounting on, for valid seqn/timestamp on events: */
    vblank_get(kms, crtc_mask);

    /*
     * Flush hardware updates:
     */
    trace_msm_atomic_flush_commit(crtc_mask);
    kms->funcs->flush_commit(kms, crtc_mask);
    /* commit locks are dropped across the wait and re-taken below */
    unlock_crtcs(kms, crtc_mask);
    /*
     * Wait for flush to complete:
     */
    trace_msm_atomic_wait_flush_start(crtc_mask);
    kms->funcs->wait_flush(kms, crtc_mask);
    trace_msm_atomic_wait_flush_finish(crtc_mask);

    vblank_put(kms, crtc_mask);

    lock_crtcs(kms, crtc_mask);
    kms->funcs->complete_commit(kms, crtc_mask);
    unlock_crtcs(kms, crtc_mask);
    kms->funcs->disable_commit(kms);

    drm_atomic_helper_commit_hw_done(state);
    drm_atomic_helper_cleanup_planes(dev, state);

    trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}