0001 /*
0002  * Copyright © 2014 Intel Corporation
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice (including the next
0012  * paragraph) shall be included in all copies or substantial portions of the
0013  * Software.
0014  *
0015  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0016  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0017  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0018  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
0019  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
0020  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
0021  * DEALINGS IN THE SOFTWARE.
0022  */
0023 
0024 /**
0025  * DOC: atomic plane helpers
0026  *
0027  * The functions here are used by the atomic plane helper functions to
0028  * implement legacy plane updates (i.e., drm_plane->update_plane() and
0029  * drm_plane->disable_plane()).  This allows plane updates to use the
0030  * atomic state infrastructure and perform plane updates as separate
0031  * prepare/check/commit/cleanup steps.
0032  */
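     /*
      * Roughly, a legacy plane update funnels through the phases implemented
      * in this file (assuming the standard drm_atomic_helper_update_plane()
      * entry point; the exact call chain depends on the core atomic helpers):
      *
      *   drm_plane->update_plane()
      *     -> drm_atomic_helper_update_plane()
      *          check:   intel_plane_atomic_check()
      *          prepare: intel_prepare_plane_fb()
      *          commit:  intel_plane_update_noarm() / intel_plane_update_arm()
      *          cleanup: intel_cleanup_plane_fb()
      */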
0033 
0034 #include <drm/drm_atomic_helper.h>
0035 #include <drm/drm_fourcc.h>
0036 #include <drm/drm_plane_helper.h>
0037 
0038 #include "gt/intel_rps.h"
0039 
0040 #include "intel_atomic_plane.h"
0041 #include "intel_cdclk.h"
0042 #include "intel_display_trace.h"
0043 #include "intel_display_types.h"
0044 #include "intel_fb.h"
0045 #include "intel_fb_pin.h"
0046 #include "intel_pm.h"
0047 #include "intel_sprite.h"
0048 #include "skl_scaler.h"
0049 
0050 static void intel_plane_state_reset(struct intel_plane_state *plane_state,
0051                     struct intel_plane *plane)
0052 {
0053     memset(plane_state, 0, sizeof(*plane_state));
0054 
0055     __drm_atomic_helper_plane_state_reset(&plane_state->uapi, &plane->base);
0056 
0057     plane_state->scaler_id = -1;
0058 }
0059 
0060 struct intel_plane *intel_plane_alloc(void)
0061 {
0062     struct intel_plane_state *plane_state;
0063     struct intel_plane *plane;
0064 
0065     plane = kzalloc(sizeof(*plane), GFP_KERNEL);
0066     if (!plane)
0067         return ERR_PTR(-ENOMEM);
0068 
0069     plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
0070     if (!plane_state) {
0071         kfree(plane);
0072         return ERR_PTR(-ENOMEM);
0073     }
0074 
0075     intel_plane_state_reset(plane_state, plane);
0076 
0077     plane->base.state = &plane_state->uapi;
0078 
0079     return plane;
0080 }
0081 
0082 void intel_plane_free(struct intel_plane *plane)
0083 {
0084     intel_plane_destroy_state(&plane->base, plane->base.state);
0085     kfree(plane);
0086 }
0087 
0088 /**
0089  * intel_plane_duplicate_state - duplicate plane state
0090  * @plane: drm plane
0091  *
0092  * Allocates and returns a copy of the plane state (both common and
0093  * Intel-specific) for the specified plane.
0094  *
0095  * Returns: The newly allocated plane state, or NULL on failure.
0096  */
0097 struct drm_plane_state *
0098 intel_plane_duplicate_state(struct drm_plane *plane)
0099 {
0100     struct intel_plane_state *intel_state;
0101 
0102     intel_state = to_intel_plane_state(plane->state);
0103     intel_state = kmemdup(intel_state, sizeof(*intel_state), GFP_KERNEL);
0104 
0105     if (!intel_state)
0106         return NULL;
0107 
0108     __drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi);
0109 
0110     intel_state->ggtt_vma = NULL;
0111     intel_state->dpt_vma = NULL;
0112     intel_state->flags = 0;
0113 
0114     /* add reference to fb */
0115     if (intel_state->hw.fb)
0116         drm_framebuffer_get(intel_state->hw.fb);
0117 
0118     return &intel_state->uapi;
0119 }
0120 
0121 /**
0122  * intel_plane_destroy_state - destroy plane state
0123  * @plane: drm plane
0124  * @state: state object to destroy
0125  *
0126  * Destroys the plane state (both common and Intel-specific) for the
0127  * specified plane.
0128  */
0129 void
0130 intel_plane_destroy_state(struct drm_plane *plane,
0131               struct drm_plane_state *state)
0132 {
0133     struct intel_plane_state *plane_state = to_intel_plane_state(state);
0134 
0135     drm_WARN_ON(plane->dev, plane_state->ggtt_vma);
0136     drm_WARN_ON(plane->dev, plane_state->dpt_vma);
0137 
0138     __drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
0139     if (plane_state->hw.fb)
0140         drm_framebuffer_put(plane_state->hw.fb);
0141     kfree(plane_state);
0142 }
0143 
0144 unsigned int intel_adjusted_rate(const struct drm_rect *src,
0145                  const struct drm_rect *dst,
0146                  unsigned int rate)
0147 {
0148     unsigned int src_w, src_h, dst_w, dst_h;
0149 
0150     src_w = drm_rect_width(src) >> 16;
0151     src_h = drm_rect_height(src) >> 16;
0152     dst_w = drm_rect_width(dst);
0153     dst_h = drm_rect_height(dst);
0154 
0155     /* Downscaling limits the maximum pixel rate */
0156     dst_w = min(src_w, dst_w);
0157     dst_h = min(src_h, dst_h);
0158 
0159     return DIV_ROUND_UP_ULL(mul_u32_u32(rate, src_w * src_h),
0160                 dst_w * dst_h);
0161 }
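     /*
      * A worked example with purely illustrative numbers: a 3840x2160 plane
      * source downscaled onto a 1920x1080 destination at a 300000 kHz pipe
      * pixel rate works out as
      *
      *   struct drm_rect src = DRM_RECT_INIT(0, 0, 3840 << 16, 2160 << 16);
      *   struct drm_rect dst = DRM_RECT_INIT(0, 0, 1920, 1080);
      *   intel_adjusted_rate(&src, &dst, 300000) == 1200000
      *
      * i.e. the 2x2 downscale quadruples the rate at which the plane must
      * fetch pixels. Upscaling never reduces the result because dst_w/dst_h
      * are clamped to the source size above.
      */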
0162 
0163 unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
0164                     const struct intel_plane_state *plane_state)
0165 {
0166     /*
0167      * Note we don't check for plane visibility here as
0168      * we want to use this when calculating the cursor
0169      * watermarks even if the cursor is fully offscreen.
0170      * That depends on the src/dst rectangles being
0171      * correctly populated whenever the watermark code
0172      * considers the cursor to be visible, whether or not
0173      * it is actually visible.
0174      *
0175      * See: intel_wm_plane_visible() and intel_check_cursor()
0176      */
0177 
0178     return intel_adjusted_rate(&plane_state->uapi.src,
0179                    &plane_state->uapi.dst,
0180                    crtc_state->pixel_rate);
0181 }
0182 
0183 unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
0184                    const struct intel_plane_state *plane_state,
0185                    int color_plane)
0186 {
0187     const struct drm_framebuffer *fb = plane_state->hw.fb;
0188 
0189     if (!plane_state->uapi.visible)
0190         return 0;
0191 
0192     return intel_plane_pixel_rate(crtc_state, plane_state) *
0193         fb->format->cpp[color_plane];
0194 }
0195 
0196 static bool
0197 use_min_ddb(const struct intel_crtc_state *crtc_state,
0198         struct intel_plane *plane)
0199 {
0200     struct drm_i915_private *i915 = to_i915(plane->base.dev);
0201 
0202     return DISPLAY_VER(i915) >= 13 &&
0203            crtc_state->uapi.async_flip &&
0204            plane->async_flip;
0205 }
0206 
0207 static unsigned int
0208 intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
0209                    const struct intel_plane_state *plane_state,
0210                    int color_plane)
0211 {
0212     struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
0213     const struct drm_framebuffer *fb = plane_state->hw.fb;
0214     int width, height;
0215 
0216     if (plane->id == PLANE_CURSOR)
0217         return 0;
0218 
0219     if (!plane_state->uapi.visible)
0220         return 0;
0221 
0222     /*
0223      * Extra ddb is allocated based on the ratio of the plane's data rate
0224      * to the total data rate. In some cases the plane should not get any
0225      * extra ddb, so don't count its data rate in those cases.
0226      */
0227     if (use_min_ddb(crtc_state, plane))
0228         return 0;
0229 
0230     /*
0231      * Src coordinates are already rotated by 270 degrees for
0232      * the 90/270 degree plane rotation cases (to match the
0233      * GTT mapping), hence no need to account for rotation here.
0234      */
0235     width = drm_rect_width(&plane_state->uapi.src) >> 16;
0236     height = drm_rect_height(&plane_state->uapi.src) >> 16;
0237 
0238     /* UV plane does 1/2 pixel sub-sampling */
0239     if (color_plane == 1) {
0240         width /= 2;
0241         height /= 2;
0242     }
0243 
0244     return width * height * fb->format->cpp[color_plane];
0245 }
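     /*
      * For example (illustrative numbers), an NV12 framebuffer with a
      * 3840x2160 visible source contributes, per color plane:
      *
      *   Y  (color_plane 0): 3840 * 2160 * 1 byte  = 8294400
      *   UV (color_plane 1): 1920 * 1080 * 2 bytes = 4147200
      *
      * i.e. the subsampled chroma plane accounts for half the relative data
      * rate of the luma plane.
      */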
0246 
0247 int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
0248                    struct intel_plane *plane,
0249                    bool *need_cdclk_calc)
0250 {
0251     struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
0252     const struct intel_plane_state *plane_state =
0253         intel_atomic_get_new_plane_state(state, plane);
0254     struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
0255     const struct intel_cdclk_state *cdclk_state;
0256     const struct intel_crtc_state *old_crtc_state;
0257     struct intel_crtc_state *new_crtc_state;
0258 
0259     if (!plane_state->uapi.visible || !plane->min_cdclk)
0260         return 0;
0261 
0262     old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
0263     new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
0264 
0265     new_crtc_state->min_cdclk[plane->id] =
0266         plane->min_cdclk(new_crtc_state, plane_state);
0267 
0268     /*
0269      * No need to check against the cdclk state if
0270      * the min cdclk for the plane doesn't increase.
0271      *
0272      * I.e. we only ever increase the cdclk due to plane
0273      * requirements. This avoids back-and-forth display
0274      * blinking caused by constant cdclk changes.
0275      */
0276     if (new_crtc_state->min_cdclk[plane->id] <=
0277         old_crtc_state->min_cdclk[plane->id])
0278         return 0;
0279 
0280     cdclk_state = intel_atomic_get_cdclk_state(state);
0281     if (IS_ERR(cdclk_state))
0282         return PTR_ERR(cdclk_state);
0283 
0284     /*
0285      * No need to recalculate the cdclk state if
0286      * the min cdclk for the pipe doesn't increase.
0287      *
0288      * I.e. we only ever increase the cdclk due to plane
0289      * requirements. This avoids back-and-forth display
0290      * blinking caused by constant cdclk changes.
0291      */
0292     if (new_crtc_state->min_cdclk[plane->id] <=
0293         cdclk_state->min_cdclk[crtc->pipe])
0294         return 0;
0295 
0296     drm_dbg_kms(&dev_priv->drm,
0297             "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
0298             plane->base.base.id, plane->base.name,
0299             new_crtc_state->min_cdclk[plane->id],
0300             crtc->base.base.id, crtc->base.name,
0301             cdclk_state->min_cdclk[crtc->pipe]);
0302     *need_cdclk_calc = true;
0303 
0304     return 0;
0305 }
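     /*
      * A sketch of the resulting behaviour with hypothetical numbers: if a
      * plane's min cdclk rises from 307200 kHz to 652800 kHz, a cdclk
      * recomputation is requested via *need_cdclk_calc, but a later drop back
      * to 307200 kHz is accepted as-is, avoiding back-and-forth cdclk changes.
      */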
0306 
0307 static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
0308 {
0309     if (plane_state->hw.fb)
0310         drm_framebuffer_put(plane_state->hw.fb);
0311 
0312     memset(&plane_state->hw, 0, sizeof(plane_state->hw));
0313 }
0314 
0315 void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
0316                        const struct intel_plane_state *from_plane_state,
0317                        struct intel_crtc *crtc)
0318 {
0319     intel_plane_clear_hw_state(plane_state);
0320 
0321     /*
0322      * For the bigjoiner slave, uapi.crtc will point at
0323      * the master crtc, so we explicitly assign the correct
0324      * slave crtc to hw.crtc. A non-NULL uapi.crtc simply indicates
0325      * that the plane is logically enabled at the uapi level.
0326      */
0327     plane_state->hw.crtc = from_plane_state->uapi.crtc ? &crtc->base : NULL;
0328 
0329     plane_state->hw.fb = from_plane_state->uapi.fb;
0330     if (plane_state->hw.fb)
0331         drm_framebuffer_get(plane_state->hw.fb);
0332 
0333     plane_state->hw.alpha = from_plane_state->uapi.alpha;
0334     plane_state->hw.pixel_blend_mode =
0335         from_plane_state->uapi.pixel_blend_mode;
0336     plane_state->hw.rotation = from_plane_state->uapi.rotation;
0337     plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding;
0338     plane_state->hw.color_range = from_plane_state->uapi.color_range;
0339     plane_state->hw.scaling_filter = from_plane_state->uapi.scaling_filter;
0340 
0341     plane_state->uapi.src = drm_plane_state_src(&from_plane_state->uapi);
0342     plane_state->uapi.dst = drm_plane_state_dest(&from_plane_state->uapi);
0343 }
0344 
0345 void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
0346                    const struct intel_plane_state *from_plane_state)
0347 {
0348     intel_plane_clear_hw_state(plane_state);
0349 
0350     memcpy(&plane_state->hw, &from_plane_state->hw,
0351            sizeof(plane_state->hw));
0352 
0353     if (plane_state->hw.fb)
0354         drm_framebuffer_get(plane_state->hw.fb);
0355 }
0356 
0357 void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
0358                    struct intel_plane_state *plane_state)
0359 {
0360     struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
0361 
0362     crtc_state->active_planes &= ~BIT(plane->id);
0363     crtc_state->scaled_planes &= ~BIT(plane->id);
0364     crtc_state->nv12_planes &= ~BIT(plane->id);
0365     crtc_state->c8_planes &= ~BIT(plane->id);
0366     crtc_state->data_rate[plane->id] = 0;
0367     crtc_state->data_rate_y[plane->id] = 0;
0368     crtc_state->rel_data_rate[plane->id] = 0;
0369     crtc_state->rel_data_rate_y[plane->id] = 0;
0370     crtc_state->min_cdclk[plane->id] = 0;
0371 
0372     plane_state->uapi.visible = false;
0373 }
0374 
0375 /* FIXME nuke when all wm code is atomic */
0376 static bool intel_wm_need_update(const struct intel_plane_state *cur,
0377                  struct intel_plane_state *new)
0378 {
0379     /* Update watermarks on tiling or size changes. */
0380     if (new->uapi.visible != cur->uapi.visible)
0381         return true;
0382 
0383     if (!cur->hw.fb || !new->hw.fb)
0384         return false;
0385 
0386     if (cur->hw.fb->modifier != new->hw.fb->modifier ||
0387         cur->hw.rotation != new->hw.rotation ||
0388         drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
0389         drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
0390         drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
0391         drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
0392         return true;
0393 
0394     return false;
0395 }
0396 
0397 static bool intel_plane_is_scaled(const struct intel_plane_state *plane_state)
0398 {
0399     int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
0400     int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
0401     int dst_w = drm_rect_width(&plane_state->uapi.dst);
0402     int dst_h = drm_rect_height(&plane_state->uapi.dst);
0403 
0404     return src_w != dst_w || src_h != dst_h;
0405 }
0406 
0407 static bool intel_plane_do_async_flip(struct intel_plane *plane,
0408                       const struct intel_crtc_state *old_crtc_state,
0409                       const struct intel_crtc_state *new_crtc_state)
0410 {
0411     struct drm_i915_private *i915 = to_i915(plane->base.dev);
0412 
0413     if (!plane->async_flip)
0414         return false;
0415 
0416     if (!new_crtc_state->uapi.async_flip)
0417         return false;
0418 
0419     /*
0420      * On display version 13 and newer we may need to override
0421      * the first async flip in order to change the watermark levels
0422      * as part of an optimization.
0423      * So for those platforms we check whether this is the first
0424      * async flip. Earlier platforms always do the async flip.
0425      */
0426     return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
0427 }
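     /*
      * For instance, on a display version 13+ platform the first async flip
      * after a sync flip (old_crtc_state->uapi.async_flip still false) returns
      * false here and is committed as a normal flip, while subsequent async
      * flips return true and take the plane->async_flip() path in
      * intel_plane_update_arm().
      */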
0428 
0429 static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
0430                        struct intel_crtc_state *new_crtc_state,
0431                        const struct intel_plane_state *old_plane_state,
0432                        struct intel_plane_state *new_plane_state)
0433 {
0434     struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
0435     struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
0436     struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
0437     bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
0438     bool was_crtc_enabled = old_crtc_state->hw.active;
0439     bool is_crtc_enabled = new_crtc_state->hw.active;
0440     bool turn_off, turn_on, visible, was_visible;
0441     int ret;
0442 
0443     if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
0444         ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
0445         if (ret)
0446             return ret;
0447     }
0448 
0449     was_visible = old_plane_state->uapi.visible;
0450     visible = new_plane_state->uapi.visible;
0451 
0452     if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
0453         was_visible = false;
0454 
0455     /*
0456      * Visibility is calculated as if the crtc was on, but
0457      * after scaler setup everything depends on it being off
0458      * when the crtc isn't active.
0459      *
0460      * FIXME this is wrong for watermarks. Watermarks should also
0461      * be computed as if the pipe would be active. Perhaps move
0462      * per-plane wm computation to the .check_plane() hook, and
0463      * only combine the results from all planes in the current place?
0464      */
0465     if (!is_crtc_enabled) {
0466         intel_plane_set_invisible(new_crtc_state, new_plane_state);
0467         visible = false;
0468     }
0469 
0470     if (!was_visible && !visible)
0471         return 0;
0472 
0473     turn_off = was_visible && (!visible || mode_changed);
0474     turn_on = visible && (!was_visible || mode_changed);
0475 
0476     drm_dbg_atomic(&dev_priv->drm,
0477                "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
0478                crtc->base.base.id, crtc->base.name,
0479                plane->base.base.id, plane->base.name,
0480                was_visible, visible,
0481                turn_off, turn_on, mode_changed);
0482 
0483     if (turn_on) {
0484         if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
0485             new_crtc_state->update_wm_pre = true;
0486 
0487         /* must disable cxsr around plane enable/disable */
0488         if (plane->id != PLANE_CURSOR)
0489             new_crtc_state->disable_cxsr = true;
0490     } else if (turn_off) {
0491         if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
0492             new_crtc_state->update_wm_post = true;
0493 
0494         /* must disable cxsr around plane enable/disable */
0495         if (plane->id != PLANE_CURSOR)
0496             new_crtc_state->disable_cxsr = true;
0497     } else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
0498         if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
0499             /* FIXME bollocks */
0500             new_crtc_state->update_wm_pre = true;
0501             new_crtc_state->update_wm_post = true;
0502         }
0503     }
0504 
0505     if (visible || was_visible)
0506         new_crtc_state->fb_bits |= plane->frontbuffer_bit;
0507 
0508     /*
0509      * ILK/SNB DVSACNTR/Sprite Enable
0510      * IVB SPR_CTL/Sprite Enable
0511      * "When in Self Refresh Big FIFO mode, a write to enable the
0512      *  plane will be internally buffered and delayed while Big FIFO
0513      *  mode is exiting."
0514      *
0515      * Which means that enabling the sprite can take an extra frame
0516      * when we start in big FIFO mode (LP1+). Thus we need to drop
0517      * down to LP0 and wait for vblank in order to make sure the
0518      * sprite gets enabled on the next vblank after the register write.
0519      * Doing otherwise would risk enabling the sprite one frame after
0520      * we've already signalled flip completion. We can resume LP1+
0521      * once the sprite has been enabled.
0522      *
0523      *
0524      * WaCxSRDisabledForSpriteScaling:ivb
0525      * IVB SPR_SCALE/Scaling Enable
0526      * "Low Power watermarks must be disabled for at least one
0527      *  frame before enabling sprite scaling, and kept disabled
0528      *  until sprite scaling is disabled."
0529      *
0530      * ILK/SNB DVSASCALE/Scaling Enable
0531      * "When in Self Refresh Big FIFO mode, scaling enable will be
0532      *  masked off while Big FIFO mode is exiting."
0533      *
0534      * Despite the w/a only being listed for IVB we assume that
0535      * the ILK/SNB note has similar ramifications, hence we apply
0536      * the w/a on all three platforms.
0537      *
0538      * Experimental results suggest this is also needed for the primary
0539      * plane, not only the sprite plane.
0540      */
0541     if (plane->id != PLANE_CURSOR &&
0542         (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
0543          IS_IVYBRIDGE(dev_priv)) &&
0544         (turn_on || (!intel_plane_is_scaled(old_plane_state) &&
0545              intel_plane_is_scaled(new_plane_state))))
0546         new_crtc_state->disable_lp_wm = true;
0547 
0548     if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
0549         new_crtc_state->do_async_flip = true;
0550 
0551     return 0;
0552 }
0553 
0554 int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
0555                     struct intel_crtc_state *new_crtc_state,
0556                     const struct intel_plane_state *old_plane_state,
0557                     struct intel_plane_state *new_plane_state)
0558 {
0559     struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
0560     const struct drm_framebuffer *fb = new_plane_state->hw.fb;
0561     int ret;
0562 
0563     intel_plane_set_invisible(new_crtc_state, new_plane_state);
0564     new_crtc_state->enabled_planes &= ~BIT(plane->id);
0565 
0566     if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
0567         return 0;
0568 
0569     ret = plane->check_plane(new_crtc_state, new_plane_state);
0570     if (ret)
0571         return ret;
0572 
0573     if (fb)
0574         new_crtc_state->enabled_planes |= BIT(plane->id);
0575 
0576     /* FIXME pre-g4x don't work like this */
0577     if (new_plane_state->uapi.visible)
0578         new_crtc_state->active_planes |= BIT(plane->id);
0579 
0580     if (new_plane_state->uapi.visible &&
0581         intel_plane_is_scaled(new_plane_state))
0582         new_crtc_state->scaled_planes |= BIT(plane->id);
0583 
0584     if (new_plane_state->uapi.visible &&
0585         intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
0586         new_crtc_state->nv12_planes |= BIT(plane->id);
0587 
0588     if (new_plane_state->uapi.visible &&
0589         fb->format->format == DRM_FORMAT_C8)
0590         new_crtc_state->c8_planes |= BIT(plane->id);
0591 
0592     if (new_plane_state->uapi.visible || old_plane_state->uapi.visible)
0593         new_crtc_state->update_planes |= BIT(plane->id);
0594 
0595     if (new_plane_state->uapi.visible &&
0596         intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
0597         new_crtc_state->data_rate_y[plane->id] =
0598             intel_plane_data_rate(new_crtc_state, new_plane_state, 0);
0599         new_crtc_state->data_rate[plane->id] =
0600             intel_plane_data_rate(new_crtc_state, new_plane_state, 1);
0601 
0602         new_crtc_state->rel_data_rate_y[plane->id] =
0603             intel_plane_relative_data_rate(new_crtc_state,
0604                                new_plane_state, 0);
0605         new_crtc_state->rel_data_rate[plane->id] =
0606             intel_plane_relative_data_rate(new_crtc_state,
0607                                new_plane_state, 1);
0608     } else if (new_plane_state->uapi.visible) {
0609         new_crtc_state->data_rate[plane->id] =
0610             intel_plane_data_rate(new_crtc_state, new_plane_state, 0);
0611 
0612         new_crtc_state->rel_data_rate[plane->id] =
0613             intel_plane_relative_data_rate(new_crtc_state,
0614                                new_plane_state, 0);
0615     }
0616 
0617     return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state,
0618                            old_plane_state, new_plane_state);
0619 }
0620 
0621 static struct intel_plane *
0622 intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id)
0623 {
0624     struct drm_i915_private *i915 = to_i915(crtc->base.dev);
0625     struct intel_plane *plane;
0626 
0627     for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
0628         if (plane->id == plane_id)
0629             return plane;
0630     }
0631 
0632     return NULL;
0633 }
0634 
0635 int intel_plane_atomic_check(struct intel_atomic_state *state,
0636                  struct intel_plane *plane)
0637 {
0638     struct drm_i915_private *i915 = to_i915(state->base.dev);
0639     struct intel_plane_state *new_plane_state =
0640         intel_atomic_get_new_plane_state(state, plane);
0641     const struct intel_plane_state *old_plane_state =
0642         intel_atomic_get_old_plane_state(state, plane);
0643     const struct intel_plane_state *new_master_plane_state;
0644     struct intel_crtc *crtc = intel_crtc_for_pipe(i915, plane->pipe);
0645     const struct intel_crtc_state *old_crtc_state =
0646         intel_atomic_get_old_crtc_state(state, crtc);
0647     struct intel_crtc_state *new_crtc_state =
0648         intel_atomic_get_new_crtc_state(state, crtc);
0649 
0650     if (new_crtc_state && intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
0651         struct intel_crtc *master_crtc =
0652             intel_master_crtc(new_crtc_state);
0653         struct intel_plane *master_plane =
0654             intel_crtc_get_plane(master_crtc, plane->id);
0655 
0656         new_master_plane_state =
0657             intel_atomic_get_new_plane_state(state, master_plane);
0658     } else {
0659         new_master_plane_state = new_plane_state;
0660     }
0661 
0662     intel_plane_copy_uapi_to_hw_state(new_plane_state,
0663                       new_master_plane_state,
0664                       crtc);
0665 
0666     new_plane_state->uapi.visible = false;
0667     if (!new_crtc_state)
0668         return 0;
0669 
0670     return intel_plane_atomic_check_with_state(old_crtc_state,
0671                            new_crtc_state,
0672                            old_plane_state,
0673                            new_plane_state);
0674 }
0675 
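     /*
      * The DDB overlap check below presumably enforces a safe commit order:
      * ddb[]/ddb_y[] start out as the old per-plane allocations and are
      * replaced with the new ones as planes are picked, so a plane is only
      * committed once its new allocation no longer overlaps any other plane's
      * currently tracked allocation (old for planes not yet updated, new for
      * those already committed).
      */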
0676 static struct intel_plane *
0677 skl_next_plane_to_commit(struct intel_atomic_state *state,
0678              struct intel_crtc *crtc,
0679              struct skl_ddb_entry ddb[I915_MAX_PLANES],
0680              struct skl_ddb_entry ddb_y[I915_MAX_PLANES],
0681              unsigned int *update_mask)
0682 {
0683     struct intel_crtc_state *crtc_state =
0684         intel_atomic_get_new_crtc_state(state, crtc);
0685     struct intel_plane_state *plane_state;
0686     struct intel_plane *plane;
0687     int i;
0688 
0689     if (*update_mask == 0)
0690         return NULL;
0691 
0692     for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
0693         enum plane_id plane_id = plane->id;
0694 
0695         if (crtc->pipe != plane->pipe ||
0696             !(*update_mask & BIT(plane_id)))
0697             continue;
0698 
0699         if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb[plane_id],
0700                         ddb, I915_MAX_PLANES, plane_id) ||
0701             skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
0702                         ddb_y, I915_MAX_PLANES, plane_id))
0703             continue;
0704 
0705         *update_mask &= ~BIT(plane_id);
0706         ddb[plane_id] = crtc_state->wm.skl.plane_ddb[plane_id];
0707         ddb_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
0708 
0709         return plane;
0710     }
0711 
0712     /* should never happen */
0713     drm_WARN_ON(state->base.dev, 1);
0714 
0715     return NULL;
0716 }
0717 
0718 void intel_plane_update_noarm(struct intel_plane *plane,
0719                   const struct intel_crtc_state *crtc_state,
0720                   const struct intel_plane_state *plane_state)
0721 {
0722     struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
0723 
0724     trace_intel_plane_update_noarm(&plane->base, crtc);
0725 
0726     if (plane->update_noarm)
0727         plane->update_noarm(plane, crtc_state, plane_state);
0728 }
0729 
0730 void intel_plane_update_arm(struct intel_plane *plane,
0731                 const struct intel_crtc_state *crtc_state,
0732                 const struct intel_plane_state *plane_state)
0733 {
0734     struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
0735 
0736     trace_intel_plane_update_arm(&plane->base, crtc);
0737 
0738     if (crtc_state->do_async_flip && plane->async_flip)
0739         plane->async_flip(plane, crtc_state, plane_state, true);
0740     else
0741         plane->update_arm(plane, crtc_state, plane_state);
0742 }
0743 
0744 void intel_plane_disable_arm(struct intel_plane *plane,
0745                  const struct intel_crtc_state *crtc_state)
0746 {
0747     struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
0748 
0749     trace_intel_plane_disable_arm(&plane->base, crtc);
0750     plane->disable_arm(plane, crtc_state);
0751 }
0752 
0753 void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
0754                     struct intel_crtc *crtc)
0755 {
0756     struct intel_crtc_state *new_crtc_state =
0757         intel_atomic_get_new_crtc_state(state, crtc);
0758     u32 update_mask = new_crtc_state->update_planes;
0759     struct intel_plane_state *new_plane_state;
0760     struct intel_plane *plane;
0761     int i;
0762 
0763     if (new_crtc_state->do_async_flip)
0764         return;
0765 
0766     /*
0767      * Since we only write non-arming registers here,
0768      * the order does not matter even for skl+.
0769      */
0770     for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
0771         if (crtc->pipe != plane->pipe ||
0772             !(update_mask & BIT(plane->id)))
0773             continue;
0774 
0775         /* TODO: for mailbox updates this should be skipped */
0776         if (new_plane_state->uapi.visible ||
0777             new_plane_state->planar_slave)
0778             intel_plane_update_noarm(plane, new_crtc_state, new_plane_state);
0779     }
0780 }
0781 
0782 static void skl_crtc_planes_update_arm(struct intel_atomic_state *state,
0783                        struct intel_crtc *crtc)
0784 {
0785     struct intel_crtc_state *old_crtc_state =
0786         intel_atomic_get_old_crtc_state(state, crtc);
0787     struct intel_crtc_state *new_crtc_state =
0788         intel_atomic_get_new_crtc_state(state, crtc);
0789     struct skl_ddb_entry ddb[I915_MAX_PLANES];
0790     struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
0791     u32 update_mask = new_crtc_state->update_planes;
0792     struct intel_plane *plane;
0793 
0794     memcpy(ddb, old_crtc_state->wm.skl.plane_ddb,
0795            sizeof(old_crtc_state->wm.skl.plane_ddb));
0796     memcpy(ddb_y, old_crtc_state->wm.skl.plane_ddb_y,
0797            sizeof(old_crtc_state->wm.skl.plane_ddb_y));
0798 
0799     while ((plane = skl_next_plane_to_commit(state, crtc, ddb, ddb_y, &update_mask))) {
0800         struct intel_plane_state *new_plane_state =
0801             intel_atomic_get_new_plane_state(state, plane);
0802 
0803         /*
0804          * TODO: for mailbox updates intel_plane_update_noarm()
0805          * would have to be called here as well.
0806          */
0807         if (new_plane_state->uapi.visible ||
0808             new_plane_state->planar_slave)
0809             intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
0810         else
0811             intel_plane_disable_arm(plane, new_crtc_state);
0812     }
0813 }
0814 
0815 static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state,
0816                     struct intel_crtc *crtc)
0817 {
0818     struct intel_crtc_state *new_crtc_state =
0819         intel_atomic_get_new_crtc_state(state, crtc);
0820     u32 update_mask = new_crtc_state->update_planes;
0821     struct intel_plane_state *new_plane_state;
0822     struct intel_plane *plane;
0823     int i;
0824 
0825     for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
0826         if (crtc->pipe != plane->pipe ||
0827             !(update_mask & BIT(plane->id)))
0828             continue;
0829 
0830         /*
0831          * TODO: for mailbox updates intel_plane_update_noarm()
0832          * would have to be called here as well.
0833          */
0834         if (new_plane_state->uapi.visible)
0835             intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
0836         else
0837             intel_plane_disable_arm(plane, new_crtc_state);
0838     }
0839 }
0840 
0841 void intel_crtc_planes_update_arm(struct intel_atomic_state *state,
0842                   struct intel_crtc *crtc)
0843 {
0844     struct drm_i915_private *i915 = to_i915(state->base.dev);
0845 
0846     if (DISPLAY_VER(i915) >= 9)
0847         skl_crtc_planes_update_arm(state, crtc);
0848     else
0849         i9xx_crtc_planes_update_arm(state, crtc);
0850 }
0851 
0852 int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
0853                       struct intel_crtc_state *crtc_state,
0854                       int min_scale, int max_scale,
0855                       bool can_position)
0856 {
0857     struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
0858     struct drm_framebuffer *fb = plane_state->hw.fb;
0859     struct drm_rect *src = &plane_state->uapi.src;
0860     struct drm_rect *dst = &plane_state->uapi.dst;
0861     const struct drm_rect *clip = &crtc_state->pipe_src;
0862     unsigned int rotation = plane_state->hw.rotation;
0863     int hscale, vscale;
0864 
0865     if (!fb) {
0866         plane_state->uapi.visible = false;
0867         return 0;
0868     }
0869 
0870     drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
0871 
0872     /* Check scaling */
0873     hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
0874     vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
0875     if (hscale < 0 || vscale < 0) {
0876         drm_dbg_kms(&i915->drm, "Invalid scaling of plane\n");
0877         drm_rect_debug_print("src: ", src, true);
0878         drm_rect_debug_print("dst: ", dst, false);
0879         return -ERANGE;
0880     }
0881 
0882     /*
0883      * FIXME: This might need further adjustment for seamless scaling
0884      * with phase information, for the 2p2 and 2p1 scenarios.
0885      */
0886     plane_state->uapi.visible = drm_rect_clip_scaled(src, dst, clip);
0887 
0888     drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
0889 
0890     if (!can_position && plane_state->uapi.visible &&
0891         !drm_rect_equals(dst, clip)) {
0892         drm_dbg_kms(&i915->drm, "Plane must cover entire CRTC\n");
0893         drm_rect_debug_print("dst: ", dst, false);
0894         drm_rect_debug_print("clip: ", clip, false);
0895         return -EINVAL;
0896     }
0897 
0898     /* final plane coordinates will be relative to the plane's pipe */
0899     drm_rect_translate(dst, -clip->x1, -clip->y1);
0900 
0901     return 0;
0902 }
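     /*
      * Note that src coordinates are in 16.16 fixed point while dst/clip are
      * in whole pixels, so (with purely illustrative numbers) a 1920 pixel
      * wide source spans 1920 << 16 == 125829120 units, and a 2x horizontal
      * downscale onto a 960 pixel wide destination yields
      * hscale == 2 << 16 == 131072 from drm_rect_calc_hscale().
      */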
0903 
0904 struct wait_rps_boost {
0905     struct wait_queue_entry wait;
0906 
0907     struct drm_crtc *crtc;
0908     struct i915_request *request;
0909 };
0910 
0911 static int do_rps_boost(struct wait_queue_entry *_wait,
0912             unsigned mode, int sync, void *key)
0913 {
0914     struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
0915     struct i915_request *rq = wait->request;
0916 
0917     /*
0918      * If we missed the vblank, but the request is already running it
0919      * is reasonable to assume that it will complete before the next
0920      * vblank without our intervention, so leave RPS alone.
0921      */
0922     if (!i915_request_started(rq))
0923         intel_rps_boost(rq);
0924     i915_request_put(rq);
0925 
0926     drm_crtc_vblank_put(wait->crtc);
0927 
0928     list_del(&wait->wait.entry);
0929     kfree(wait);
0930     return 1;
0931 }
0932 
0933 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
0934                        struct dma_fence *fence)
0935 {
0936     struct wait_rps_boost *wait;
0937 
0938     if (!dma_fence_is_i915(fence))
0939         return;
0940 
0941     if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
0942         return;
0943 
0944     if (drm_crtc_vblank_get(crtc))
0945         return;
0946 
0947     wait = kmalloc(sizeof(*wait), GFP_KERNEL);
0948     if (!wait) {
0949         drm_crtc_vblank_put(crtc);
0950         return;
0951     }
0952 
0953     wait->request = to_request(dma_fence_get(fence));
0954     wait->crtc = crtc;
0955 
0956     wait->wait.func = do_rps_boost;
0957     wait->wait.flags = 0;
0958 
0959     add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
0960 }
0961 
0962 /**
0963  * intel_prepare_plane_fb - Prepare fb for usage on plane
0964  * @_plane: drm plane to prepare for
0965  * @_new_plane_state: the plane state being prepared
0966  *
0967  * Prepares a framebuffer for usage on a display plane.  Generally this
0968  * involves pinning the underlying object and updating the frontbuffer tracking
0969  * bits.  Some older platforms need special physical address handling for
0970  * cursor planes.
0971  *
0972  * Returns 0 on success, negative error code on failure.
0973  */
0974 static int
0975 intel_prepare_plane_fb(struct drm_plane *_plane,
0976                struct drm_plane_state *_new_plane_state)
0977 {
0978     struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
0979     struct intel_plane *plane = to_intel_plane(_plane);
0980     struct intel_plane_state *new_plane_state =
0981         to_intel_plane_state(_new_plane_state);
0982     struct intel_atomic_state *state =
0983         to_intel_atomic_state(new_plane_state->uapi.state);
0984     struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
0985     const struct intel_plane_state *old_plane_state =
0986         intel_atomic_get_old_plane_state(state, plane);
0987     struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
0988     struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
0989     int ret;
0990 
0991     if (old_obj) {
0992         const struct intel_crtc_state *crtc_state =
0993             intel_atomic_get_new_crtc_state(state,
0994                             to_intel_crtc(old_plane_state->hw.crtc));
0995 
0996         /* Big Hammer, we also need to ensure that any pending
0997          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
0998          * current scanout is retired before unpinning the old
0999          * framebuffer. Note that we rely on userspace rendering
1000          * into the buffer attached to the pipe they are waiting
1001          * on. If not, userspace generates a GPU hang with IPEHR
1002          * pointing to the MI_WAIT_FOR_EVENT.
1003          *
1004          * This should only fail upon a hung GPU, in which case we
1005          * can safely continue.
1006          */
1007         if (intel_crtc_needs_modeset(crtc_state)) {
1008             ret = i915_sw_fence_await_reservation(&state->commit_ready,
1009                                   old_obj->base.resv, NULL,
1010                                   false, 0,
1011                                   GFP_KERNEL);
1012             if (ret < 0)
1013                 return ret;
1014         }
1015     }
1016 
1017     if (new_plane_state->uapi.fence) { /* explicit fencing */
1018         i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
1019                          &attr);
1020         ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
1021                             new_plane_state->uapi.fence,
1022                             i915_fence_timeout(dev_priv),
1023                             GFP_KERNEL);
1024         if (ret < 0)
1025             return ret;
1026     }
1027 
1028     if (!obj)
1029         return 0;
1030 
1031 
1032     ret = intel_plane_pin_fb(new_plane_state);
1033     if (ret)
1034         return ret;
1035 
1036     i915_gem_object_wait_priority(obj, 0, &attr);
1037 
1038     if (!new_plane_state->uapi.fence) { /* implicit fencing */
1039         struct dma_resv_iter cursor;
1040         struct dma_fence *fence;
1041 
1042         ret = i915_sw_fence_await_reservation(&state->commit_ready,
1043                               obj->base.resv, NULL,
1044                               false,
1045                               i915_fence_timeout(dev_priv),
1046                               GFP_KERNEL);
1047         if (ret < 0)
1048             goto unpin_fb;
1049 
1050         dma_resv_iter_begin(&cursor, obj->base.resv,
1051                     DMA_RESV_USAGE_WRITE);
1052         dma_resv_for_each_fence_unlocked(&cursor, fence) {
1053             add_rps_boost_after_vblank(new_plane_state->hw.crtc,
1054                            fence);
1055         }
1056         dma_resv_iter_end(&cursor);
1057     } else {
1058         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
1059                        new_plane_state->uapi.fence);
1060     }
1061 
1062     /*
1063      * We declare pageflips to be interactive and so merit a small bias
1064      * towards upclocking to deliver the frame on time. By only changing
1065      * the RPS thresholds to sample more regularly and aim for higher
1066      * clocks we can hopefully deliver low power workloads (like kodi)
1067      * that are not quite steady state without resorting to forcing
1068      * maximum clocks following a vblank miss (see do_rps_boost()).
1069      */
1070     if (!state->rps_interactive) {
1071         intel_rps_mark_interactive(&to_gt(dev_priv)->rps, true);
1072         state->rps_interactive = true;
1073     }
1074 
1075     return 0;
1076 
1077 unpin_fb:
1078     intel_plane_unpin_fb(new_plane_state);
1079 
1080     return ret;
1081 }
1082 
1083 /**
1084  * intel_cleanup_plane_fb - Cleans up an fb after plane use
1085  * @plane: drm plane to clean up for
1086  * @_old_plane_state: the state from the previous modeset
1087  *
1088  * Cleans up a framebuffer that has just been removed from a plane.
1089  */
1090 static void
1091 intel_cleanup_plane_fb(struct drm_plane *plane,
1092                struct drm_plane_state *_old_plane_state)
1093 {
1094     struct intel_plane_state *old_plane_state =
1095         to_intel_plane_state(_old_plane_state);
1096     struct intel_atomic_state *state =
1097         to_intel_atomic_state(old_plane_state->uapi.state);
1098     struct drm_i915_private *dev_priv = to_i915(plane->dev);
1099     struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
1100 
1101     if (!obj)
1102         return;
1103 
1104     if (state->rps_interactive) {
1105         intel_rps_mark_interactive(&to_gt(dev_priv)->rps, false);
1106         state->rps_interactive = false;
1107     }
1108 
1109     /* Should only be called after a successful intel_prepare_plane_fb()! */
1110     intel_plane_unpin_fb(old_plane_state);
1111 }
1112 
1113 static const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
1114     .prepare_fb = intel_prepare_plane_fb,
1115     .cleanup_fb = intel_cleanup_plane_fb,
1116 };
1117 
1118 void intel_plane_helper_add(struct intel_plane *plane)
1119 {
1120     drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
1121 }