0001 /*
0002  * Copyright © 2014 Intel Corporation
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice (including the next
0012  * paragraph) shall be included in all copies or substantial portions of the
0013  * Software.
0014  *
0015  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0016  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0017  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0018  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
0019  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
0020  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
0021  * DEALINGS IN THE SOFTWARE.
0022  */
0023 
0024 /**
0025  * DOC: Frame Buffer Compression (FBC)
0026  *
0027  * FBC tries to save memory bandwidth (and so power consumption) by
0028  * compressing the amount of memory used by the display. It is total
0029  * transparent to user space and completely handled in the kernel.
0030  *
0031  * The benefits of FBC are mostly visible with solid backgrounds and
0032  * variation-less patterns. It comes from keeping the memory footprint small
0033  * and having fewer memory pages opened and accessed for refreshing the display.
0034  *
0035  * i915 is responsible to reserve stolen memory for FBC and configure its
0036  * offset on proper registers. The hardware takes care of all
0037  * compress/decompress. However there are many known cases where we have to
0038  * forcibly disable it to allow proper screen updates.
0039  */
0040 
0041 #include <linux/string_helpers.h>
0042 
0043 #include <drm/drm_blend.h>
0044 #include <drm/drm_fourcc.h>
0045 
0046 #include "i915_drv.h"
0047 #include "i915_utils.h"
0048 #include "i915_vgpu.h"
0049 #include "intel_cdclk.h"
0050 #include "intel_de.h"
0051 #include "intel_display_trace.h"
0052 #include "intel_display_types.h"
0053 #include "intel_fbc.h"
0054 #include "intel_frontbuffer.h"
0055 
/* Iterate over every FBC instance id supported by this platform (per fbc_mask). */
#define for_each_fbc_id(__dev_priv, __fbc_id) \
    for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
        for_each_if(INTEL_INFO(__dev_priv)->display.fbc_mask & BIT(__fbc_id))

/* Iterate over every allocated struct intel_fbc, skipping NULL slots. */
#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \
    for_each_fbc_id((__dev_priv), (__fbc_id)) \
        for_each_if((__fbc) = (__dev_priv)->fbc[(__fbc_id)])
0063 
/* Per-platform hardware hooks for a single FBC instance. */
struct intel_fbc_funcs {
    void (*activate)(struct intel_fbc *fbc);        /* enable compression in hw */
    void (*deactivate)(struct intel_fbc *fbc);      /* disable compression in hw */
    bool (*is_active)(struct intel_fbc *fbc);       /* hw enable bit currently set? */
    bool (*is_compressing)(struct intel_fbc *fbc);  /* hw reports compressed segments? */
    void (*nuke)(struct intel_fbc *fbc);            /* force the hw to recompress the plane */
    void (*program_cfb)(struct intel_fbc *fbc);     /* program CFB base address(es) */
    void (*set_false_color)(struct intel_fbc *fbc, bool enable); /* debug false-color mode */
};
0073 
/*
 * Everything needed to program the hardware for one plane; compared
 * against a newly computed state to decide whether FBC must be
 * disabled and re-enabled (see the comment on intel_fbc::state).
 */
struct intel_fbc_state {
    struct intel_plane *plane;      /* the plane being compressed */
    unsigned int cfb_stride;        /* CFB stride in bytes, assuming 1:1 limit */
    unsigned int cfb_size;          /* total CFB size in bytes */
    unsigned int fence_y_offset;    /* Y offset programmed into the fence regs */
    u16 override_cfb_stride;        /* non-zero -> program the stride override */
    u16 interval;                   /* FBC_CTL periodic recompression interval */
    s8 fence_id;                    /* GGTT fence number, or negative for none */
};
0083 
/* One FBC hardware instance (platforms may have several, see enum intel_fbc_id). */
struct intel_fbc {
    struct drm_i915_private *i915;
    const struct intel_fbc_funcs *funcs;    /* per-platform hw hooks */

    /*
     * This is always the inner lock when overlapping with
     * struct_mutex and it's the outer lock when overlapping
     * with stolen_lock.
     */
    struct mutex lock;
    unsigned int busy_bits;                 /* frontbuffer bits with pending activity */

    struct drm_mm_node compressed_fb;       /* CFB allocation in stolen memory */
    struct drm_mm_node compressed_llb;      /* line length buffer (pre-g4x only) */

    enum intel_fbc_id id;

    u8 limit;                               /* chosen compression limit (1, 2 or 4) */

    bool false_color;                       /* debug false-color mode enabled */

    bool active;                            /* we believe the hw is enabled */
    bool activated;                         /* hw was enabled at least once */
    bool flip_pending;

    bool underrun_detected;
    struct work_struct underrun_work;

    /*
     * This structure contains everything that's relevant to program the
     * hardware registers. When we want to figure out if we need to disable
     * and re-enable FBC for a new configuration we just check if there's
     * something different in the struct. The genx_fbc_activate functions
     * are supposed to read from it in order to program the registers.
     */
    struct intel_fbc_state state;
    const char *no_fbc_reason;              /* why FBC is off, for debugfs/logs */
};
0122 
0123 /* plane stride in pixels */
0124 static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
0125 {
0126     const struct drm_framebuffer *fb = plane_state->hw.fb;
0127     unsigned int stride;
0128 
0129     stride = plane_state->view.color_plane[0].mapping_stride;
0130     if (!drm_rotation_90_or_270(plane_state->hw.rotation))
0131         stride /= fb->format->cpp[0];
0132 
0133     return stride;
0134 }
0135 
/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
    /* The CFB is always laid out at 4 bytes per pixel. */
    return 4 * intel_fbc_plane_stride(plane_state);
}
0143 
0144 /* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
0145 static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state)
0146 {
0147     struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
0148     unsigned int limit = 4; /* 1:4 compression limit is the worst case */
0149     unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
0150     unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
0151     unsigned int height = 4; /* FBC segment is 4 lines */
0152     unsigned int stride;
0153 
0154     /* minimum segment stride we can use */
0155     stride = width * cpp * height / limit;
0156 
0157     /*
0158      * Wa_16011863758: icl+
0159      * Avoid some hardware segment address miscalculation.
0160      */
0161     if (DISPLAY_VER(i915) >= 11)
0162         stride += 64;
0163 
0164     /*
0165      * At least some of the platforms require each 4 line segment to
0166      * be 512 byte aligned. Just do it always for simplicity.
0167      */
0168     stride = ALIGN(stride, 512);
0169 
0170     /* convert back to single line equivalent with 1:1 compression limit */
0171     return stride * limit / height;
0172 }
0173 
0174 /* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
0175 static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
0176 {
0177     struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
0178     unsigned int stride = _intel_fbc_cfb_stride(plane_state);
0179 
0180     /*
0181      * At least some of the platforms require each 4 line segment to
0182      * be 512 byte aligned. Aligning each line to 512 bytes guarantees
0183      * that regardless of the compression limit we choose later.
0184      */
0185     if (DISPLAY_VER(i915) >= 9)
0186         return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state));
0187     else
0188         return stride;
0189 }
0190 
0191 static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
0192 {
0193     struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
0194     int lines = drm_rect_height(&plane_state->uapi.src) >> 16;
0195 
0196     if (DISPLAY_VER(i915) == 7)
0197         lines = min(lines, 2048);
0198     else if (DISPLAY_VER(i915) >= 8)
0199         lines = min(lines, 2560);
0200 
0201     return lines * intel_fbc_cfb_stride(plane_state);
0202 }
0203 
/*
 * CFB stride override value (64 byte units per 4 line segment),
 * or 0 when the hardware-calculated stride can be used as-is.
 */
static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
{
    struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
    unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
    unsigned int stride = _intel_fbc_cfb_stride(plane_state);
    const struct drm_framebuffer *fb = plane_state->hw.fb;

    /*
     * Override stride in 64 byte units per 4 line segment.
     *
     * Gen9 hw miscalculates cfb stride for linear as
     * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
     * we always need to use the override there.
     */
    if (stride != stride_aligned ||
        (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
        return stride_aligned * 4 / 64;

    return 0;
}
0224 
/* Compute the FBC_CONTROL value (sans the enable bit) from the current state. */
static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
{
    const struct intel_fbc_state *fbc_state = &fbc->state;
    struct drm_i915_private *i915 = fbc->i915;
    unsigned int cfb_stride;
    u32 fbc_ctl;

    /* scale the 1:1 stride down by the chosen compression limit */
    cfb_stride = fbc_state->cfb_stride / fbc->limit;

    /* FBC_CTL wants 32B or 64B units */
    if (DISPLAY_VER(i915) == 2)
        cfb_stride = (cfb_stride / 32) - 1;
    else
        cfb_stride = (cfb_stride / 64) - 1;

    fbc_ctl = FBC_CTL_PERIODIC |
        FBC_CTL_INTERVAL(fbc_state->interval) |
        FBC_CTL_STRIDE(cfb_stride);

    if (IS_I945GM(i915))
        fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */

    if (fbc_state->fence_id >= 0)
        fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id);

    return fbc_ctl;
}
0252 
/* Compute the FBC_CONTROL2 value (i965 class hardware only). */
static u32 i965_fbc_ctl2(struct intel_fbc *fbc)
{
    const struct intel_fbc_state *fbc_state = &fbc->state;
    u32 fbc_ctl2;

    fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM |
        FBC_CTL_PLANE(fbc_state->plane->i9xx_plane);

    /* CPU fence tracking only when a fence is in use */
    if (fbc_state->fence_id >= 0)
        fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN;

    return fbc_ctl2;
}
0266 
0267 static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
0268 {
0269     struct drm_i915_private *i915 = fbc->i915;
0270     u32 fbc_ctl;
0271 
0272     /* Disable compression */
0273     fbc_ctl = intel_de_read(i915, FBC_CONTROL);
0274     if ((fbc_ctl & FBC_CTL_EN) == 0)
0275         return;
0276 
0277     fbc_ctl &= ~FBC_CTL_EN;
0278     intel_de_write(i915, FBC_CONTROL, fbc_ctl);
0279 
0280     /* Wait for compressing bit to clear */
0281     if (intel_de_wait_for_clear(i915, FBC_STATUS,
0282                     FBC_STAT_COMPRESSING, 10)) {
0283         drm_dbg_kms(&i915->drm, "FBC idle timed out\n");
0284         return;
0285     }
0286 }
0287 
/* Program and enable FBC on i8xx/i965 class hardware. */
static void i8xx_fbc_activate(struct intel_fbc *fbc)
{
    const struct intel_fbc_state *fbc_state = &fbc->state;
    struct drm_i915_private *i915 = fbc->i915;
    int i;

    /* Clear old tags */
    for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
        intel_de_write(i915, FBC_TAG(i), 0);

    /* i965 has extra control/fence registers to program */
    if (DISPLAY_VER(i915) == 4) {
        intel_de_write(i915, FBC_CONTROL2,
                   i965_fbc_ctl2(fbc));
        intel_de_write(i915, FBC_FENCE_OFF,
                   fbc_state->fence_y_offset);
    }

    intel_de_write(i915, FBC_CONTROL,
               FBC_CTL_EN | i8xx_fbc_ctl(fbc));
}
0308 
/* Is the FBC_CONTROL enable bit currently set in hardware? */
static bool i8xx_fbc_is_active(struct intel_fbc *fbc)
{
    return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN;
}
0313 
/* Does FBC_STATUS report compression in progress or compressed data present? */
static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
{
    return intel_de_read(fbc->i915, FBC_STATUS) &
        (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
}
0319 
/*
 * Force recompression by rewriting DSPADDR with its current value.
 * The raw _fw accessors are used, hence uncore.lock is taken here.
 */
static void i8xx_fbc_nuke(struct intel_fbc *fbc)
{
    struct intel_fbc_state *fbc_state = &fbc->state;
    enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
    struct drm_i915_private *dev_priv = fbc->i915;

    spin_lock_irq(&dev_priv->uncore.lock);
    intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
              intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
    spin_unlock_irq(&dev_priv->uncore.lock);
}
0331 
/*
 * Program the CFB and line length buffer base addresses.
 * Both are stolen memory offsets and must fit in 32 bits,
 * hence the overflow BUG_ONs.
 */
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
    struct drm_i915_private *i915 = fbc->i915;

    GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
                     fbc->compressed_fb.start, U32_MAX));
    GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
                     fbc->compressed_llb.start, U32_MAX));

    intel_de_write(i915, FBC_CFB_BASE,
               i915->dsm.start + fbc->compressed_fb.start);
    intel_de_write(i915, FBC_LL_BASE,
               i915->dsm.start + fbc->compressed_llb.start);
}
0346 
/* FBC hooks for i8xx (gen2/3) class hardware. */
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
    .activate = i8xx_fbc_activate,
    .deactivate = i8xx_fbc_deactivate,
    .is_active = i8xx_fbc_is_active,
    .is_compressing = i8xx_fbc_is_compressing,
    .nuke = i8xx_fbc_nuke,
    .program_cfb = i8xx_fbc_program_cfb,
};
0355 
/*
 * Force recompression by rewriting DSPSURF with its current value
 * (i965 uses DSPSURF rather than DSPADDR). Raw _fw accessors are
 * used, hence uncore.lock is taken here.
 */
static void i965_fbc_nuke(struct intel_fbc *fbc)
{
    struct intel_fbc_state *fbc_state = &fbc->state;
    enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
    struct drm_i915_private *dev_priv = fbc->i915;

    spin_lock_irq(&dev_priv->uncore.lock);
    intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
              intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
    spin_unlock_irq(&dev_priv->uncore.lock);
}
0367 
/* FBC hooks for i965: same as i8xx except for the nuke mechanism. */
static const struct intel_fbc_funcs i965_fbc_funcs = {
    .activate = i8xx_fbc_activate,
    .deactivate = i8xx_fbc_deactivate,
    .is_active = i8xx_fbc_is_active,
    .is_compressing = i8xx_fbc_is_compressing,
    .nuke = i965_fbc_nuke,
    .program_cfb = i8xx_fbc_program_cfb,
};
0376 
0377 static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
0378 {
0379     switch (fbc->limit) {
0380     default:
0381         MISSING_CASE(fbc->limit);
0382         fallthrough;
0383     case 1:
0384         return DPFC_CTL_LIMIT_1X;
0385     case 2:
0386         return DPFC_CTL_LIMIT_2X;
0387     case 4:
0388         return DPFC_CTL_LIMIT_4X;
0389     }
0390 }
0391 
/* Compute the DPFC_CONTROL value (sans the enable bit) for g4x+ hardware. */
static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
{
    const struct intel_fbc_state *fbc_state = &fbc->state;
    struct drm_i915_private *i915 = fbc->i915;
    u32 dpfc_ctl;

    dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
        DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane);

    if (IS_G4X(i915))
        dpfc_ctl |= DPFC_CTL_SR_EN;

    if (fbc_state->fence_id >= 0) {
        dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X;

        /* pre-snb also encodes the fence number in this register */
        if (DISPLAY_VER(i915) < 6)
            dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id);
    }

    return dpfc_ctl;
}
0413 
/* Program the fence Y offset and enable compression (g4x). */
static void g4x_fbc_activate(struct intel_fbc *fbc)
{
    const struct intel_fbc_state *fbc_state = &fbc->state;
    struct drm_i915_private *i915 = fbc->i915;

    intel_de_write(i915, DPFC_FENCE_YOFF,
               fbc_state->fence_y_offset);

    intel_de_write(i915, DPFC_CONTROL,
               DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
0425 
0426 static void g4x_fbc_deactivate(struct intel_fbc *fbc)
0427 {
0428     struct drm_i915_private *i915 = fbc->i915;
0429     u32 dpfc_ctl;
0430 
0431     /* Disable compression */
0432     dpfc_ctl = intel_de_read(i915, DPFC_CONTROL);
0433     if (dpfc_ctl & DPFC_CTL_EN) {
0434         dpfc_ctl &= ~DPFC_CTL_EN;
0435         intel_de_write(i915, DPFC_CONTROL, dpfc_ctl);
0436     }
0437 }
0438 
/* Is the DPFC_CONTROL enable bit currently set in hardware? */
static bool g4x_fbc_is_active(struct intel_fbc *fbc)
{
    return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN;
}
0443 
/* Does DPFC_STATUS report any compressed segments? */
static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
{
    return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
}
0448 
/* Program the CFB base (stolen memory offset); no LLB on g4x+. */
static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
{
    struct drm_i915_private *i915 = fbc->i915;

    intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start);
}
0455 
/* FBC hooks for g4x class hardware. */
static const struct intel_fbc_funcs g4x_fbc_funcs = {
    .activate = g4x_fbc_activate,
    .deactivate = g4x_fbc_deactivate,
    .is_active = g4x_fbc_is_active,
    .is_compressing = g4x_fbc_is_compressing,
    .nuke = i965_fbc_nuke,
    .program_cfb = g4x_fbc_program_cfb,
};
0464 
/* Program the fence Y offset and enable compression (ilk+, per-instance regs). */
static void ilk_fbc_activate(struct intel_fbc *fbc)
{
    struct intel_fbc_state *fbc_state = &fbc->state;
    struct drm_i915_private *i915 = fbc->i915;

    intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id),
               fbc_state->fence_y_offset);

    intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
               DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
0476 
0477 static void ilk_fbc_deactivate(struct intel_fbc *fbc)
0478 {
0479     struct drm_i915_private *i915 = fbc->i915;
0480     u32 dpfc_ctl;
0481 
0482     /* Disable compression */
0483     dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id));
0484     if (dpfc_ctl & DPFC_CTL_EN) {
0485         dpfc_ctl &= ~DPFC_CTL_EN;
0486         intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
0487     }
0488 }
0489 
/* Is the per-instance ILK_DPFC_CONTROL enable bit currently set? */
static bool ilk_fbc_is_active(struct intel_fbc *fbc)
{
    return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN;
}
0494 
/* Does the per-instance ILK_DPFC_STATUS report any compressed segments? */
static bool ilk_fbc_is_compressing(struct intel_fbc *fbc)
{
    return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK;
}
0499 
/* Program the per-instance CFB base (stolen memory offset). */
static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
{
    struct drm_i915_private *i915 = fbc->i915;

    intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id), fbc->compressed_fb.start);
}
0506 
/* FBC hooks for ilk class hardware. */
static const struct intel_fbc_funcs ilk_fbc_funcs = {
    .activate = ilk_fbc_activate,
    .deactivate = ilk_fbc_deactivate,
    .is_active = ilk_fbc_is_active,
    .is_compressing = ilk_fbc_is_compressing,
    .nuke = i965_fbc_nuke,
    .program_cfb = ilk_fbc_program_cfb,
};
0515 
/*
 * Program the CPU fence registers (snb+): fence number when a fence
 * is in use, otherwise everything cleared.
 */
static void snb_fbc_program_fence(struct intel_fbc *fbc)
{
    const struct intel_fbc_state *fbc_state = &fbc->state;
    struct drm_i915_private *i915 = fbc->i915;
    u32 ctl = 0;

    if (fbc_state->fence_id >= 0)
        ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id);

    intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
    intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
}
0528 
/* snb activation = ilk activation plus the CPU fence programming. */
static void snb_fbc_activate(struct intel_fbc *fbc)
{
    snb_fbc_program_fence(fbc);

    ilk_fbc_activate(fbc);
}
0535 
/*
 * Ask the hardware to recompress via the render nuke message register.
 * The posting read flushes the write.
 */
static void snb_fbc_nuke(struct intel_fbc *fbc)
{
    struct drm_i915_private *i915 = fbc->i915;

    intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE);
    intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id));
}
0543 
/* FBC hooks for snb class hardware. */
static const struct intel_fbc_funcs snb_fbc_funcs = {
    .activate = snb_fbc_activate,
    .deactivate = ilk_fbc_deactivate,
    .is_active = ilk_fbc_is_active,
    .is_compressing = ilk_fbc_is_compressing,
    .nuke = snb_fbc_nuke,
    .program_cfb = ilk_fbc_program_cfb,
};
0552 
/* Program (or clear) the CFB stride override in the per-instance register (glk+). */
static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
    const struct intel_fbc_state *fbc_state = &fbc->state;
    struct drm_i915_private *i915 = fbc->i915;
    u32 val = 0;

    /* override stride is scaled by the chosen compression limit */
    if (fbc_state->override_cfb_stride)
        val |= FBC_STRIDE_OVERRIDE |
            FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);

    intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val);
}
0565 
/* Program (or clear) the CFB stride override via CHICKEN_MISC_4 (skl/kbl/bxt). */
static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
    const struct intel_fbc_state *fbc_state = &fbc->state;
    struct drm_i915_private *i915 = fbc->i915;
    u32 val = 0;

    /* Display WA #0529: skl, kbl, bxt. */
    if (fbc_state->override_cfb_stride)
        val |= CHICKEN_FBC_STRIDE_OVERRIDE |
            CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);

    /* rmw: only the stride override bits are touched */
    intel_de_rmw(i915, CHICKEN_MISC_4,
             CHICKEN_FBC_STRIDE_OVERRIDE |
             CHICKEN_FBC_STRIDE_MASK, val);
}
0581 
/* Compute the DPFC_CONTROL value (sans the enable bit) for ivb+ hardware. */
static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
{
    const struct intel_fbc_state *fbc_state = &fbc->state;
    struct drm_i915_private *i915 = fbc->i915;
    u32 dpfc_ctl;

    dpfc_ctl = g4x_dpfc_ctl_limit(fbc);

    /* only ivb still encodes the plane in this register */
    if (IS_IVYBRIDGE(i915))
        dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);

    if (fbc_state->fence_id >= 0)
        dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB;

    /* debug aid: visualize compressed vs. uncompressed segments */
    if (fbc->false_color)
        dpfc_ctl |= DPFC_CTL_FALSE_COLOR;

    return dpfc_ctl;
}
0601 
/* Program stride override, fence and enable compression (ivb+). */
static void ivb_fbc_activate(struct intel_fbc *fbc)
{
    struct drm_i915_private *i915 = fbc->i915;

    /* the stride override lives in different registers per generation */
    if (DISPLAY_VER(i915) >= 10)
        glk_fbc_program_cfb_stride(fbc);
    else if (DISPLAY_VER(i915) == 9)
        skl_fbc_program_cfb_stride(fbc);

    /* fence registers only exist when the GGTT has fences */
    if (to_gt(i915)->ggtt->num_fences)
        snb_fbc_program_fence(fbc);

    intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
               DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
}
0617 
/* Does ILK_DPFC_STATUS2 report any compressed segments (ivb+ field layout)? */
static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
{
    return intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB;
}
0622 
/* Toggle the false-color debug bit in the per-instance DPFC control register. */
static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
                    bool enable)
{
    intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id),
             DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
}
0629 
/* FBC hooks for ivb+ class hardware (only one with false-color support). */
static const struct intel_fbc_funcs ivb_fbc_funcs = {
    .activate = ivb_fbc_activate,
    .deactivate = ilk_fbc_deactivate,
    .is_active = ilk_fbc_is_active,
    .is_compressing = ivb_fbc_is_compressing,
    .nuke = snb_fbc_nuke,
    .program_cfb = ilk_fbc_program_cfb,
    .set_false_color = ivb_fbc_set_false_color,
};
0639 
/* Query the actual hardware enable state via the per-platform hook. */
static bool intel_fbc_hw_is_active(struct intel_fbc *fbc)
{
    return fbc->funcs->is_active(fbc);
}
0644 
/* Enable FBC in hardware and update the software bookkeeping flags. */
static void intel_fbc_hw_activate(struct intel_fbc *fbc)
{
    trace_intel_fbc_activate(fbc->state.plane);

    fbc->active = true;
    fbc->activated = true; /* sticky: records that FBC was enabled at least once */

    fbc->funcs->activate(fbc);
}
0654 
/* Disable FBC in hardware and update the software bookkeeping flag. */
static void intel_fbc_hw_deactivate(struct intel_fbc *fbc)
{
    trace_intel_fbc_deactivate(fbc->state.plane);

    fbc->active = false;

    fbc->funcs->deactivate(fbc);
}
0663 
/* Is the hardware actually compressing right now (per-platform hook)? */
static bool intel_fbc_is_compressing(struct intel_fbc *fbc)
{
    return fbc->funcs->is_compressing(fbc);
}
0668 
/*
 * Force the hardware to recompress the plane. Must not be called
 * while a flip is pending (asserted via the WARN below).
 */
static void intel_fbc_nuke(struct intel_fbc *fbc)
{
    struct drm_i915_private *i915 = fbc->i915;

    drm_WARN_ON(&i915->drm, fbc->flip_pending);

    trace_intel_fbc_nuke(fbc->state.plane);

    fbc->funcs->nuke(fbc);
}
0679 
/* Enable FBC, force an initial recompression, and clear the disable reason. */
static void intel_fbc_activate(struct intel_fbc *fbc)
{
    intel_fbc_hw_activate(fbc);
    intel_fbc_nuke(fbc);

    fbc->no_fbc_reason = NULL;
}
0687 
/*
 * Disable FBC (if currently active) and record why, for debugfs/logging.
 * Caller must hold fbc->lock (asserted below).
 */
static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
{
    struct drm_i915_private *i915 = fbc->i915;

    drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));

    if (fbc->active)
        intel_fbc_hw_deactivate(fbc);

    fbc->no_fbc_reason = reason;
}
0699 
0700 static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
0701 {
0702     if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
0703         return BIT_ULL(28);
0704     else
0705         return BIT_ULL(32);
0706 }
0707 
/* Upper bound of the stolen memory range usable for the CFB. */
static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
{
    u64 end;

    /* The FBC hardware for BDW/SKL doesn't have access to the stolen
     * reserved range size, so it always assumes the maximum (8mb) is used.
     * If we enable FBC using a CFB on that memory range we'll get FIFO
     * underruns, even if that range is not reserved by the BIOS. */
    if (IS_BROADWELL(i915) ||
        (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
        end = resource_size(&i915->dsm) - 8 * 1024 * 1024;
    else
        end = U64_MAX;

    /* also clamp to what the CFB base register can address */
    return min(end, intel_fbc_cfb_base_max(i915));
}
0724 
0725 static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
0726 {
0727     return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1;
0728 }
0729 
/* Maximum compression limit the platform supports. */
static int intel_fbc_max_limit(struct drm_i915_private *i915)
{
    /* WaFbcOnly1to1Ratio:ctg */
    if (IS_G4X(i915))
        return 1;

    /*
     * FBC2 can only do 1:1, 1:2, 1:4, we limit
     * FBC1 to the same out of convenience.
     */
    return 4;
}
0742 
0743 static int find_compression_limit(struct intel_fbc *fbc,
0744                   unsigned int size, int min_limit)
0745 {
0746     struct drm_i915_private *i915 = fbc->i915;
0747     u64 end = intel_fbc_stolen_end(i915);
0748     int ret, limit = min_limit;
0749 
0750     size /= limit;
0751 
0752     /* Try to over-allocate to reduce reallocations and fragmentation. */
0753     ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
0754                            size <<= 1, 4096, 0, end);
0755     if (ret == 0)
0756         return limit;
0757 
0758     for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) {
0759         ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
0760                                size >>= 1, 4096, 0, end);
0761         if (ret == 0)
0762             return limit;
0763     }
0764 
0765     return 0;
0766 }
0767 
/*
 * Allocate the compressed framebuffer (and, pre-g4x, the line length
 * buffer) from stolen memory. On success fbc->limit holds the chosen
 * compression limit; returns 0 on success, -ENOSPC on failure.
 */
static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
                   unsigned int size, int min_limit)
{
    struct drm_i915_private *i915 = fbc->i915;
    int ret;

    /* must not already hold allocations */
    drm_WARN_ON(&i915->drm,
            drm_mm_node_allocated(&fbc->compressed_fb));
    drm_WARN_ON(&i915->drm,
            drm_mm_node_allocated(&fbc->compressed_llb));

    /* only pre-g4x hardware needs a separate line length buffer */
    if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
        ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
                          4096, 4096);
        if (ret)
            goto err;
    }

    ret = find_compression_limit(fbc, size, min_limit);
    if (!ret)
        goto err_llb;
    else if (ret > min_limit)
        drm_info_once(&i915->drm,
                  "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

    fbc->limit = ret;

    drm_dbg_kms(&i915->drm,
            "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
            fbc->compressed_fb.size, fbc->limit);

    return 0;

err_llb:
    /* undo the LLB allocation (if any) on CFB allocation failure */
    if (drm_mm_node_allocated(&fbc->compressed_llb))
        i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
err:
    if (drm_mm_initialized(&i915->mm.stolen))
        drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
    return -ENOSPC;
}
0809 
/* Program the CFB base address(es) via the per-platform hook. */
static void intel_fbc_program_cfb(struct intel_fbc *fbc)
{
    fbc->funcs->program_cfb(fbc);
}
0814 
/* Apply FBC-related hardware workarounds before activation. */
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
    /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp */
    if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915))
        intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
                 DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
}
0822 
/*
 * Release the stolen memory nodes (CFB and, if allocated, LLB).
 * The hardware must already be inactive (asserted below).
 */
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
    struct drm_i915_private *i915 = fbc->i915;

    if (WARN_ON(intel_fbc_hw_is_active(fbc)))
        return;

    if (drm_mm_node_allocated(&fbc->compressed_llb))
        i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
    if (drm_mm_node_allocated(&fbc->compressed_fb))
        i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
}
0835 
/*
 * Free the stolen memory allocations and the struct intel_fbc
 * instances for every FBC on the device (driver teardown path).
 */
void intel_fbc_cleanup(struct drm_i915_private *i915)
{
    struct intel_fbc *fbc;
    enum intel_fbc_id fbc_id;

    for_each_intel_fbc(i915, fbc, fbc_id) {
        mutex_lock(&fbc->lock);
        __intel_fbc_cleanup_cfb(fbc);
        mutex_unlock(&fbc->lock);

        kfree(fbc);
    }
}
0849 
/* Can FBC handle the plane's stride (in bytes) on this platform? */
static bool stride_is_valid(const struct intel_plane_state *plane_state)
{
    struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
    const struct drm_framebuffer *fb = plane_state->hw.fb;
    unsigned int stride = intel_fbc_plane_stride(plane_state) *
        fb->format->cpp[0];

    /* This should have been caught earlier. */
    if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0))
        return false;

    /* Below are the additional FBC restrictions. */
    if (stride < 512)
        return false;

    if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3)
        return stride == 4096 || stride == 8192;

    if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048)
        return false;

    /* Display WA #1105: skl,bxt,kbl,cfl,glk */
    if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) &&
        fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
        return false;

    if (stride > 16384)
        return false;

    return true;
}
0881 
0882 static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
0883 {
0884     struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
0885     const struct drm_framebuffer *fb = plane_state->hw.fb;
0886 
0887     switch (fb->format->format) {
0888     case DRM_FORMAT_XRGB8888:
0889     case DRM_FORMAT_XBGR8888:
0890         return true;
0891     case DRM_FORMAT_XRGB1555:
0892     case DRM_FORMAT_RGB565:
0893         /* 16bpp not supported on gen2 */
0894         if (DISPLAY_VER(i915) == 2)
0895             return false;
0896         /* WaFbcOnly1to1Ratio:ctg */
0897         if (IS_G4X(i915))
0898             return false;
0899         return true;
0900     default:
0901         return false;
0902     }
0903 }
0904 
0905 static bool rotation_is_valid(const struct intel_plane_state *plane_state)
0906 {
0907     struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
0908     const struct drm_framebuffer *fb = plane_state->hw.fb;
0909     unsigned int rotation = plane_state->hw.rotation;
0910 
0911     if (DISPLAY_VER(i915) >= 9 && fb->format->format == DRM_FORMAT_RGB565 &&
0912         drm_rotation_90_or_270(rotation))
0913         return false;
0914     else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) &&
0915          rotation != DRM_MODE_ROTATE_0)
0916         return false;
0917 
0918     return true;
0919 }
0920 
/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we include the src x/y offsets
 * instead of just looking at the plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state)
{
    struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
    unsigned int effective_w, effective_h, max_w, max_h;

    /* Per-platform maximum plane dimensions FBC HW tracking can cover. */
    if (DISPLAY_VER(i915) >= 10) {
        max_w = 5120;
        max_h = 4096;
    } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
        max_w = 4096;
        max_h = 4096;
    } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
        max_w = 4096;
        max_h = 2048;
    } else {
        max_w = 2048;
        max_h = 1536;
    }

    /* src coordinates are 16.16 fixed point, hence the >> 16. */
    effective_w = plane_state->view.color_plane[0].x +
        (drm_rect_width(&plane_state->uapi.src) >> 16);
    effective_h = plane_state->view.color_plane[0].y +
        (drm_rect_height(&plane_state->uapi.src) >> 16);

    return effective_w <= max_w && effective_h <= max_h;
}
0953 
0954 static bool tiling_is_valid(const struct intel_plane_state *plane_state)
0955 {
0956     struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
0957     const struct drm_framebuffer *fb = plane_state->hw.fb;
0958 
0959     switch (fb->modifier) {
0960     case DRM_FORMAT_MOD_LINEAR:
0961     case I915_FORMAT_MOD_Y_TILED:
0962     case I915_FORMAT_MOD_Yf_TILED:
0963         return DISPLAY_VER(i915) >= 9;
0964     case I915_FORMAT_MOD_4_TILED:
0965     case I915_FORMAT_MOD_X_TILED:
0966         return true;
0967     default:
0968         return false;
0969     }
0970 }
0971 
/*
 * Capture the FBC software state (plane, fence, CFB geometry) from the
 * new atomic state. Must only be called once the plane has passed all
 * FBC eligibility checks (no_fbc_reason == NULL).
 */
static void intel_fbc_update_state(struct intel_atomic_state *state,
                   struct intel_crtc *crtc,
                   struct intel_plane *plane)
{
    struct drm_i915_private *i915 = to_i915(state->base.dev);
    const struct intel_crtc_state *crtc_state =
        intel_atomic_get_new_crtc_state(state, crtc);
    const struct intel_plane_state *plane_state =
        intel_atomic_get_new_plane_state(state, plane);
    struct intel_fbc *fbc = plane->fbc;
    struct intel_fbc_state *fbc_state = &fbc->state;

    /* Caller must have validated the plane and ownership already. */
    WARN_ON(plane_state->no_fbc_reason);
    WARN_ON(fbc_state->plane && fbc_state->plane != plane);

    fbc_state->plane = plane;

    /* FBC1 compression interval: arbitrary choice of 1 second */
    fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);

    fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);

    drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
            !plane_state->ggtt_vma->fence);

    /* -1 means no fence; HW write tracking then relies on SW tracking. */
    if (plane_state->flags & PLANE_HAS_FENCE &&
        plane_state->ggtt_vma->fence)
        fbc_state->fence_id = plane_state->ggtt_vma->fence->id;
    else
        fbc_state->fence_id = -1;

    fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state);
    fbc_state->cfb_size = intel_fbc_cfb_size(plane_state);
    fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state);
}
1007 
/*
 * Check whether the plane's fencing situation allows FBC: gen9+ relies
 * purely on software frontbuffer tracking, older platforms require a
 * CPU fence on the framebuffer.
 */
static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
{
    struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);

    /* The use of a CPU fence is one of two ways to detect writes by the
     * CPU to the scanout and trigger updates to the FBC.
     *
     * The other method is by software tracking (see
     * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
     * the current compressed buffer and recompress it.
     *
     * Note that is possible for a tiled surface to be unmappable (and
     * so have no fence associated with it) due to aperture constraints
     * at the time of pinning.
     *
     * FIXME with 90/270 degree rotation we should use the fence on
     * the normal GTT view (the rotated view doesn't even have a
     * fence). Would need changes to the FBC fence Y offset as well.
     * For now this will effectively disable FBC with 90/270 degree
     * rotation.
     */
    return DISPLAY_VER(i915) >= 9 ||
        (plane_state->flags & PLANE_HAS_FENCE &&
         plane_state->ggtt_vma->fence);
}
1033 
1034 static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
1035 {
1036     struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1037     struct intel_fbc *fbc = plane->fbc;
1038 
1039     return intel_fbc_min_limit(plane_state) <= fbc->limit &&
1040         intel_fbc_cfb_size(plane_state) <= fbc->compressed_fb.size * fbc->limit;
1041 }
1042 
1043 static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
1044 {
1045     return !plane_state->no_fbc_reason &&
1046         intel_fbc_is_fence_ok(plane_state) &&
1047         intel_fbc_is_cfb_ok(plane_state);
1048 }
1049 
/*
 * Run all FBC eligibility checks for a plane during atomic check and
 * record the first failing reason in plane_state->no_fbc_reason (NULL if
 * the plane is FBC-capable). Returns 0 on success; a negative error code
 * only for actual atomic-state failures, not for FBC ineligibility.
 */
static int intel_fbc_check_plane(struct intel_atomic_state *state,
                 struct intel_plane *plane)
{
    struct drm_i915_private *i915 = to_i915(state->base.dev);
    struct intel_plane_state *plane_state =
        intel_atomic_get_new_plane_state(state, plane);
    const struct drm_framebuffer *fb = plane_state->hw.fb;
    struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
    const struct intel_crtc_state *crtc_state;
    struct intel_fbc *fbc = plane->fbc;

    if (!fbc)
        return 0;

    if (intel_vgpu_active(i915)) {
        plane_state->no_fbc_reason = "VGPU active";
        return 0;
    }

    if (!i915->params.enable_fbc) {
        plane_state->no_fbc_reason = "disabled per module param or by default";
        return 0;
    }

    if (!plane_state->uapi.visible) {
        plane_state->no_fbc_reason = "plane not visible";
        return 0;
    }

    crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

    if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
        plane_state->no_fbc_reason = "interlaced mode not supported";
        return 0;
    }

    if (crtc_state->double_wide) {
        plane_state->no_fbc_reason = "double wide pipe not supported";
        return 0;
    }

    /*
     * Display 12+ is not supporting FBC with PSR2.
     * Recommendation is to keep this combination disabled
     * Bspec: 50422 HSD: 14010260002
     */
    if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
        plane_state->no_fbc_reason = "PSR2 enabled";
        return 0;
    }

    if (!pixel_format_is_valid(plane_state)) {
        plane_state->no_fbc_reason = "pixel format not supported";
        return 0;
    }

    if (!tiling_is_valid(plane_state)) {
        plane_state->no_fbc_reason = "tiling not supported";
        return 0;
    }

    if (!rotation_is_valid(plane_state)) {
        plane_state->no_fbc_reason = "rotation not supported";
        return 0;
    }

    if (!stride_is_valid(plane_state)) {
        plane_state->no_fbc_reason = "stride not supported";
        return 0;
    }

    if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
        fb->format->has_alpha) {
        plane_state->no_fbc_reason = "per-pixel alpha not supported";
        return 0;
    }

    if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
        plane_state->no_fbc_reason = "plane size too big";
        return 0;
    }

    /*
     * Work around a problem on GEN9+ HW, where enabling FBC on a plane
     * having a Y offset that isn't divisible by 4 causes FIFO underrun
     * and screen flicker.
     */
    if (DISPLAY_VER(i915) >= 9 &&
        plane_state->view.color_plane[0].y & 3) {
        plane_state->no_fbc_reason = "plane start Y offset misaligned";
        return 0;
    }

    /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
    if (DISPLAY_VER(i915) >= 11 &&
        (plane_state->view.color_plane[0].y +
         (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
        plane_state->no_fbc_reason = "plane end Y offset misaligned";
        return 0;
    }

    /* WaFbcExceedCdClockThreshold:hsw,bdw */
    if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
        const struct intel_cdclk_state *cdclk_state;

        cdclk_state = intel_atomic_get_cdclk_state(state);
        if (IS_ERR(cdclk_state))
            return PTR_ERR(cdclk_state);

        /* FBC requires pixel rate below 95% of cdclk on hsw/bdw. */
        if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) {
            plane_state->no_fbc_reason = "pixel rate too high";
            return 0;
        }
    }

    /* All checks passed: the plane is FBC-capable. */
    plane_state->no_fbc_reason = NULL;

    return 0;
}
1169 
1170 
/*
 * Determine whether a plane update can be handled by simply nuking
 * (recompressing) the CFB instead of a full FBC disable/re-enable cycle.
 * That is only safe when the update doesn't change anything FBC depends
 * on: format, modifier, strides and CFB geometry.
 */
static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
                    struct intel_crtc *crtc,
                    struct intel_plane *plane)
{
    const struct intel_crtc_state *new_crtc_state =
        intel_atomic_get_new_crtc_state(state, crtc);
    const struct intel_plane_state *old_plane_state =
        intel_atomic_get_old_plane_state(state, plane);
    const struct intel_plane_state *new_plane_state =
        intel_atomic_get_new_plane_state(state, plane);
    const struct drm_framebuffer *old_fb = old_plane_state->hw.fb;
    const struct drm_framebuffer *new_fb = new_plane_state->hw.fb;

    if (drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi))
        return false;

    if (!intel_fbc_is_ok(old_plane_state) ||
        !intel_fbc_is_ok(new_plane_state))
        return false;

    if (old_fb->format->format != new_fb->format->format)
        return false;

    if (old_fb->modifier != new_fb->modifier)
        return false;

    if (intel_fbc_plane_stride(old_plane_state) !=
        intel_fbc_plane_stride(new_plane_state))
        return false;

    if (intel_fbc_cfb_stride(old_plane_state) !=
        intel_fbc_cfb_stride(new_plane_state))
        return false;

    if (intel_fbc_cfb_size(old_plane_state) !=
        intel_fbc_cfb_size(new_plane_state))
        return false;

    if (intel_fbc_override_cfb_stride(old_plane_state) !=
        intel_fbc_override_cfb_stride(new_plane_state))
        return false;

    return true;
}
1215 
/*
 * Prepare FBC for an upcoming plane update. If the update can't be
 * handled with a simple nuke, deactivate FBC and report whether the
 * caller needs to wait for a vblank before touching the plane
 * (Display WA #1198). Called with fbc->lock held.
 */
static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
                   struct intel_crtc *crtc,
                   struct intel_plane *plane)
{
    struct drm_i915_private *i915 = to_i915(state->base.dev);
    struct intel_fbc *fbc = plane->fbc;
    bool need_vblank_wait = false;

    fbc->flip_pending = true;

    if (intel_fbc_can_flip_nuke(state, crtc, plane))
        return need_vblank_wait;

    intel_fbc_deactivate(fbc, "update pending");

    /*
     * Display WA #1198: glk+
     * Need an extra vblank wait between FBC disable and most plane
     * updates. Bspec says this is only needed for plane disable, but
     * that is not true. Touching most plane registers will cause the
     * corruption to appear. Also SKL/derivatives do not seem to be
     * affected.
     *
     * TODO: could optimize this a bit by sampling the frame
     * counter when we disable FBC (if it was already done earlier)
     * and skipping the extra vblank wait before the plane update
     * if at least one frame has already passed.
     */
    if (fbc->activated && DISPLAY_VER(i915) >= 10)
        need_vblank_wait = true;
    fbc->activated = false;

    return need_vblank_wait;
}
1250 
/*
 * Run FBC pre-update handling for every plane on @crtc that currently
 * owns its FBC instance. Returns true if the caller must wait for a
 * vblank before applying the plane update.
 */
bool intel_fbc_pre_update(struct intel_atomic_state *state,
              struct intel_crtc *crtc)
{
    const struct intel_plane_state *plane_state;
    bool need_vblank_wait = false;
    struct intel_plane *plane;
    int i;

    for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
        struct intel_fbc *fbc = plane->fbc;

        if (!fbc || plane->pipe != crtc->pipe)
            continue;

        mutex_lock(&fbc->lock);

        /* Only act if this plane is the one bound to the FBC instance. */
        if (fbc->state.plane == plane)
            need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane);

        mutex_unlock(&fbc->lock);
    }

    return need_vblank_wait;
}
1275 
/*
 * Disable FBC: release the CFB and clear the software state.
 * Called with fbc->lock held and FBC already deactivated in hardware.
 */
static void __intel_fbc_disable(struct intel_fbc *fbc)
{
    struct drm_i915_private *i915 = fbc->i915;
    struct intel_plane *plane = fbc->state.plane;

    drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
    drm_WARN_ON(&i915->drm, fbc->active);

    drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n",
            plane->base.base.id, plane->base.name);

    __intel_fbc_cleanup_cfb(fbc);

    fbc->state.plane = NULL;
    fbc->flip_pending = false;
    fbc->busy_bits = 0;
}
1293 
1294 static void __intel_fbc_post_update(struct intel_fbc *fbc)
1295 {
1296     struct drm_i915_private *i915 = fbc->i915;
1297 
1298     drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
1299 
1300     if (!fbc->busy_bits)
1301         intel_fbc_activate(fbc);
1302     else
1303         intel_fbc_deactivate(fbc, "frontbuffer write");
1304 }
1305 
/*
 * Run FBC post-update handling for every plane on @crtc that owns its
 * FBC instance: clear the pending-flip flag and (re)activate FBC.
 */
void intel_fbc_post_update(struct intel_atomic_state *state,
               struct intel_crtc *crtc)
{
    const struct intel_plane_state *plane_state;
    struct intel_plane *plane;
    int i;

    for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
        struct intel_fbc *fbc = plane->fbc;

        if (!fbc || plane->pipe != crtc->pipe)
            continue;

        mutex_lock(&fbc->lock);

        if (fbc->state.plane == plane) {
            fbc->flip_pending = false;
            __intel_fbc_post_update(fbc);
        }

        mutex_unlock(&fbc->lock);
    }
}
1329 
1330 static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
1331 {
1332     if (fbc->state.plane)
1333         return fbc->state.plane->frontbuffer_bit;
1334     else
1335         return 0;
1336 }
1337 
/*
 * Software frontbuffer tracking: a CPU write to the scanout has been
 * detected, so record the dirty bits and deactivate FBC until the
 * matching flush arrives. Flips and cursor updates are handled by the
 * normal atomic paths and are ignored here.
 */
static void __intel_fbc_invalidate(struct intel_fbc *fbc,
                   unsigned int frontbuffer_bits,
                   enum fb_op_origin origin)
{
    if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
        return;

    mutex_lock(&fbc->lock);

    /* Only care about the plane currently bound to this FBC instance. */
    frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
    if (!frontbuffer_bits)
        goto out;

    fbc->busy_bits |= frontbuffer_bits;
    intel_fbc_deactivate(fbc, "frontbuffer write");

out:
    mutex_unlock(&fbc->lock);
}
1357 
1358 void intel_fbc_invalidate(struct drm_i915_private *i915,
1359               unsigned int frontbuffer_bits,
1360               enum fb_op_origin origin)
1361 {
1362     struct intel_fbc *fbc;
1363     enum intel_fbc_id fbc_id;
1364 
1365     for_each_intel_fbc(i915, fbc, fbc_id)
1366         __intel_fbc_invalidate(fbc, frontbuffer_bits, origin);
1367 
1368 }
1369 
/*
 * Software frontbuffer tracking: the CPU writes recorded by
 * __intel_fbc_invalidate() are done, so clear the dirty bits and either
 * nuke the CFB (FBC still active) or re-activate FBC, unless other
 * writes or a flip are still pending.
 */
static void __intel_fbc_flush(struct intel_fbc *fbc,
                  unsigned int frontbuffer_bits,
                  enum fb_op_origin origin)
{
    mutex_lock(&fbc->lock);

    frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
    if (!frontbuffer_bits)
        goto out;

    fbc->busy_bits &= ~frontbuffer_bits;

    /* Flips/cursor updates handle FBC via the atomic paths. */
    if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
        goto out;

    if (fbc->busy_bits || fbc->flip_pending)
        goto out;

    if (fbc->active)
        intel_fbc_nuke(fbc);
    else
        intel_fbc_activate(fbc);

out:
    mutex_unlock(&fbc->lock);
}
1396 
1397 void intel_fbc_flush(struct drm_i915_private *i915,
1398              unsigned int frontbuffer_bits,
1399              enum fb_op_origin origin)
1400 {
1401     struct intel_fbc *fbc;
1402     enum intel_fbc_id fbc_id;
1403 
1404     for_each_intel_fbc(i915, fbc, fbc_id)
1405         __intel_fbc_flush(fbc, frontbuffer_bits, origin);
1406 }
1407 
1408 int intel_fbc_atomic_check(struct intel_atomic_state *state)
1409 {
1410     struct intel_plane_state *plane_state;
1411     struct intel_plane *plane;
1412     int i;
1413 
1414     for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1415         int ret;
1416 
1417         ret = intel_fbc_check_plane(state, plane);
1418         if (ret)
1419             return ret;
1420     }
1421 
1422     return 0;
1423 }
1424 
1425 static void __intel_fbc_enable(struct intel_atomic_state *state,
1426                    struct intel_crtc *crtc,
1427                    struct intel_plane *plane)
1428 {
1429     struct drm_i915_private *i915 = to_i915(state->base.dev);
1430     const struct intel_plane_state *plane_state =
1431         intel_atomic_get_new_plane_state(state, plane);
1432     struct intel_fbc *fbc = plane->fbc;
1433 
1434     if (fbc->state.plane) {
1435         if (fbc->state.plane != plane)
1436             return;
1437 
1438         if (intel_fbc_is_ok(plane_state)) {
1439             intel_fbc_update_state(state, crtc, plane);
1440             return;
1441         }
1442 
1443         __intel_fbc_disable(fbc);
1444     }
1445 
1446     drm_WARN_ON(&i915->drm, fbc->active);
1447 
1448     fbc->no_fbc_reason = plane_state->no_fbc_reason;
1449     if (fbc->no_fbc_reason)
1450         return;
1451 
1452     if (!intel_fbc_is_fence_ok(plane_state)) {
1453         fbc->no_fbc_reason = "framebuffer not fenced";
1454         return;
1455     }
1456 
1457     if (fbc->underrun_detected) {
1458         fbc->no_fbc_reason = "FIFO underrun";
1459         return;
1460     }
1461 
1462     if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state),
1463                 intel_fbc_min_limit(plane_state))) {
1464         fbc->no_fbc_reason = "not enough stolen memory";
1465         return;
1466     }
1467 
1468     drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n",
1469             plane->base.base.id, plane->base.name);
1470     fbc->no_fbc_reason = "FBC enabled but not active yet\n";
1471 
1472     intel_fbc_update_state(state, crtc, plane);
1473 
1474     intel_fbc_program_workarounds(fbc);
1475     intel_fbc_program_cfb(fbc);
1476 }
1477 
1478 /**
1479  * intel_fbc_disable - disable FBC if it's associated with crtc
1480  * @crtc: the CRTC
1481  *
1482  * This function disables FBC if it's associated with the provided CRTC.
1483  */
1484 void intel_fbc_disable(struct intel_crtc *crtc)
1485 {
1486     struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1487     struct intel_plane *plane;
1488 
1489     for_each_intel_plane(&i915->drm, plane) {
1490         struct intel_fbc *fbc = plane->fbc;
1491 
1492         if (!fbc || plane->pipe != crtc->pipe)
1493             continue;
1494 
1495         mutex_lock(&fbc->lock);
1496         if (fbc->state.plane == plane)
1497             __intel_fbc_disable(fbc);
1498         mutex_unlock(&fbc->lock);
1499     }
1500 }
1501 
/*
 * Update FBC for all planes on @crtc after an atomic commit: disable it
 * for planes that became ineligible during a fastset, otherwise try to
 * (re-)enable it.
 */
void intel_fbc_update(struct intel_atomic_state *state,
              struct intel_crtc *crtc)
{
    const struct intel_crtc_state *crtc_state =
        intel_atomic_get_new_crtc_state(state, crtc);
    const struct intel_plane_state *plane_state;
    struct intel_plane *plane;
    int i;

    for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
        struct intel_fbc *fbc = plane->fbc;

        if (!fbc || plane->pipe != crtc->pipe)
            continue;

        mutex_lock(&fbc->lock);

        if (crtc_state->update_pipe && plane_state->no_fbc_reason) {
            if (fbc->state.plane == plane)
                __intel_fbc_disable(fbc);
        } else {
            __intel_fbc_enable(state, crtc, plane);
        }

        mutex_unlock(&fbc->lock);
    }
}
1529 
/*
 * Deferred work that fully disables FBC after a FIFO underrun was seen.
 * Runs from the workqueue because the underrun IRQ handler cannot take
 * fbc->lock or wait for vblanks.
 */
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
    struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
    struct drm_i915_private *i915 = fbc->i915;

    mutex_lock(&fbc->lock);

    /* Maybe we were scheduled twice. */
    if (fbc->underrun_detected || !fbc->state.plane)
        goto out;

    drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n");
    fbc->underrun_detected = true;

    intel_fbc_deactivate(fbc, "FIFO underrun");
    if (!fbc->flip_pending)
        intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe));
    __intel_fbc_disable(fbc);
out:
    mutex_unlock(&fbc->lock);
}
1551 
/*
 * Clear the FIFO underrun flag for one FBC instance so FBC may be
 * enabled again. Cancels any pending underrun work first so the flag
 * can't be re-set behind our back by an already-queued work item.
 */
static void __intel_fbc_reset_underrun(struct intel_fbc *fbc)
{
    struct drm_i915_private *i915 = fbc->i915;

    cancel_work_sync(&fbc->underrun_work);

    mutex_lock(&fbc->lock);

    if (fbc->underrun_detected) {
        drm_dbg_kms(&i915->drm,
                "Re-allowing FBC after fifo underrun\n");
        fbc->no_fbc_reason = "FIFO underrun cleared";
    }

    fbc->underrun_detected = false;
    mutex_unlock(&fbc->lock);
}
1569 
1570 /*
1571  * intel_fbc_reset_underrun - reset FBC fifo underrun status.
1572  * @i915: the i915 device
1573  *
1574  * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
1575  * want to re-enable FBC after an underrun to increase test coverage.
1576  */
1577 void intel_fbc_reset_underrun(struct drm_i915_private *i915)
1578 {
1579     struct intel_fbc *fbc;
1580     enum intel_fbc_id fbc_id;
1581 
1582     for_each_intel_fbc(i915, fbc, fbc_id)
1583         __intel_fbc_reset_underrun(fbc);
1584 }
1585 
/*
 * IRQ-context half of underrun handling: schedule the workqueue item
 * that actually disables FBC, unless the underrun was already seen.
 */
static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
{
    /*
     * There's no guarantee that underrun_detected won't be set to true
     * right after this check and before the work is scheduled, but that's
     * not a problem since we'll check it again under the work function
     * while FBC is locked. This check here is just to prevent us from
     * unnecessarily scheduling the work, and it relies on the fact that we
     * never switch underrun_detect back to false after it's true.
     */
    if (READ_ONCE(fbc->underrun_detected))
        return;

    schedule_work(&fbc->underrun_work);
}
1601 
1602 /**
1603  * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
1604  * @i915: i915 device
1605  *
1606  * Without FBC, most underruns are harmless and don't really cause too many
1607  * problems, except for an annoying message on dmesg. With FBC, underruns can
1608  * become black screens or even worse, especially when paired with bad
1609  * watermarks. So in order for us to be on the safe side, completely disable FBC
1610  * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
1611  * already suggests that watermarks may be bad, so try to be as safe as
1612  * possible.
1613  *
1614  * This function is called from the IRQ handler.
1615  */
1616 void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
1617 {
1618     struct intel_fbc *fbc;
1619     enum intel_fbc_id fbc_id;
1620 
1621     for_each_intel_fbc(i915, fbc, fbc_id)
1622         __intel_fbc_handle_fifo_underrun_irq(fbc);
1623 }
1624 
1625 /*
1626  * The DDX driver changes its behavior depending on the value it reads from
1627  * i915.enable_fbc, so sanitize it by translating the default value into either
1628  * 0 or 1 in order to allow it to know what's going on.
1629  *
1630  * Notice that this is done at driver initialization and we still allow user
1631  * space to change the value during runtime without sanitizing it again. IGT
1632  * relies on being able to change i915.enable_fbc at runtime.
1633  */
1634 static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
1635 {
1636     if (i915->params.enable_fbc >= 0)
1637         return !!i915->params.enable_fbc;
1638 
1639     if (!HAS_FBC(i915))
1640         return 0;
1641 
1642     if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9)
1643         return 1;
1644 
1645     return 0;
1646 }
1647 
1648 static bool need_fbc_vtd_wa(struct drm_i915_private *i915)
1649 {
1650     /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
1651     if (i915_vtd_active(i915) &&
1652         (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
1653         drm_info(&i915->drm,
1654              "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1655         return true;
1656     }
1657 
1658     return false;
1659 }
1660 
/* Associate @fbc as the FBC instance used by @plane. */
void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
{
    plane->fbc = fbc;
}
1665 
/*
 * Allocate and initialize one FBC instance, selecting the hardware
 * vfuncs for this platform generation. Returns NULL on allocation
 * failure.
 */
static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
                      enum intel_fbc_id fbc_id)
{
    struct intel_fbc *fbc;

    fbc = kzalloc(sizeof(*fbc), GFP_KERNEL);
    if (!fbc)
        return NULL;

    fbc->id = fbc_id;
    fbc->i915 = i915;
    INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
    mutex_init(&fbc->lock);

    /* Newest platform checks first; G4X must be tested before gen4. */
    if (DISPLAY_VER(i915) >= 7)
        fbc->funcs = &ivb_fbc_funcs;
    else if (DISPLAY_VER(i915) == 6)
        fbc->funcs = &snb_fbc_funcs;
    else if (DISPLAY_VER(i915) == 5)
        fbc->funcs = &ilk_fbc_funcs;
    else if (IS_G4X(i915))
        fbc->funcs = &g4x_fbc_funcs;
    else if (DISPLAY_VER(i915) == 4)
        fbc->funcs = &i965_fbc_funcs;
    else
        fbc->funcs = &i8xx_fbc_funcs;

    return fbc;
}
1695 
1696 /**
1697  * intel_fbc_init - Initialize FBC
1698  * @i915: the i915 device
1699  *
1700  * This function might be called during PM init process.
1701  */
1702 void intel_fbc_init(struct drm_i915_private *i915)
1703 {
1704     enum intel_fbc_id fbc_id;
1705 
1706     if (!drm_mm_initialized(&i915->mm.stolen))
1707         mkwrite_device_info(i915)->display.fbc_mask = 0;
1708 
1709     if (need_fbc_vtd_wa(i915))
1710         mkwrite_device_info(i915)->display.fbc_mask = 0;
1711 
1712     i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
1713     drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
1714             i915->params.enable_fbc);
1715 
1716     for_each_fbc_id(i915, fbc_id)
1717         i915->fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
1718 }
1719 
1720 /**
1721  * intel_fbc_sanitize - Sanitize FBC
1722  * @i915: the i915 device
1723  *
1724  * Make sure FBC is initially disabled since we have no
1725  * idea eg. into which parts of stolen it might be scribbling
1726  * into.
1727  */
1728 void intel_fbc_sanitize(struct drm_i915_private *i915)
1729 {
1730     struct intel_fbc *fbc;
1731     enum intel_fbc_id fbc_id;
1732 
1733     for_each_intel_fbc(i915, fbc, fbc_id) {
1734         if (intel_fbc_hw_is_active(fbc))
1735             intel_fbc_hw_deactivate(fbc);
1736     }
1737 }
1738 
/*
 * debugfs: print the FBC state (active/disabled with reason) and the
 * per-plane FBC eligibility for this FBC instance.
 */
static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
    struct intel_fbc *fbc = m->private;
    struct drm_i915_private *i915 = fbc->i915;
    struct intel_plane *plane;
    intel_wakeref_t wakeref;

    drm_modeset_lock_all(&i915->drm);

    /* Keep the device awake while poking at FBC state. */
    wakeref = intel_runtime_pm_get(&i915->runtime_pm);
    mutex_lock(&fbc->lock);

    if (fbc->active) {
        seq_puts(m, "FBC enabled\n");
        seq_printf(m, "Compressing: %s\n",
               str_yes_no(intel_fbc_is_compressing(fbc)));
    } else {
        seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
    }

    for_each_intel_plane(&i915->drm, plane) {
        const struct intel_plane_state *plane_state =
            to_intel_plane_state(plane->base.state);

        if (plane->fbc != fbc)
            continue;

        /* '*' marks the plane currently bound to this FBC instance. */
        seq_printf(m, "%c [PLANE:%d:%s]: %s\n",
               fbc->state.plane == plane ? '*' : ' ',
               plane->base.base.id, plane->base.name,
               plane_state->no_fbc_reason ?: "FBC possible");
    }

    mutex_unlock(&fbc->lock);
    intel_runtime_pm_put(&i915->runtime_pm, wakeref);

    drm_modeset_unlock_all(&i915->drm);

    return 0;
}
1779 
/* Generates intel_fbc_debugfs_status_fops for the debugfs file. */
DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status);
1781 
/* debugfs: report the current false color setting. */
static int intel_fbc_debugfs_false_color_get(void *data, u64 *val)
{
    struct intel_fbc *fbc = data;

    *val = fbc->false_color;

    return 0;
}
1790 
/*
 * debugfs: set false color mode; applied to hardware immediately if FBC
 * is currently active.
 */
static int intel_fbc_debugfs_false_color_set(void *data, u64 val)
{
    struct intel_fbc *fbc = data;

    mutex_lock(&fbc->lock);

    fbc->false_color = val;

    if (fbc->active)
        fbc->funcs->set_false_color(fbc, fbc->false_color);

    mutex_unlock(&fbc->lock);

    return 0;
}
1806 
/* Generates intel_fbc_debugfs_false_color_fops for the debugfs file. */
DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
            intel_fbc_debugfs_false_color_get,
            intel_fbc_debugfs_false_color_set,
            "%llu\n");
1811 
/*
 * Register the debugfs files for one FBC instance under @parent.
 * The false color file is only created on platforms whose vfuncs
 * support it.
 */
static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
                  struct dentry *parent)
{
    debugfs_create_file("i915_fbc_status", 0444, parent,
                fbc, &intel_fbc_debugfs_status_fops);

    if (fbc->funcs->set_false_color)
        debugfs_create_file("i915_fbc_false_color", 0644, parent,
                    fbc, &intel_fbc_debugfs_false_color_fops);
}
1822 
/*
 * Register per-CRTC FBC debugfs files, keyed off the primary plane's
 * FBC instance (if any).
 */
void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
{
    struct intel_plane *plane = to_intel_plane(crtc->base.primary);

    if (plane->fbc)
        intel_fbc_debugfs_add(plane->fbc, crtc->base.debugfs_entry);
}
1830 
/* FIXME: remove this once igt is on board with per-crtc stuff */
void intel_fbc_debugfs_register(struct drm_i915_private *i915)
{
    struct drm_minor *minor = i915->drm.primary;
    struct intel_fbc *fbc;

    /* Only FBC A gets the legacy device-level debugfs entries. */
    fbc = i915->fbc[INTEL_FBC_A];
    if (fbc)
        intel_fbc_debugfs_add(fbc, minor->debugfs_root);
}