0001
0002 #include <linux/string.h>
0003 #include <drm/drm_crtc.h>
0004 #include <drm/drm_atomic_helper.h>
0005 #include <drm/drm_vblank.h>
0006 #include <drm/drm_vblank_work.h>
0007
0008 #include <nvif/class.h>
0009 #include <nvif/cl0002.h>
0010 #include <nvif/timer.h>
0011
0012 #include <nvhw/class/cl907d.h>
0013
0014 #include "nouveau_drv.h"
0015 #include "core.h"
0016 #include "head.h"
0017 #include "wndw.h"
0018 #include "handles.h"
0019 #include "crc.h"
0020
/*
 * Userspace-visible names for the CRC sources, exposed through the DRM
 * debugfs CRC ABI; indexed by enum nv50_crc_source.
 */
static const char * const nv50_crc_sources[] = {
	[NV50_CRC_SOURCE_NONE] = "none",
	[NV50_CRC_SOURCE_AUTO] = "auto",
	[NV50_CRC_SOURCE_RG] = "rg",
	[NV50_CRC_SOURCE_OUTP_ACTIVE] = "outp-active",
	[NV50_CRC_SOURCE_OUTP_COMPLETE] = "outp-complete",
	[NV50_CRC_SOURCE_OUTP_INACTIVE] = "outp-inactive",
};
0029
0030 static int nv50_crc_parse_source(const char *buf, enum nv50_crc_source *s)
0031 {
0032 int i;
0033
0034 if (!buf) {
0035 *s = NV50_CRC_SOURCE_NONE;
0036 return 0;
0037 }
0038
0039 i = match_string(nv50_crc_sources, ARRAY_SIZE(nv50_crc_sources), buf);
0040 if (i < 0)
0041 return i;
0042
0043 *s = i;
0044 return 0;
0045 }
0046
0047 int
0048 nv50_crc_verify_source(struct drm_crtc *crtc, const char *source_name,
0049 size_t *values_cnt)
0050 {
0051 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
0052 enum nv50_crc_source source;
0053
0054 if (nv50_crc_parse_source(source_name, &source) < 0) {
0055 NV_DEBUG(drm, "unknown source %s\n", source_name);
0056 return -EINVAL;
0057 }
0058
0059 *values_cnt = 1;
0060 return 0;
0061 }
0062
/* drm_crtc_funcs.get_crc_sources hook: report all supported source names. */
const char *const *nv50_crc_get_sources(struct drm_crtc *crtc, size_t *count)
{
	*count = ARRAY_SIZE(nv50_crc_sources);
	return nv50_crc_sources;
}
0068
/*
 * Point the hardware CRC notifier at @ctx (or detach it when @ctx is NULL)
 * and push the change with a core channel update.  The update is submitted
 * with an all-zero interlock mask and without requesting a notification.
 */
static void
nv50_crc_program_ctx(struct nv50_head *head,
		     struct nv50_crc_notifier_ctx *ctx)
{
	struct nv50_disp *disp = nv50_disp(head->base.base.dev);
	struct nv50_core *core = disp->core;
	u32 interlock[NV50_DISP_INTERLOCK__SIZE] = { 0 };

	core->func->crc->set_ctx(head, ctx);
	core->func->update(core, interlock, false);
}
0080
/*
 * Vblank work that flips between the two CRC notifier contexts: detach the
 * current context, attach the other one, and set crc->ctx_changed so the
 * vblank handler knows a flip is in flight and must drain the old context
 * once the hardware reports it finished.
 */
static void nv50_crc_ctx_flip_work(struct kthread_work *base)
{
	struct drm_vblank_work *work = to_drm_vblank_work(base);
	struct nv50_crc *crc = container_of(work, struct nv50_crc, flip_work);
	struct nv50_head *head = container_of(crc, struct nv50_head, crc);
	struct drm_crtc *crtc = &head->base.base;
	struct drm_device *dev = crtc->dev;
	struct nv50_disp *disp = nv50_disp(dev);
	const uint64_t start_vbl = drm_crtc_vblank_count(crtc);
	uint64_t end_vbl;
	u8 new_idx = crc->ctx_idx ^ 1;

	/*
	 * The flip has to land within this vblank, so don't sleep on the
	 * display mutex — on contention, retry on the next vblank instead.
	 */
	if (!mutex_trylock(&disp->mutex)) {
		drm_dbg_kms(dev, "Lock contended, delaying CRC ctx flip for %s\n", crtc->name);
		drm_vblank_work_schedule(work, start_vbl + 1, true);
		return;
	}

	drm_dbg_kms(dev, "Flipping notifier ctx for %s (%d -> %d)\n",
		    crtc->name, crc->ctx_idx, new_idx);

	/* Detach the old notifier first, then attach the new one. */
	nv50_crc_program_ctx(head, NULL);
	nv50_crc_program_ctx(head, &crc->ctx[new_idx]);
	mutex_unlock(&disp->mutex);

	/* If the vblank counter advanced, the flip missed its deadline. */
	end_vbl = drm_crtc_vblank_count(crtc);
	if (unlikely(end_vbl != start_vbl))
		NV_ERROR(nouveau_drm(dev),
			 "Failed to flip CRC context on %s on time (%llu > %llu)\n",
			 crtc->name, end_vbl, start_vbl);

	/* Publish the pending flip to the vblank handler. */
	spin_lock_irq(&crc->lock);
	crc->ctx_changed = true;
	spin_unlock_irq(&crc->lock);
}
0120
/* Zero the whole notifier buffer (it lives in I/O-mapped memory). */
static inline void nv50_crc_reset_ctx(struct nv50_crc_notifier_ctx *ctx)
{
	memset_io(ctx->mem.object.map.ptr, 0, ctx->mem.object.map.size);
}
0125
/*
 * Drain the CRC entries the hardware has written into the current notifier
 * context so far, handing each one to the DRM CRC ABI and advancing the
 * frame counter.  Stops at the first entry that still reads back as zero.
 */
static void
nv50_crc_get_entries(struct nv50_head *head,
		     const struct nv50_crc_func *func,
		     enum nv50_crc_source source)
{
	struct drm_crtc *crtc = &head->base.base;
	struct nv50_crc *crc = &head->crc;
	u32 output_crc;

	while (crc->entry_idx < func->num_entries) {
		/*
		 * A zero CRC is taken to mean the hardware hasn't written
		 * this slot yet; resume from crc->entry_idx next vblank.
		 */
		output_crc = func->get_entry(head, &crc->ctx[crc->ctx_idx],
					     source, crc->entry_idx);
		if (!output_crc)
			return;

		drm_crtc_add_crc_entry(crtc, true, crc->frame, &output_crc);
		crc->frame++;
		crc->entry_idx++;
	}
}
0151
/*
 * Per-vblank CRC bookkeeping for a head: drain newly written entries from
 * the active notifier context, and if a context flip is pending and the
 * hardware is done with the old context, complete the switch-over and
 * schedule the next flip.
 */
void nv50_crc_handle_vblank(struct nv50_head *head)
{
	struct drm_crtc *crtc = &head->base.base;
	struct nv50_crc *crc = &head->crc;
	const struct nv50_crc_func *func =
		nv50_disp(head->base.base.dev)->core->func->crc;
	struct nv50_crc_notifier_ctx *ctx;
	bool need_reschedule = false;

	/* Hardware without CRC support: nothing to do. */
	if (!func)
		return;

	/*
	 * Called from vblank handling, so don't spin on the lock — just
	 * skip this vblank if someone else holds it; entries are picked up
	 * on a later vblank via crc->entry_idx.
	 */
	if (!spin_trylock(&crc->lock))
		return;

	/* CRC capture not enabled on this head. */
	if (!crc->src)
		goto out;

	ctx = &crc->ctx[crc->ctx_idx];
	if (crc->ctx_changed && func->ctx_finished(head, ctx)) {
		/* Pull the remaining entries out of the old context. */
		nv50_crc_get_entries(head, func, crc->src);

		crc->ctx_idx ^= 1;
		crc->entry_idx = 0;
		crc->ctx_changed = false;

		/*
		 * The frame during which the hardware switched notifier
		 * contexts gets no CRC entry in either context, so account
		 * for it by skipping one frame number.
		 */
		drm_dbg_kms(head->base.base.dev,
			    "Notifier ctx flip for head-%d finished, lost CRC for frame %llu\n",
			    head->base.index, crc->frame);
		crc->frame++;

		/* Wipe the drained context so it can be reused. */
		nv50_crc_reset_ctx(ctx);
		need_reschedule = true;
	}

	nv50_crc_get_entries(head, func, crc->src);

	/*
	 * Schedule the next flip so it happens before the now-current
	 * context fills up (flip_threshold entries from where we are).
	 */
	if (need_reschedule)
		drm_vblank_work_schedule(&crc->flip_work,
					 drm_crtc_vblank_count(crtc)
					 + crc->flip_threshold
					 - crc->entry_idx,
					 true);

out:
	spin_unlock(&crc->lock);
}
0217
/*
 * Poll (for up to 50ms) until the hardware reports it is finished with the
 * given notifier context.  Logs an error on timeout, otherwise logs how
 * long the wait took at atomic-debug level.
 */
static void nv50_crc_wait_ctx_finished(struct nv50_head *head,
				       const struct nv50_crc_func *func,
				       struct nv50_crc_notifier_ctx *ctx)
{
	struct drm_device *dev = head->base.base.dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	s64 ret;

	ret = nvif_msec(&drm->client.device, 50,
			if (func->ctx_finished(head, ctx)) break;);
	if (ret == -ETIMEDOUT)
		NV_ERROR(drm,
			 "CRC notifier ctx for head %d not finished after 50ms\n",
			 head->base.index);
	else if (ret)
		NV_ATOMIC(drm,
			  "CRC notifier ctx for head-%d finished after %lldns\n",
			  head->base.index, ret);
}
0237
/*
 * For every head clearing its CRC state in this commit, stop vblank-driven
 * reporting: clear crc->src (making nv50_crc_handle_vblank a no-op), drop
 * the vblank reference taken when reporting started, and cancel any
 * pending context-flip work.
 */
void nv50_crc_atomic_stop_reporting(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct nv50_head *head = nv50_head(crtc);
		struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
		struct nv50_crc *crc = &head->crc;

		if (!asyh->clr.crc)
			continue;

		/* The vblank handler reads crc->src under this lock. */
		spin_lock_irq(&crc->lock);
		crc->src = NV50_CRC_SOURCE_NONE;
		spin_unlock_irq(&crc->lock);

		drm_crtc_vblank_put(crtc);
		drm_vblank_work_cancel_sync(&crc->flip_work);

		NV_ATOMIC(nouveau_drm(crtc->dev),
			  "CRC reporting on vblank for head-%d disabled\n",
			  head->base.index);

		/*
		 * NOTE(review): entries still sitting in the notifier
		 * contexts are not drained here — presumably the hardware
		 * side is torn down later in the commit (see
		 * nv50_crc_atomic_release_notifier_contexts); confirm.
		 */
	}
}
0269
0270 void nv50_crc_atomic_init_notifier_contexts(struct drm_atomic_state *state)
0271 {
0272 struct drm_crtc_state *new_crtc_state;
0273 struct drm_crtc *crtc;
0274 int i;
0275
0276 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
0277 struct nv50_head *head = nv50_head(crtc);
0278 struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
0279 struct nv50_crc *crc = &head->crc;
0280 int i;
0281
0282 if (!asyh->set.crc)
0283 continue;
0284
0285 crc->entry_idx = 0;
0286 crc->ctx_changed = false;
0287 for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
0288 nv50_crc_reset_ctx(&crc->ctx[i]);
0289 }
0290 }
0291
/*
 * After CRC capture has been disabled in hardware for the heads in this
 * commit, wait for the hardware to finish with the notifier context(s)
 * still in flight.  If a context flip was pending, the old context is
 * waited on first, then the one the flip switched to.
 */
void nv50_crc_atomic_release_notifier_contexts(struct drm_atomic_state *state)
{
	const struct nv50_crc_func *func =
		nv50_disp(state->dev)->core->func->crc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct nv50_head *head = nv50_head(crtc);
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_crc *crc = &head->crc;
		struct nv50_crc_notifier_ctx *ctx = &crc->ctx[crc->ctx_idx];

		if (!asyh->clr.crc)
			continue;

		if (crc->ctx_changed) {
			/* Flip in flight: wait for the old context first. */
			nv50_crc_wait_ctx_finished(head, func, ctx);
			ctx = &crc->ctx[crc->ctx_idx ^ 1];
		}
		nv50_crc_wait_ctx_finished(head, func, ctx);
	}
}
0316
/*
 * For every head enabling CRC capture in this commit, start vblank-driven
 * reporting: take a vblank reference, seed the frame counter from the
 * current vblank count, publish the source to the vblank handler, and
 * schedule the first notifier context flip flip_threshold vblanks out.
 */
void nv50_crc_atomic_start_reporting(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct nv50_head *head = nv50_head(crtc);
		struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
		struct nv50_crc *crc = &head->crc;
		u64 vbl_count;

		if (!asyh->set.crc)
			continue;

		/* Balanced by drm_crtc_vblank_put() in stop_reporting. */
		drm_crtc_vblank_get(crtc);

		spin_lock_irq(&crc->lock);
		vbl_count = drm_crtc_vblank_count(crtc);
		crc->frame = vbl_count;
		crc->src = asyh->crc.src;
		drm_vblank_work_schedule(&crc->flip_work,
					 vbl_count + crc->flip_threshold,
					 true);
		spin_unlock_irq(&crc->lock);

		NV_ATOMIC(nouveau_drm(crtc->dev),
			  "CRC reporting on vblank for head-%d enabled\n",
			  head->base.index);
	}
}
0348
0349 int nv50_crc_atomic_check_head(struct nv50_head *head,
0350 struct nv50_head_atom *asyh,
0351 struct nv50_head_atom *armh)
0352 {
0353 struct nv50_atom *atom = nv50_atom(asyh->state.state);
0354 bool changed = armh->crc.src != asyh->crc.src;
0355
0356 if (!armh->crc.src && !asyh->crc.src) {
0357 asyh->set.crc = false;
0358 asyh->clr.crc = false;
0359 return 0;
0360 }
0361
0362 if (drm_atomic_crtc_needs_modeset(&asyh->state) || changed) {
0363 asyh->clr.crc = armh->crc.src && armh->state.active;
0364 asyh->set.crc = asyh->crc.src && asyh->state.active;
0365 if (changed)
0366 asyh->set.or |= armh->or.crc_raster !=
0367 asyh->or.crc_raster;
0368
0369 if (asyh->clr.crc && asyh->set.crc)
0370 atom->flush_disable = true;
0371 } else {
0372 asyh->set.crc = false;
0373 asyh->clr.crc = false;
0374 }
0375
0376 return 0;
0377 }
0378
/*
 * Atomic check pass over encoders: if a head is disabling CRC capture and
 * this commit also reprograms that head's encoder, force a flush-disable
 * commit so the CRC teardown happens in its own flush first.
 */
void nv50_crc_atomic_check_outp(struct nv50_atom *atom)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	/* Already forced by nv50_crc_atomic_check_head(); nothing to do. */
	if (atom->flush_disable)
		return;

	for_each_oldnew_crtc_in_state(&atom->state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_outp_atom *outp_atom;
		struct nouveau_encoder *outp;
		struct drm_encoder *encoder, *enc;

		/* Resolve the encoder the head was driving (old state). */
		enc = nv50_head_atom_get_encoder(armh);
		if (!enc)
			continue;

		outp = nv50_real_outp(enc);
		if (!outp)
			continue;

		encoder = &outp->base.base;

		if (!asyh->clr.crc)
			continue;

		/*
		 * Disabling CRC capture can't share a flush with encoder
		 * reprogramming: if this commit also sets state on the
		 * head's encoder, split the commit with a disable flush.
		 */
		list_for_each_entry(outp_atom, &atom->outp, head) {
			if (outp_atom->encoder == encoder) {
				if (outp_atom->set.mask) {
					atom->flush_disable = true;
					return;
				} else {
					break;
				}
			}
		}
	}
}
0425
0426 static enum nv50_crc_source_type
0427 nv50_crc_source_type(struct nouveau_encoder *outp,
0428 enum nv50_crc_source source)
0429 {
0430 struct dcb_output *dcbe = outp->dcb;
0431
0432 switch (source) {
0433 case NV50_CRC_SOURCE_NONE: return NV50_CRC_SOURCE_TYPE_NONE;
0434 case NV50_CRC_SOURCE_RG: return NV50_CRC_SOURCE_TYPE_RG;
0435 default: break;
0436 }
0437
0438 if (dcbe->location != DCB_LOC_ON_CHIP)
0439 return NV50_CRC_SOURCE_TYPE_PIOR;
0440
0441 switch (dcbe->type) {
0442 case DCB_OUTPUT_DP: return NV50_CRC_SOURCE_TYPE_SF;
0443 case DCB_OUTPUT_ANALOG: return NV50_CRC_SOURCE_TYPE_DAC;
0444 default: return NV50_CRC_SOURCE_TYPE_SOR;
0445 }
0446 }
0447
/*
 * Program this head's CRC source in hardware from @asyh, resolving the
 * head's encoder (via nv50_real_outp()) to pick the OR index and source
 * type.  Silently does nothing when no encoder or real output is attached.
 */
void nv50_crc_atomic_set(struct nv50_head *head,
			 struct nv50_head_atom *asyh)
{
	struct drm_crtc *crtc = &head->base.base;
	struct drm_device *dev = crtc->dev;
	struct nv50_crc *crc = &head->crc;
	const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
	struct nouveau_encoder *outp;
	struct drm_encoder *encoder;

	encoder = nv50_head_atom_get_encoder(asyh);
	if (!encoder)
		return;

	outp = nv50_real_outp(encoder);
	if (!outp)
		return;

	/* Attach the currently active notifier context to the capture. */
	func->set_src(head, outp->or, nv50_crc_source_type(outp, asyh->crc.src),
		      &crc->ctx[crc->ctx_idx]);
}
0469
/* Disable CRC capture on this head in hardware (no OR, no notifier). */
void nv50_crc_atomic_clr(struct nv50_head *head)
{
	const struct nv50_crc_func *func =
		nv50_disp(head->base.base.dev)->core->func->crc;

	func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL);
}
0477
/*
 * Map a CRC source onto the HEAD_SET_CONTROL_OUTPUT_RESOURCE CRC raster
 * mode.  The switch deliberately has no default so the compiler can warn
 * when a new nv50_crc_source value is added without a mapping here.
 */
static inline int
nv50_crc_raster_type(enum nv50_crc_source source)
{
	switch (source) {
	case NV50_CRC_SOURCE_NONE:
	case NV50_CRC_SOURCE_AUTO:
	case NV50_CRC_SOURCE_RG:
	case NV50_CRC_SOURCE_OUTP_ACTIVE:
		return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER;
	case NV50_CRC_SOURCE_OUTP_COMPLETE:
		return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER;
	case NV50_CRC_SOURCE_OUTP_INACTIVE:
		return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER;
	}

	return 0;
}
0495
0496
0497
0498
/*
 * Allocate and map one CRC notifier buffer of @len bytes in VRAM and
 * create the DMA context object through which the core channel reaches it
 * (handle derived from the head and @idx).  On DMA-object failure the
 * buffer is freed again.  Returns 0 or a negative error code.
 */
static inline int
nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
		  struct nv50_crc_notifier_ctx *ctx, size_t len, int idx)
{
	struct nv50_core *core = nv50_disp(head->base.base.dev)->core;
	int ret;

	ret = nvif_mem_ctor_map(mmu, "kmsCrcNtfy", NVIF_MEM_VRAM, len, &ctx->mem);
	if (ret)
		return ret;

	ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma",
			       NV50_DISP_HANDLE_CRC_CTX(head, idx),
			       NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = ctx->mem.addr,
					.limit =  ctx->mem.addr
						+ ctx->mem.size - 1,
			       }, sizeof(struct nv_dma_v0),
			       &ctx->ntfy);
	if (ret)
		goto fail_fini;

	return 0;

fail_fini:
	nvif_mem_dtor(&ctx->mem);
	return ret;
}
0530
/* Tear down a notifier context: DMA object first, then the VRAM buffer. */
static inline void
nv50_crc_ctx_fini(struct nv50_crc_notifier_ctx *ctx)
{
	nvif_object_dtor(&ctx->ntfy);
	nvif_mem_dtor(&ctx->mem);
}
0537
/*
 * drm_crtc_funcs.set_crc_source hook: switch the CRC source for @crtc via
 * an atomic commit.  Enabling allocates both notifier contexts up front;
 * disabling — or a failed enable — frees them again.  -EDEADLK from the
 * atomic API is handled with the standard backoff/retry loop.
 */
int nv50_crc_set_source(struct drm_crtc *crtc, const char *source_str)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_crc *crc = &head->crc;
	const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
	struct nvif_mmu *mmu = &nouveau_drm(dev)->client.mmu;
	struct nv50_head_atom *asyh;
	struct drm_crtc_state *crtc_state;
	enum nv50_crc_source source;
	int ret = 0, ctx_flags = 0, i;

	ret = nv50_crc_parse_source(source_str, &source);
	if (ret)
		return ret;

	/*
	 * Lock acquisition is only interruptible when enabling capture.
	 * NOTE(review): presumably so a disable (e.g. during teardown)
	 * can't be aborted by a signal halfway through — confirm.
	 */
	if (source)
		ctx_flags |= DRM_MODESET_ACQUIRE_INTERRUPTIBLE;
	drm_modeset_acquire_init(&ctx, ctx_flags);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_acquire_fini;
	}
	state->acquire_ctx = &ctx;

	/* Enabling: allocate both halves of the double-buffered notifier. */
	if (source) {
		for (i = 0; i < ARRAY_SIZE(head->crc.ctx); i++) {
			ret = nv50_crc_ctx_init(head, mmu, &crc->ctx[i],
						func->notifier_len, i);
			if (ret)
				goto out_ctx_fini;
		}
	}

retry:
	crtc_state = drm_atomic_get_crtc_state(state, &head->base.base);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		if (ret == -EDEADLK)
			goto deadlock;
		else if (ret)
			goto out_drop_locks;
	}
	asyh = nv50_head_atom(crtc_state);
	asyh->crc.src = source;
	asyh->or.crc_raster = nv50_crc_raster_type(source);

	ret = drm_atomic_commit(state);
	if (ret == -EDEADLK)
		goto deadlock;
	else if (ret)
		goto out_drop_locks;

	if (!source) {
		/*
		 * Reset any custom flip threshold configured through
		 * debugfs back to the hardware default.
		 */
		crc->flip_threshold = func->flip_threshold;
	}

out_drop_locks:
	drm_modeset_drop_locks(&ctx);
out_ctx_fini:
	/* Free the notifiers when disabling, or when enabling failed. */
	if (!source || ret) {
		for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
			nv50_crc_ctx_fini(&crc->ctx[i]);
	}
	drm_atomic_state_put(state);
out_acquire_fini:
	drm_modeset_acquire_fini(&ctx);
	return ret;

deadlock:
	drm_atomic_state_clear(state);
	drm_modeset_backoff(&ctx);
	goto retry;
}
0624
0625 static int
0626 nv50_crc_debugfs_flip_threshold_get(struct seq_file *m, void *data)
0627 {
0628 struct nv50_head *head = m->private;
0629 struct drm_crtc *crtc = &head->base.base;
0630 struct nv50_crc *crc = &head->crc;
0631 int ret;
0632
0633 ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
0634 if (ret)
0635 return ret;
0636
0637 seq_printf(m, "%d\n", crc->flip_threshold);
0638
0639 drm_modeset_unlock(&crtc->mutex);
0640 return ret;
0641 }
0642
/* debugfs open: wire reads up to the seq_file show callback above. */
static int
nv50_crc_debugfs_flip_threshold_open(struct inode *inode, struct file *file)
{
	return single_open(file, nv50_crc_debugfs_flip_threshold_get,
			   inode->i_private);
}
0649
0650 static ssize_t
0651 nv50_crc_debugfs_flip_threshold_set(struct file *file,
0652 const char __user *ubuf, size_t len,
0653 loff_t *offp)
0654 {
0655 struct seq_file *m = file->private_data;
0656 struct nv50_head *head = m->private;
0657 struct nv50_head_atom *armh;
0658 struct drm_crtc *crtc = &head->base.base;
0659 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
0660 struct nv50_crc *crc = &head->crc;
0661 const struct nv50_crc_func *func =
0662 nv50_disp(crtc->dev)->core->func->crc;
0663 int value, ret;
0664
0665 ret = kstrtoint_from_user(ubuf, len, 10, &value);
0666 if (ret)
0667 return ret;
0668
0669 if (value > func->flip_threshold)
0670 return -EINVAL;
0671 else if (value == -1)
0672 value = func->flip_threshold;
0673 else if (value < -1)
0674 return -EINVAL;
0675
0676 ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
0677 if (ret)
0678 return ret;
0679
0680 armh = nv50_head_atom(crtc->state);
0681 if (armh->crc.src) {
0682 ret = -EBUSY;
0683 goto out;
0684 }
0685
0686 NV_DEBUG(drm,
0687 "Changing CRC flip threshold for next capture on head-%d to %d\n",
0688 head->base.index, value);
0689 crc->flip_threshold = value;
0690 ret = len;
0691
0692 out:
0693 drm_modeset_unlock(&crtc->mutex);
0694 return ret;
0695 }
0696
/* debugfs file operations for the per-head "flip_threshold" knob. */
static const struct file_operations nv50_crc_flip_threshold_fops = {
	.owner = THIS_MODULE,
	.open = nv50_crc_debugfs_flip_threshold_open,
	.read = seq_read,
	.write = nv50_crc_debugfs_flip_threshold_set,
	.release = single_release,
};
0704
/*
 * Create the per-head "nv_crc" debugfs directory containing the
 * flip_threshold file.  Returns 0 (also when skipped because the hardware
 * has no CRC support or the CRTC has no debugfs entry).
 */
int nv50_head_crc_late_register(struct nv50_head *head)
{
	struct drm_crtc *crtc = &head->base.base;
	const struct nv50_crc_func *func =
		nv50_disp(crtc->dev)->core->func->crc;
	struct dentry *root;

	if (!func || !crtc->debugfs_entry)
		return 0;

	root = debugfs_create_dir("nv_crc", crtc->debugfs_entry);
	debugfs_create_file("flip_threshold", 0644, root, head,
			    &nv50_crc_flip_threshold_fops);

	return 0;
}
0721
/*
 * One-time per-head CRC state setup: default flip threshold from the
 * hardware description, the spinlock guarding capture state, and the
 * vblank work used to flip notifier contexts.
 */
static inline void
nv50_crc_init_head(struct nv50_disp *disp, const struct nv50_crc_func *func,
		   struct nv50_head *head)
{
	struct nv50_crc *crc = &head->crc;

	crc->flip_threshold = func->flip_threshold;
	spin_lock_init(&crc->lock);
	drm_vblank_work_init(&crc->flip_work, &head->base.base,
			     nv50_crc_ctx_flip_work);
}
0733
/* Initialize CRC state for every head; no-op without CRC-capable hardware. */
void nv50_crc_init(struct drm_device *dev)
{
	struct nv50_disp *disp = nv50_disp(dev);
	struct drm_crtc *crtc;
	const struct nv50_crc_func *func = disp->core->func->crc;

	if (!func)
		return;

	drm_for_each_crtc(crtc, dev)
		nv50_crc_init_head(disp, func, nv50_head(crtc));
}