// SPDX-License-Identifier: MIT
#include <linux/string.h>

#include <drm/drm_crtc.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>

#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/timer.h>

#include <nvhw/class/cl907d.h>

#include "nouveau_drv.h"
#include "core.h"
#include "head.h"
#include "crc.h"

static const char * const nv50_crc_sources[] = {
        [NV50_CRC_SOURCE_NONE] = "none",
        [NV50_CRC_SOURCE_AUTO] = "auto",
        [NV50_CRC_SOURCE_RG] = "rg",
        [NV50_CRC_SOURCE_OUTP_ACTIVE] = "outp-active",
        [NV50_CRC_SOURCE_OUTP_COMPLETE] = "outp-complete",
        [NV50_CRC_SOURCE_OUTP_INACTIVE] = "outp-inactive",
};

static int nv50_crc_parse_source(const char *buf, enum nv50_crc_source *s)
{
        int i;

        if (!buf) {
                *s = NV50_CRC_SOURCE_NONE;
                return 0;
        }

        i = match_string(nv50_crc_sources, ARRAY_SIZE(nv50_crc_sources), buf);
        if (i < 0)
                return i;

        *s = i;
        return 0;
}

int nv50_crc_verify_source(struct drm_crtc *crtc, const char *source_name,
                           size_t *values_cnt)
{
        struct nouveau_drm *drm = nouveau_drm(crtc->dev);
        enum nv50_crc_source source;

        if (nv50_crc_parse_source(source_name, &source) < 0) {
                NV_DEBUG(drm, "unknown source %s\n", source_name);
                return -EINVAL;
        }

        *values_cnt = 1;
        return 0;
}

const char *const *nv50_crc_get_sources(struct drm_crtc *crtc, size_t *count)
{
        *count = ARRAY_SIZE(nv50_crc_sources);
        return nv50_crc_sources;
}

static void
nv50_crc_program_ctx(struct nv50_head *head,
                     struct nv50_crc_notifier_ctx *ctx)
{
        struct nv50_disp *disp = nv50_disp(head->base.base.dev);
        struct nv50_core *core = disp->core;
        u32 interlock[NV50_DISP_INTERLOCK__SIZE] = { 0 };

        core->func->crc->set_ctx(head, ctx);
        core->func->update(core, interlock, false);
}

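/*
 * Note that nv50_crc_program_ctx() kicks an UPDATE on the core channel
 * immediately, with an empty interlock mask and without waiting on a
 * notifier. Callers that need a full detach/attach cycle, like the flip
 * worker below, make two back-to-back calls under disp->mutex.
 */
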
static void nv50_crc_ctx_flip_work(struct kthread_work *base)
{
        struct drm_vblank_work *work = to_drm_vblank_work(base);
        struct nv50_crc *crc = container_of(work, struct nv50_crc, flip_work);
        struct nv50_head *head = container_of(crc, struct nv50_head, crc);
        struct drm_crtc *crtc = &head->base.base;
        struct nv50_disp *disp = nv50_disp(crtc->dev);
        u8 new_idx = crc->ctx_idx ^ 1;

        /*
         * We don't want to accidentally wait for longer than the vblank, so
         * try again for the next vblank if we don't grab the lock
         */
        if (!mutex_trylock(&disp->mutex)) {
                DRM_DEV_DEBUG_KMS(crtc->dev->dev,
                                  "Lock contended, delaying CRC ctx flip for head-%d\n",
                                  head->base.index);
                drm_vblank_work_schedule(work,
                                         drm_crtc_vblank_count(crtc) + 1,
                                         true);
                return;
        }

        DRM_DEV_DEBUG_KMS(crtc->dev->dev,
                          "Flipping notifier ctx for head %d (%d -> %d)\n",
                          drm_crtc_index(crtc), crc->ctx_idx, new_idx);

        nv50_crc_program_ctx(head, NULL);
        nv50_crc_program_ctx(head, &crc->ctx[new_idx]);
        mutex_unlock(&disp->mutex);

        spin_lock_irq(&crc->lock);
        crc->ctx_changed = true;
        spin_unlock_irq(&crc->lock);
}

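/*
 * The notifier memory is double-buffered: crc->ctx[] holds two contexts and
 * crc->ctx_idx names the one the hardware is currently writing into, so
 * "crc->ctx_idx ^ 1" above always selects the idle context. Flipping to the
 * idle context before the active one fills up lets the hardware keep
 * writing CRCs while the vblank handler drains the retired context.
 */
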
static inline void nv50_crc_reset_ctx(struct nv50_crc_notifier_ctx *ctx)
{
        memset_io(ctx->mem.object.map.ptr, 0, ctx->mem.object.map.size);
}

static void
nv50_crc_get_entries(struct nv50_head *head,
                     const struct nv50_crc_func *func,
                     enum nv50_crc_source source)
{
        struct drm_crtc *crtc = &head->base.base;
        struct nv50_crc *crc = &head->crc;
        u32 output_crc;

        while (crc->entry_idx < func->num_entries) {
                /*
                 * While Nvidia's documentation says CRCs are written on each
                 * subsequent vblank after being enabled, in practice they
                 * aren't written immediately.
                 */
                output_crc = func->get_entry(head, &crc->ctx[crc->ctx_idx],
                                             source, crc->entry_idx);
                if (!output_crc)
                        return;

                drm_crtc_add_crc_entry(crtc, true, crc->frame, &output_crc);
                crc->frame++;
                crc->entry_idx++;
        }
}

void nv50_crc_handle_vblank(struct nv50_head *head)
{
        struct drm_crtc *crtc = &head->base.base;
        struct nv50_crc *crc = &head->crc;
        const struct nv50_crc_func *func =
                nv50_disp(head->base.base.dev)->core->func->crc;
        struct nv50_crc_notifier_ctx *ctx;
        bool need_reschedule = false;

        if (!func)
                return;

        /*
         * We don't lose events if we aren't able to report CRCs until the
         * next vblank, so only report CRCs if the locks we need aren't
         * contended to prevent missing an actual vblank event
         */
        if (!spin_trylock(&crc->lock))
                return;

        if (!crc->src)
                goto out;

        ctx = &crc->ctx[crc->ctx_idx];
        if (crc->ctx_changed && func->ctx_finished(head, ctx)) {
                nv50_crc_get_entries(head, func, crc->src);

                crc->ctx_idx ^= 1;
                crc->entry_idx = 0;
                crc->ctx_changed = false;

                /*
                 * Unfortunately when notifier contexts are changed during CRC
                 * capture, we will inevitably lose the CRC entry for the
                 * frame where the hardware actually latched onto the first
                 * UPDATE. According to Nvidia's hardware engineers, there's
                 * no workaround for this.
                 *
                 * Now, we could try to be smart here and calculate the number
                 * of missed CRCs based on audit timestamps, but those were
                 * removed starting with volta. Since we always flush our
                 * updates back-to-back without waiting, we'll just be
                 * optimistic and assume we always miss exactly one frame.
                 */
                DRM_DEV_DEBUG_KMS(head->base.base.dev->dev,
                                  "Notifier ctx flip for head-%d finished, lost CRC for frame %llu\n",
                                  head->base.index, crc->frame);
                crc->frame++;

                nv50_crc_reset_ctx(ctx);
                need_reschedule = true;
        }

        nv50_crc_get_entries(head, func, crc->src);

        if (need_reschedule)
                drm_vblank_work_schedule(&crc->flip_work,
                                         drm_crtc_vblank_count(crtc)
                                         + crc->flip_threshold
                                         - crc->entry_idx,
                                         true);

out:
        spin_unlock(&crc->lock);
}

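/*
 * Rescheduling detail: the next flip is queued flip_threshold vblanks out,
 * minus the entries already drained from the fresh context this vblank, so
 * each context gets flipped away from before the hardware can overrun its
 * notifier.
 */
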
static void nv50_crc_wait_ctx_finished(struct nv50_head *head,
                                       const struct nv50_crc_func *func,
                                       struct nv50_crc_notifier_ctx *ctx)
{
        struct drm_device *dev = head->base.base.dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
        s64 ret;

        ret = nvif_msec(&drm->client.device, 50,
                        if (func->ctx_finished(head, ctx)) break;);
        if (ret == -ETIMEDOUT)
                NV_ERROR(drm,
                         "CRC notifier ctx for head %d not finished after 50ms\n",
                         head->base.index);
        else if (ret)
                NV_ATOMIC(drm,
                          "CRC notifier ctx for head-%d finished after %lldns\n",
                          head->base.index, ret);
}

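/*
 * nvif_msec() repeatedly evaluates its statement argument for up to the
 * given number of milliseconds; it yields the time taken in nanoseconds
 * when the body break;s out early and -ETIMEDOUT otherwise, which is what
 * the two messages above distinguish.
 */
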
void nv50_crc_atomic_stop_reporting(struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct nv50_head *head = nv50_head(crtc);
                struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
                struct nv50_crc *crc = &head->crc;

                if (!asyh->clr.crc)
                        continue;

                spin_lock_irq(&crc->lock);
                crc->src = NV50_CRC_SOURCE_NONE;
                spin_unlock_irq(&crc->lock);

                drm_crtc_vblank_put(crtc);
                drm_vblank_work_cancel_sync(&crc->flip_work);

                NV_ATOMIC(nouveau_drm(crtc->dev),
                          "CRC reporting on vblank for head-%d disabled\n",
                          head->base.index);

                /* CRC generation is still enabled in hw, we'll just report
                 * any remaining CRC entries ourselves after it gets disabled
                 * in hardware
                 */
        }
}

void nv50_crc_atomic_init_notifier_contexts(struct drm_atomic_state *state)
{
        struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct nv50_head *head = nv50_head(crtc);
                struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
                struct nv50_crc *crc = &head->crc;
                int i;

                if (!asyh->set.crc)
                        continue;

                crc->entry_idx = 0;
                crc->ctx_changed = false;
                for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
                        nv50_crc_reset_ctx(&crc->ctx[i]);
        }
}

void nv50_crc_atomic_release_notifier_contexts(struct drm_atomic_state *state)
{
        const struct nv50_crc_func *func =
                nv50_disp(state->dev)->core->func->crc;
        struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct nv50_head *head = nv50_head(crtc);
                struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
                struct nv50_crc *crc = &head->crc;
                struct nv50_crc_notifier_ctx *ctx = &crc->ctx[crc->ctx_idx];

                if (!asyh->clr.crc)
                        continue;

                if (crc->ctx_changed) {
                        nv50_crc_wait_ctx_finished(head, func, ctx);
                        ctx = &crc->ctx[crc->ctx_idx ^ 1];
                }
                nv50_crc_wait_ctx_finished(head, func, ctx);
        }
}

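/*
 * If a notifier flip was still pending (ctx_changed), both contexts can
 * have outstanding hardware writes, so we wait on the context being retired
 * first and then on the one the hardware flipped to; their backing memory
 * is only torn down afterwards, in nv50_crc_set_source().
 */
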
void nv50_crc_atomic_start_reporting(struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct nv50_head *head = nv50_head(crtc);
                struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
                struct nv50_crc *crc = &head->crc;
                u64 vbl_count;

                if (!asyh->set.crc)
                        continue;

                drm_crtc_vblank_get(crtc);

                spin_lock_irq(&crc->lock);
                vbl_count = drm_crtc_vblank_count(crtc);
                crc->frame = vbl_count;
                crc->src = asyh->crc.src;
                drm_vblank_work_schedule(&crc->flip_work,
                                         vbl_count + crc->flip_threshold,
                                         true);
                spin_unlock_irq(&crc->lock);

                NV_ATOMIC(nouveau_drm(crtc->dev),
                          "CRC reporting on vblank for head-%d enabled\n",
                          head->base.index);
        }
}

int nv50_crc_atomic_check_head(struct nv50_head *head,
                               struct nv50_head_atom *asyh,
                               struct nv50_head_atom *armh)
{
        struct nv50_atom *atom = nv50_atom(asyh->state.state);
        struct drm_device *dev = head->base.base.dev;
        struct nv50_disp *disp = nv50_disp(dev);
        bool changed = armh->crc.src != asyh->crc.src;

        if (!armh->crc.src && !asyh->crc.src) {
                asyh->set.crc = false;
                asyh->clr.crc = false;
                return 0;
        }

        /* While we don't care about entry tags, Volta+ hw always needs the
         * controlling wndw channel programmed to a wndw that's owned by our
         * head
         */
        if (asyh->crc.src && disp->disp->object.oclass >= GV100_DISP &&
            !(BIT(asyh->crc.wndw) & asyh->wndw.owned)) {
                if (!asyh->wndw.owned) {
                        /* TODO: once we support flexible channel ownership,
                         * we should write some code here to handle attempting
                         * to "steal" a plane: e.g. take a plane that is
                         * currently not-visible and owned by another head,
                         * and reassign it to this head. If we fail to do so,
                         * we should reject the mode outright as CRC capture
                         * then becomes impossible.
                         */
                        NV_ATOMIC(nouveau_drm(dev),
                                  "No available wndws for CRC readback\n");
                        return -EINVAL;
                }
                asyh->crc.wndw = ffs(asyh->wndw.owned) - 1;
        }

        if (drm_atomic_crtc_needs_modeset(&asyh->state) || changed ||
            armh->crc.wndw != asyh->crc.wndw) {
                asyh->clr.crc = armh->crc.src && armh->state.active;
                asyh->set.crc = asyh->crc.src && asyh->state.active;
                if (changed)
                        asyh->set.or |= armh->or.crc_raster !=
                                        asyh->or.crc_raster;

                if (asyh->clr.crc && asyh->set.crc)
                        atom->flush_disable = true;
        } else {
                asyh->set.crc = false;
                asyh->clr.crc = false;
        }

        return 0;
}

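/*
 * When one commit both clears the old CRC state and sets up a new source,
 * atom->flush_disable forces the disable half of the commit into its own
 * core channel flush instead of folding both halves into a single update.
 */
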
void nv50_crc_atomic_check_outp(struct nv50_atom *atom)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int i;

        if (atom->flush_disable)
                return;

        for_each_oldnew_crtc_in_state(&atom->state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
                struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
                struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
                struct nv50_outp_atom *outp_atom;
                struct nouveau_encoder *outp =
                        nv50_real_outp(nv50_head_atom_get_encoder(armh));
                struct drm_encoder *encoder = &outp->base.base;

                if (!asyh->clr.crc)
                        continue;

                /*
                 * Re-programming ORs can't be done in the same flush as
                 * disabling CRCs
                 */
                list_for_each_entry(outp_atom, &atom->outp, head) {
                        if (outp_atom->encoder == encoder) {
                                if (outp_atom->set.mask) {
                                        atom->flush_disable = true;
                                        return;
                                }
                                break;
                        }
                }
        }
}

static enum nv50_crc_source_type
nv50_crc_source_type(struct nouveau_encoder *outp,
                     enum nv50_crc_source source)
{
        struct dcb_output *dcbe = outp->dcb;

        switch (source) {
        case NV50_CRC_SOURCE_NONE: return NV50_CRC_SOURCE_TYPE_NONE;
        case NV50_CRC_SOURCE_RG:   return NV50_CRC_SOURCE_TYPE_RG;
        default:                   break;
        }

        if (dcbe->location != DCB_LOC_ON_CHIP)
                return NV50_CRC_SOURCE_TYPE_PIOR;

        switch (dcbe->type) {
        case DCB_OUTPUT_DP:     return NV50_CRC_SOURCE_TYPE_SF;
        case DCB_OUTPUT_ANALOG: return NV50_CRC_SOURCE_TYPE_DAC;
        default:                return NV50_CRC_SOURCE_TYPE_SOR;
        }
}

void nv50_crc_atomic_set(struct nv50_head *head,
                         struct nv50_head_atom *asyh)
{
        struct drm_crtc *crtc = &head->base.base;
        struct drm_device *dev = crtc->dev;
        struct nv50_crc *crc = &head->crc;
        const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
        struct nouveau_encoder *outp =
                nv50_real_outp(nv50_head_atom_get_encoder(asyh));

        func->set_src(head, outp->or,
                      nv50_crc_source_type(outp, asyh->crc.src),
                      &crc->ctx[crc->ctx_idx], asyh->crc.wndw);
}

void nv50_crc_atomic_clr(struct nv50_head *head)
{
        const struct nv50_crc_func *func =
                nv50_disp(head->base.base.dev)->core->func->crc;

        func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL, 0);
}

static inline int
nv50_crc_raster_type(enum nv50_crc_source source)
{
        switch (source) {
        case NV50_CRC_SOURCE_NONE:
        case NV50_CRC_SOURCE_AUTO:
        case NV50_CRC_SOURCE_RG:
        case NV50_CRC_SOURCE_OUTP_ACTIVE:
                return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER;
        case NV50_CRC_SOURCE_OUTP_COMPLETE:
                return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER;
        case NV50_CRC_SOURCE_OUTP_INACTIVE:
                return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER;
        }

        return 0;
}

/* We handle mapping the memory for CRC notifiers ourselves, since each
 * notifier needs its own handle
 */
static inline int
nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
                  struct nv50_crc_notifier_ctx *ctx, size_t len, int idx)
{
        struct nv50_core *core = nv50_disp(head->base.base.dev)->core;
        int ret;

        ret = nvif_mem_ctor_map(mmu, "kmsCrcNtfy", NVIF_MEM_VRAM, len,
                                &ctx->mem);
        if (ret)
                return ret;

        ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma",
                               NV50_DISP_HANDLE_CRC_CTX(head, idx),
                               NV_DMA_IN_MEMORY,
                               &(struct nv_dma_v0) {
                                        .target = NV_DMA_V0_TARGET_VRAM,
                                        .access = NV_DMA_V0_ACCESS_RDWR,
                                        .start = ctx->mem.addr,
                                        .limit = ctx->mem.addr
                                                 + ctx->mem.size - 1,
                               }, sizeof(struct nv_dma_v0),
                               &ctx->ntfy);
        if (ret)
                goto fail_fini;

        return 0;

fail_fini:
        nvif_mem_dtor(&ctx->mem);
        return ret;
}

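/*
 * The compound literal above describes the notifier's VRAM range to the
 * display engine as a DMA object: an inclusive [start, limit] window with
 * read/write access, created against the core channel so the CRC methods
 * can reference the notifier by handle.
 */
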
static inline void
nv50_crc_ctx_fini(struct nv50_crc_notifier_ctx *ctx)
{
        nvif_object_dtor(&ctx->ntfy);
        nvif_mem_dtor(&ctx->mem);
}

int nv50_crc_set_source(struct drm_crtc *crtc, const char *source_str)
{
        struct drm_device *dev = crtc->dev;
        struct drm_atomic_state *state;
        struct drm_modeset_acquire_ctx ctx;
        struct nv50_head *head = nv50_head(crtc);
        struct nv50_crc *crc = &head->crc;
        const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
        struct nvif_mmu *mmu = &nouveau_drm(dev)->client.mmu;
        struct nv50_head_atom *asyh;
        struct drm_crtc_state *crtc_state;
        enum nv50_crc_source source;
        int ret = 0, ctx_flags = 0, i;

        ret = nv50_crc_parse_source(source_str, &source);
        if (ret)
                return ret;

        /*
         * Since we don't want the user to accidentally interrupt us as we're
         * disabling CRCs
         */
        if (source)
                ctx_flags |= DRM_MODESET_ACQUIRE_INTERRUPTIBLE;
        drm_modeset_acquire_init(&ctx, ctx_flags);

        state = drm_atomic_state_alloc(dev);
        if (!state) {
                ret = -ENOMEM;
                goto out_acquire_fini;
        }
        state->acquire_ctx = &ctx;

        if (source) {
                for (i = 0; i < ARRAY_SIZE(head->crc.ctx); i++) {
                        ret = nv50_crc_ctx_init(head, mmu, &crc->ctx[i],
                                                func->notifier_len, i);
                        if (ret)
                                goto out_ctx_fini;
                }
        }

retry:
        crtc_state = drm_atomic_get_crtc_state(state, &head->base.base);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                if (ret == -EDEADLK)
                        goto deadlock;
                else if (ret)
                        goto out_drop_locks;
        }
        asyh = nv50_head_atom(crtc_state);
        asyh->crc.src = source;
        asyh->or.crc_raster = nv50_crc_raster_type(source);

        ret = drm_atomic_commit(state);
        if (ret == -EDEADLK)
                goto deadlock;
        else if (ret)
                goto out_drop_locks;

        if (!source) {
                /*
                 * If the user specified a custom flip threshold through
                 * debugfs, reset it
                 */
                crc->flip_threshold = func->flip_threshold;
        }

out_drop_locks:
        drm_modeset_drop_locks(&ctx);
out_ctx_fini:
        if (!source || ret) {
                for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
                        nv50_crc_ctx_fini(&crc->ctx[i]);
        }
        drm_atomic_state_put(state);
out_acquire_fini:
        drm_modeset_acquire_fini(&ctx);
        return ret;

deadlock:
        drm_atomic_state_clear(state);
        drm_modeset_backoff(&ctx);
        goto retry;
}

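/*
 * The retry/deadlock labels above implement the standard atomic modeset
 * locking dance: on -EDEADLK the atomic state is cleared,
 * drm_modeset_backoff() drops and reacquires the contended locks in the
 * correct order, and the commit is attempted again.
 */
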
static int
nv50_crc_debugfs_flip_threshold_get(struct seq_file *m, void *data)
{
        struct nv50_head *head = m->private;
        struct drm_crtc *crtc = &head->base.base;
        struct nv50_crc *crc = &head->crc;
        int ret;

        ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
        if (ret)
                return ret;

        seq_printf(m, "%d\n", crc->flip_threshold);

        drm_modeset_unlock(&crtc->mutex);
        return ret;
}

static int
nv50_crc_debugfs_flip_threshold_open(struct inode *inode, struct file *file)
{
        return single_open(file, nv50_crc_debugfs_flip_threshold_get,
                           inode->i_private);
}

static ssize_t
nv50_crc_debugfs_flip_threshold_set(struct file *file,
                                    const char __user *ubuf, size_t len,
                                    loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct nv50_head *head = m->private;
        struct nv50_head_atom *armh;
        struct drm_crtc *crtc = &head->base.base;
        struct nouveau_drm *drm = nouveau_drm(crtc->dev);
        struct nv50_crc *crc = &head->crc;
        const struct nv50_crc_func *func =
                nv50_disp(crtc->dev)->core->func->crc;
        int value, ret;

        ret = kstrtoint_from_user(ubuf, len, 10, &value);
        if (ret)
                return ret;

        if (value > func->flip_threshold)
                return -EINVAL;
        else if (value == -1)
                value = func->flip_threshold;
        else if (value < -1)
                return -EINVAL;

        ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
        if (ret)
                return ret;

        armh = nv50_head_atom(crtc->state);
        if (armh->crc.src) {
                ret = -EBUSY;
                goto out;
        }

        NV_DEBUG(drm,
                 "Changing CRC flip threshold for next capture on head-%d to %d\n",
                 head->base.index, value);
        crc->flip_threshold = value;
        ret = len;

out:
        drm_modeset_unlock(&crtc->mutex);
        return ret;
}

static const struct file_operations nv50_crc_flip_threshold_fops = {
        .owner = THIS_MODULE,
        .open = nv50_crc_debugfs_flip_threshold_open,
        .read = seq_read,
        .write = nv50_crc_debugfs_flip_threshold_set,
};

int nv50_head_crc_late_register(struct nv50_head *head)
{
        struct drm_crtc *crtc = &head->base.base;
        const struct nv50_crc_func *func =
                nv50_disp(crtc->dev)->core->func->crc;
        struct dentry *root;

        if (!func || !crtc->debugfs_entry)
                return 0;

        root = debugfs_create_dir("nv_crc", crtc->debugfs_entry);
        debugfs_create_file("flip_threshold", 0644, root, head,
                            &nv50_crc_flip_threshold_fops);
        return 0;
}

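/*
 * Example usage from userspace (the card and CRTC numbers here are
 * illustrative, not fixed):
 *
 *   # cat /sys/kernel/debug/dri/0/crtc-0/nv_crc/flip_threshold
 *   # echo 100 > /sys/kernel/debug/dri/0/crtc-0/nv_crc/flip_threshold
 *
 * Writes are rejected with -EBUSY while a CRC capture is active.
 */
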
static inline void
nv50_crc_init_head(struct nv50_disp *disp, const struct nv50_crc_func *func,
                   struct nv50_head *head)
{
        struct nv50_crc *crc = &head->crc;

        crc->flip_threshold = func->flip_threshold;
        spin_lock_init(&crc->lock);
        drm_vblank_work_init(&crc->flip_work, &head->base.base,
                             nv50_crc_ctx_flip_work);
}

void nv50_crc_init(struct drm_device *dev)
{
        struct nv50_disp *disp = nv50_disp(dev);
        struct drm_crtc *crtc;
        const struct nv50_crc_func *func = disp->core->func->crc;

        if (!func)
                return;

        drm_for_each_crtc(crtc, dev)
                nv50_crc_init_head(disp, func, nv50_head(crtc));
}