/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "wndw.h"
#include "wimm.h"

#include <nvif/class.h>
#include <nvif/cl0002.h>

#include <drm/drm_atomic_helper.h>
#include "nouveau_bo.h"
static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
{
	nvif_object_fini(&ctxdma->object);
	list_del(&ctxdma->head);
	kfree(ctxdma);
}
static struct nv50_wndw_ctxdma *
nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
{
	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
	struct nv50_wndw_ctxdma *ctxdma;
	const u8    kind = fb->nvbo->kind;
	const u32 handle = 0xfb000000 | kind;
	struct {
		struct nv_dma_v0 base;
		union {
			struct nv50_dma_v0 nv50;
			struct gf100_dma_v0 gf100;
			struct gf119_dma_v0 gf119;
		};
	} args = {};
	u32 argc = sizeof(args.base);
	int ret;

	/* Context DMAs are cached per memory kind - reuse one if possible. */
	list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
		if (ctxdma->object.handle == handle)
			return ctxdma;
	}

	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);
	list_add(&ctxdma->head, &wndw->ctxdma.list);

	args.base.target = NV_DMA_V0_TARGET_VRAM;
	args.base.access = NV_DMA_V0_ACCESS_RDWR;
	args.base.start  = 0;
	args.base.limit  = drm->client.device.info.ram_user - 1;

	if (drm->client.device.info.chipset < 0x80) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xc0) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		args.nv50.kind = kind;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xd0) {
		args.gf100.kind = kind;
		argc += sizeof(args.gf100);
	} else {
		args.gf119.page = GF119_DMA_V0_PAGE_LP;
		args.gf119.kind = kind;
		argc += sizeof(args.gf119);
	}

	ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
			       &args, argc, &ctxdma->object);
	if (ret) {
		nv50_wndw_ctxdma_del(ctxdma);
		return ERR_PTR(ret);
	}

	return ctxdma;
}
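/* A note on the caching above: the object handle is derived entirely from
 * the memory kind (0xfb000000 | kind), so the list lookup means exactly one
 * context DMA is created per kind and shared by every framebuffer that uses
 * that kind.  The cached objects persist until nv50_wndw_destroy() walks
 * wndw->ctxdma.list and deletes them.
 */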
int
nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	if (asyw->set.ntfy) {
		return wndw->func->ntfy_wait_begun(disp->sync,
						   asyw->ntfy.offset,
						   wndw->wndw.base.device);
	}
	return 0;
}
void
nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
		    struct nv50_wndw_atom *asyw)
{
	union nv50_wndw_atom_mask clr = {
		.mask = asyw->clr.mask & ~(flush ? 0 : asyw->set.mask),
	};
	if (clr.sema ) wndw->func-> sema_clr(wndw);
	if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
	if (clr.xlut ) wndw->func-> xlut_clr(wndw);
	if (clr.image) wndw->func->image_clr(wndw);

	interlock[wndw->interlock.type] |= wndw->interlock.data;
}
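/* Both flush paths accumulate bits into the caller's interlock[] array,
 * indexed by interlock type, so the commit code can make the update
 * submissions of the affected display channels synchronize with each other.
 */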
void
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
		    struct nv50_wndw_atom *asyw)
{
	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		asyw->image.mode = 0;
		asyw->image.interval = 1;
	}

	if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
	if (asyw->set.image) wndw->func->image_set(wndw, asyw);

	if (asyw->set.xlut ) {
		if (asyw->ilut) {
			asyw->xlut.i.offset =
				nv50_lut_load(&wndw->ilut,
					      asyw->xlut.i.mode <= 1,
					      asyw->xlut.i.buffer,
					      asyw->ilut);
		}
		wndw->func->xlut_set(wndw, asyw);
	}

	if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
	if (asyw->set.point) {
		/* Clear the point flag first (comma operator), then check
		 * whether anything else still needs to go out via the
		 * window channel.
		 */
		if (asyw->set.point = false, asyw->set.mask)
			interlock[wndw->interlock.type] |= wndw->interlock.data;
		interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data;

		wndw->immd->point(wndw, asyw);
		wndw->immd->update(wndw, interlock);
	} else {
		interlock[wndw->interlock.type] |= wndw->interlock.data;
	}
}
void
nv50_wndw_ntfy_enable(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);

	asyw->ntfy.handle = wndw->wndw.sync.handle;
	asyw->ntfy.offset = wndw->ntfy;
	asyw->ntfy.awaken = false;
	asyw->set.ntfy = true;

	wndw->func->ntfy_reset(disp->sync, wndw->ntfy);
	wndw->ntfy ^= 0x10;
}
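/* The XOR toggles wndw->ntfy between two notifier slots 0x10 bytes apart in
 * the sync buffer, double-buffering the begun-notifier: asyw->ntfy.offset,
 * latched above before the toggle, keeps pointing at the slot armed here
 * (which nv50_wndw_wait_armed() polls), while the next enable moves to the
 * other slot.
 */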
static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
	wndw->func->release(wndw, asyw, asyh);
	asyw->ntfy.handle = 0;
	asyw->sema.handle = 0;
}
static int
nv50_wndw_atomic_check_acquire_yuv(struct nv50_wndw_atom *asyw)
{
	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_YUYV: asyw->image.format = 0x28; break;
	case DRM_FORMAT_UYVY: asyw->image.format = 0x29; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}
	asyw->image.colorspace = 1;
	return 0;
}
static int
nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
{
	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_C8          : asyw->image.format = 0x1e; break;
	case DRM_FORMAT_XRGB8888    :
	case DRM_FORMAT_ARGB8888    : asyw->image.format = 0xcf; break;
	case DRM_FORMAT_RGB565      : asyw->image.format = 0xe8; break;
	case DRM_FORMAT_XRGB1555    :
	case DRM_FORMAT_ARGB1555    : asyw->image.format = 0xe9; break;
	case DRM_FORMAT_XBGR2101010 :
	case DRM_FORMAT_ABGR2101010 : asyw->image.format = 0xd1; break;
	case DRM_FORMAT_XBGR8888    :
	case DRM_FORMAT_ABGR8888    : asyw->image.format = 0xd5; break;
	case DRM_FORMAT_XRGB2101010 :
	case DRM_FORMAT_ARGB2101010 : asyw->image.format = 0xdf; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}
	asyw->image.colorspace = 0;
	return 0;
}
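/* The hex values written to asyw->image.format in the two helpers above are
 * hardware surface-format codes from the display method classes (0xcf is
 * A8R8G8B8, for instance), not DRM fourccs - hence the explicit translation.
 */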
static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
			       struct nv50_wndw_atom *armw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	int ret;

	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);

	if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
		asyw->image.w = fb->base.width;
		asyw->image.h = fb->base.height;
		asyw->image.kind = fb->nvbo->kind;

		ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
		if (ret) {
			ret = nv50_wndw_atomic_check_acquire_yuv(asyw);
			if (ret)
				return ret;
		}

		if (asyw->image.kind) {
			asyw->image.layout = 0;
			if (drm->client.device.info.chipset >= 0xc0)
				asyw->image.blockh = fb->nvbo->mode >> 4;
			else
				asyw->image.blockh = fb->nvbo->mode;
			asyw->image.blocks[0] = fb->base.pitches[0] / 64;
			asyw->image.pitch[0] = 0;
		} else {
			asyw->image.layout = 1;
			asyw->image.blockh = 0;
			asyw->image.blocks[0] = 0;
			asyw->image.pitch[0] = fb->base.pitches[0];
		}

		if (!(asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
			asyw->image.interval = 1;
		else
			asyw->image.interval = 0;
		asyw->image.mode = asyw->image.interval ? 0 : 1;
		asyw->set.image = wndw->func->image_set != NULL;
	}

	if (wndw->func->scale_set) {
		asyw->scale.sx = asyw->state.src_x >> 16;
		asyw->scale.sy = asyw->state.src_y >> 16;
		asyw->scale.sw = asyw->state.src_w >> 16;
		asyw->scale.sh = asyw->state.src_h >> 16;
		asyw->scale.dw = asyw->state.crtc_w;
		asyw->scale.dh = asyw->state.crtc_h;
		if (memcmp(&armw->scale, &asyw->scale, sizeof(asyw->scale)))
			asyw->set.scale = true;
	}

	if (wndw->immd) {
		asyw->point.x = asyw->state.crtc_x;
		asyw->point.y = asyw->state.crtc_y;
		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
			asyw->set.point = true;
	}

	return wndw->func->acquire(wndw, asyw, asyh);
}
static void
nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
			   struct nv50_wndw_atom *armw,
			   struct nv50_wndw_atom *asyw,
			   struct nv50_head_atom *asyh)
{
	struct drm_property_blob *ilut = asyh->state.degamma_lut;

	/* I8 format without an input LUT makes no sense, and the
	 * HW error-checks for this.
	 *
	 * In order to handle legacy gamma, when there's no input
	 * LUT we need to steal the output LUT and use it instead.
	 */
	if (!ilut && asyw->state.fb->format->format == DRM_FORMAT_C8) {
		/* This should be an error, but there's legacy clients
		 * that do a modeset before providing a gamma table.
		 *
		 * We keep the window disabled to avoid angering HW.
		 */
		if (!(ilut = asyh->state.gamma_lut)) {
			asyw->visible = false;
			return;
		}

		if (wndw->func->ilut)
			asyh->wndw.olut |= BIT(wndw->id);
	} else {
		asyh->wndw.olut &= ~BIT(wndw->id);
	}

	/* Recalculate LUT state. */
	memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
	if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
		wndw->func->ilut(wndw, asyw);
		asyw->xlut.handle = wndw->wndw.vram.handle;
		asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
		asyw->set.xlut = true;
	}

	/* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
	if (wndw->func->olut_core &&
	    (!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
		asyw->set.xlut = true;

	/* Can't do an immediate flip while changing the LUT. */
	asyh->state.pageflip_flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
}
static int
nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *harm = NULL, *asyh = NULL;
	bool modeset = false;
	int ret;

	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);

	/* Fetch the assembly state for the head the window will belong to,
	 * and determine whether the window will be visible.
	 */
	if (asyw->state.crtc) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);
		modeset = drm_atomic_crtc_needs_modeset(&asyh->state);
		asyw->visible = asyh->state.active;
	} else {
		asyw->visible = false;
	}

	/* Fetch assembly state for the head the window used to belong to. */
	if (armw->state.crtc) {
		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
		if (IS_ERR(harm))
			return PTR_ERR(harm);
	}

	/* LUT configuration can potentially cause the window to be disabled. */
	if (asyw->visible && wndw->func->xlut_set &&
	    (!armw->visible ||
	     asyh->state.color_mgmt_changed ||
	     asyw->state.fb->format->format !=
	     armw->state.fb->format->format))
		nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);

	/* Calculate new window state. */
	if (asyw->visible) {
		ret = nv50_wndw_atomic_check_acquire(wndw, modeset,
						     armw, asyw, asyh);
		if (ret)
			return ret;

		asyh->wndw.mask |= BIT(wndw->id);
	} else
	if (armw->visible) {
		nv50_wndw_atomic_check_release(wndw, asyw, harm);
		harm->wndw.mask &= ~BIT(wndw->id);
	} else {
		return 0;
	}

	/* Aside from the obvious case where the window is actively being
	 * disabled, we might also need to temporarily disable the window
	 * when performing certain modeset operations.
	 */
	if (!asyw->visible || modeset) {
		asyw->clr.ntfy = armw->ntfy.handle != 0;
		asyw->clr.sema = armw->sema.handle != 0;
		asyw->clr.xlut = armw->xlut.handle != 0;
		if (wndw->func->image_clr)
			asyw->clr.image = armw->image.handle[0] != 0;
	}

	return 0;
}
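/* This hook is driven by drm_atomic_helper_check_planes() for every plane in
 * a commit; any error returned here rejects the whole atomic state before
 * anything reaches the hardware.
 */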
static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);

	NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
	if (!old_state->fb)
		return;

	nouveau_bo_unpin(fb->nvbo);
}
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *asyh;
	struct nv50_wndw_ctxdma *ctxdma;
	int ret;

	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
	if (!asyw->state.fb)
		return 0;

	/* Pin the backing buffer in VRAM; nv50_wndw_cleanup_fb() unpins it. */
	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
	if (ret)
		return ret;

	if (wndw->ctxdma.parent) {
		ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
		if (IS_ERR(ctxdma)) {
			nouveau_bo_unpin(fb->nvbo);
			return PTR_ERR(ctxdma);
		}

		asyw->image.handle[0] = ctxdma->object.handle;
	}

	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
	asyw->image.offset[0] = fb->nvbo->bo.offset;

	if (wndw->func->prepare) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);

		wndw->func->prepare(wndw, asyh, asyw);
	}

	return 0;
}
static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
	.prepare_fb = nv50_wndw_prepare_fb,
	.cleanup_fb = nv50_wndw_cleanup_fb,
	.atomic_check = nv50_wndw_atomic_check,
};
static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	__drm_atomic_helper_plane_destroy_state(&asyw->state);
	kfree(asyw);
}
static struct drm_plane_state *
nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
{
	struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
	struct nv50_wndw_atom *asyw;
	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
		return NULL;
	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
	asyw->sema = armw->sema;
	asyw->ntfy = armw->ntfy;
	asyw->ilut = NULL;
	asyw->xlut = armw->xlut;
	asyw->image = armw->image;
	asyw->point = armw->point;
	asyw->clr.mask = 0;
	asyw->set.mask = 0;
	return &asyw->state;
}
static void
nv50_wndw_reset(struct drm_plane *plane)
{
	struct nv50_wndw_atom *asyw;

	if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
		return;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);
	plane->state = &asyw->state;
	plane->state->plane = plane;
	plane->state->rotation = DRM_MODE_ROTATE_0;
}
static void
nv50_wndw_destroy(struct drm_plane *plane)
{
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;

	list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
		nv50_wndw_ctxdma_del(ctxdma);
	}

	nvif_notify_fini(&wndw->notify);
	nv50_dmac_destroy(&wndw->wimm);
	nv50_dmac_destroy(&wndw->wndw);

	nv50_lut_fini(&wndw->ilut);

	drm_plane_cleanup(&wndw->plane);
	kfree(wndw);
}
const struct drm_plane_funcs
nv50_wndw = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = nv50_wndw_destroy,
	.reset = nv50_wndw_reset,
	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};
static int
nv50_wndw_notify(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}
void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
	nvif_notify_put(&wndw->notify);
}
void
nv50_wndw_init(struct nv50_wndw *wndw)
{
	nvif_notify_get(&wndw->notify);
}
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
	       enum drm_plane_type type, const char *name, int index,
	       const u32 *format, u32 heads,
	       enum nv50_disp_interlock_type interlock_type, u32 interlock_data,
	       struct nv50_wndw **pwndw)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_mmu *mmu = &drm->client.mmu;
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_wndw *wndw;
	int nformat;
	int ret;

	if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
		return -ENOMEM;
	wndw->func = func;
	wndw->id = index;
	wndw->interlock.type = interlock_type;
	wndw->interlock.data = interlock_data;

	wndw->ctxdma.parent = &wndw->wndw.base.user;
	INIT_LIST_HEAD(&wndw->ctxdma.list);

	/* Count entries in the zero-terminated format list. */
	for (nformat = 0; format[nformat]; nformat++);

	ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
				       format, nformat, NULL,
				       type, "%s-%d", name, index);
	if (ret) {
		kfree(*pwndw);
		*pwndw = NULL;
		return ret;
	}

	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);

	if (wndw->func->ilut) {
		ret = nv50_lut_init(disp, mmu, &wndw->ilut);
		if (ret)
			return ret;
	}

	wndw->notify.func = nv50_wndw_notify;
	return 0;
}
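/* The format list handed to nv50_wndw_new_() must be zero-terminated, since
 * the counting loop above relies on the sentinel.  A minimal sketch of a
 * caller (names and values illustrative only, not from this file):
 *
 *	static const u32 formats[] = {
 *		DRM_FORMAT_ARGB8888,
 *		DRM_FORMAT_XRGB8888,
 *		0
 *	};
 *
 *	ret = nv50_wndw_new_(&my_wndw_func, dev, DRM_PLANE_TYPE_OVERLAY,
 *			     "wndw", index, formats, heads,
 *			     NV50_DISP_INTERLOCK_WNDW, interlock_data, &wndw);
 */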
int
nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
	      struct nv50_wndw **pwndw)
{
	struct {
		s32 oclass;
		int version;
		int (*new)(struct nouveau_drm *, enum drm_plane_type,
			   int, s32, struct nv50_wndw **);
	} wndws[] = {
		{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
		{}
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	int cid, ret;

	cid = nvif_mclass(&disp->disp->object, wndws);
	if (cid < 0) {
		NV_ERROR(drm, "No supported window class\n");
		return cid;
	}

	ret = wndws[cid].new(drm, type, index, wndws[cid].oclass, pwndw);
	if (ret)
		return ret;

	return nv50_wimm_init(drm, *pwndw);
}
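/* nvif_mclass() matches the wndws[] table against the classes exposed by the
 * display object and returns the index of the first supported entry, so
 * support for a newer window class is added by inserting a row above the
 * GV100 one.
 */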