/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "wndw.h"
#include "wimm.h"

#include <nvif/class.h>
#include <nvif/cl0002.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>

#include "nouveau_bo.h"
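/* Each framebuffer bound to a window is described to the display engine
 * by a context DMA object; these are cached on a per-window list and
 * looked up by handle, which encodes the buffer's memory kind.
 */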
static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
{
	nvif_object_fini(&ctxdma->object);
	list_del(&ctxdma->head);
	kfree(ctxdma);
}
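/* Return the cached context DMA object matching this framebuffer's
 * memory kind, or build a new one.  The push-buffer arguments differ
 * by chipset generation (nv50/gf100/gf119 layouts).
 */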
static struct nv50_wndw_ctxdma *
nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
{
	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
	struct nv50_wndw_ctxdma *ctxdma;
	const u8    kind = fb->nvbo->kind;
	const u32 handle = 0xfb000000 | kind;
	struct {
		struct nv_dma_v0 base;
		union {
			struct nv50_dma_v0 nv50;
			struct gf100_dma_v0 gf100;
			struct gf119_dma_v0 gf119;
		};
	} args = {};
	u32 argc = sizeof(args.base);
	int ret;

	list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
		if (ctxdma->object.handle == handle)
			return ctxdma;
	}

	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);
	list_add(&ctxdma->head, &wndw->ctxdma.list);

	args.base.target = NV_DMA_V0_TARGET_VRAM;
	args.base.access = NV_DMA_V0_ACCESS_RDWR;
	args.base.start  = 0;
	args.base.limit  = drm->client.device.info.ram_user - 1;

	if (drm->client.device.info.chipset < 0x80) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xc0) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		args.nv50.kind = kind;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xd0) {
		args.gf100.kind = kind;
		argc += sizeof(args.gf100);
	} else {
		args.gf119.page = GF119_DMA_V0_PAGE_LP;
		args.gf119.kind = kind;
		argc += sizeof(args.gf119);
	}

	ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
			       &args, argc, &ctxdma->object);
	if (ret) {
		nv50_wndw_ctxdma_del(ctxdma);
		return ERR_PTR(ret);
	}

	return ctxdma;
}
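/* Wait for the hardware to signal, via the window's notifier, that it
 * has begun processing the most recently submitted update.
 */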
static int
nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	if (asyw->set.ntfy) {
		return wndw->func->ntfy_wait_begun(disp->sync,
						   asyw->ntfy.offset,
						   wndw->wndw.base.device);
	}
	return 0;
}
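/* Disable window state flagged for clearing.  With "flush" set, even
 * state that is about to be re-set is cleared first; otherwise, bits
 * with a pending replacement in asyw->set.mask are skipped.
 */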
void
nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
		    struct nv50_wndw_atom *asyw)
{
	union nv50_wndw_atom_mask clr = {
		.mask = asyw->clr.mask & ~(flush ? 0 : asyw->set.mask),
	};
	if (clr.sema ) wndw->func-> sema_clr(wndw);
	if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
	if (clr.xlut ) wndw->func-> xlut_clr(wndw);
	if (clr.csc  ) wndw->func->  csc_clr(wndw);
	if (clr.image) wndw->func->image_clr(wndw);

	interlock[wndw->interlock.type] |= wndw->interlock.data;
}
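/* Submit all state flagged for setting.  If the core channel is part
 * of this interlock, the image update is forced to mode 0 with an
 * interval of 1, keeping it synchronised with the core update.
 */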
void
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
		    struct nv50_wndw_atom *asyw)
{
	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		asyw->image.mode = 0;
		asyw->image.interval = 1;
	}

	if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
	if (asyw->set.image) wndw->func->image_set(wndw, asyw);

	if (asyw->set.xlut ) {
		if (asyw->ilut) {
			asyw->xlut.i.offset =
				nv50_lut_load(&wndw->ilut, asyw->xlut.i.buffer,
					      asyw->ilut, asyw->xlut.i.load);
		}
		wndw->func->xlut_set(wndw, asyw);
	}

	if (asyw->set.csc  ) wndw->func->csc_set  (wndw, asyw);
	if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
	if (asyw->set.blend) wndw->func->blend_set(wndw, asyw);
	if (asyw->set.point) {
		/* Deliberate comma operator: clear set.point, then test
		 * whether any other state remains to be flushed.
		 */
		if (asyw->set.point = false, asyw->set.mask)
			interlock[wndw->interlock.type] |= wndw->interlock.data;
		interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;

		wndw->immd->point(wndw, asyw);
		wndw->immd->update(wndw, interlock);
	} else {
		interlock[wndw->interlock.type] |= wndw->interlock.data;
	}
}
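/* Arm the update-completion notifier: reset its memory in the shared
 * semaphore buffer and flag it to be written by the next update.
 */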
void
nv50_wndw_ntfy_enable(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);

	asyw->ntfy.handle = wndw->wndw.sync.handle;
	asyw->ntfy.offset = wndw->ntfy;
	asyw->ntfy.awaken = false;
	asyw->set.ntfy = true;

	wndw->func->ntfy_reset(disp->sync, wndw->ntfy);
	wndw->ntfy ^= 0x10;
}
static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
	wndw->func->release(wndw, asyw, asyh);
	asyw->ntfy.handle = 0;
	asyw->sema.handle = 0;
}
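/* Translate the framebuffer's DRM fourcc into the display engine's
 * image format code; the hex values are hardware format enumerants.
 */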
static int
nv50_wndw_atomic_check_acquire_yuv(struct nv50_wndw_atom *asyw)
{
	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_YUYV: asyw->image.format = 0x28; break;
	case DRM_FORMAT_UYVY: asyw->image.format = 0x29; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}
	asyw->image.colorspace = 1;
	return 0;
}
static int
nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
{
	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_C8           : asyw->image.format = 0x1e; break;
	case DRM_FORMAT_XRGB8888     :
	case DRM_FORMAT_ARGB8888     : asyw->image.format = 0xcf; break;
	case DRM_FORMAT_RGB565       : asyw->image.format = 0xe8; break;
	case DRM_FORMAT_XRGB1555     :
	case DRM_FORMAT_ARGB1555     : asyw->image.format = 0xe9; break;
	case DRM_FORMAT_XBGR2101010  :
	case DRM_FORMAT_ABGR2101010  : asyw->image.format = 0xd1; break;
	case DRM_FORMAT_XBGR8888     :
	case DRM_FORMAT_ABGR8888     : asyw->image.format = 0xd5; break;
	case DRM_FORMAT_XRGB2101010  :
	case DRM_FORMAT_ARGB2101010  : asyw->image.format = 0xdf; break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F: asyw->image.format = 0xca; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}
	asyw->image.colorspace = 0;
	return 0;
}
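/* Compute the window state needed to display a new framebuffer:
 * image format/layout, scaling, blending and position.
 */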
static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
			       struct nv50_wndw_atom *armw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	int ret;

	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);

	if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
		asyw->image.w = fb->base.width;
		asyw->image.h = fb->base.height;
		asyw->image.kind = fb->nvbo->kind;

		ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
		if (ret) {
			ret = nv50_wndw_atomic_check_acquire_yuv(asyw);
			if (ret)
				return ret;
		}

		if (asyw->image.kind) {
			asyw->image.layout = 0;
			if (drm->client.device.info.chipset >= 0xc0)
				asyw->image.blockh = fb->nvbo->mode >> 4;
			else
				asyw->image.blockh = fb->nvbo->mode;
			asyw->image.blocks[0] = fb->base.pitches[0] / 64;
			asyw->image.pitch[0] = 0;
		} else {
			asyw->image.layout = 1;
			asyw->image.blockh = 0;
			asyw->image.blocks[0] = 0;
			asyw->image.pitch[0] = fb->base.pitches[0];
		}

		if (!asyh->state.async_flip)
			asyw->image.interval = 1;
		else
			asyw->image.interval = 0;
		asyw->image.mode = asyw->image.interval ? 0 : 1;
		asyw->set.image = wndw->func->image_set != NULL;
	}

	if (wndw->func->scale_set) {
		asyw->scale.sx = asyw->state.src_x >> 16;
		asyw->scale.sy = asyw->state.src_y >> 16;
		asyw->scale.sw = asyw->state.src_w >> 16;
		asyw->scale.sh = asyw->state.src_h >> 16;
		asyw->scale.dw = asyw->state.crtc_w;
		asyw->scale.dh = asyw->state.crtc_h;
		if (memcmp(&armw->scale, &asyw->scale, sizeof(asyw->scale)))
			asyw->set.scale = true;
	}

	if (wndw->func->blend_set) {
		asyw->blend.depth = 255 - asyw->state.normalized_zpos;
		asyw->blend.k1 = asyw->state.alpha >> 8;
		switch (asyw->state.pixel_blend_mode) {
		case DRM_MODE_BLEND_PREMULTI:
			asyw->blend.src_color = 2; /* K1 */
			asyw->blend.dst_color = 7; /* NEG_K1_TIMES_SRC */
			break;
		case DRM_MODE_BLEND_COVERAGE:
			asyw->blend.src_color = 5; /* K1_TIMES_SRC */
			asyw->blend.dst_color = 7; /* NEG_K1_TIMES_SRC */
			break;
		case DRM_MODE_BLEND_PIXEL_NONE:
		default:
			asyw->blend.src_color = 2; /* K1 */
			asyw->blend.dst_color = 4; /* NEG_K1 */
			break;
		}
		if (memcmp(&armw->blend, &asyw->blend, sizeof(asyw->blend)))
			asyw->set.blend = true;
	}

	if (wndw->immd) {
		asyw->point.x = asyw->state.crtc_x;
		asyw->point.y = asyw->state.crtc_y;
		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
			asyw->set.point = true;
	}

	return wndw->func->acquire(wndw, asyw, asyh);
}
static int
nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
			   struct nv50_wndw_atom *armw,
			   struct nv50_wndw_atom *asyw,
			   struct nv50_head_atom *asyh)
{
	struct drm_property_blob *ilut = asyh->state.degamma_lut;

	/* I8 format without an input LUT makes no sense, and the
	 * HW error-checks for this.
	 *
	 * In order to handle legacy gamma, when there's no input
	 * LUT we need to steal the output LUT and use it instead.
	 */
	if (!ilut && asyw->state.fb->format->format == DRM_FORMAT_C8) {
		/* This should be an error, but there's legacy clients
		 * that do a modeset before providing a gamma table.
		 *
		 * We keep the window disabled to avoid angering HW.
		 */
		if (!(ilut = asyh->state.gamma_lut)) {
			asyw->visible = false;
			return 0;
		}

		if (wndw->func->ilut)
			asyh->wndw.olut |= BIT(wndw->id);
	} else {
		asyh->wndw.olut &= ~BIT(wndw->id);
	}

	if (!ilut && wndw->func->ilut_identity &&
	    asyw->state.fb->format->format != DRM_FORMAT_XBGR16161616F &&
	    asyw->state.fb->format->format != DRM_FORMAT_ABGR16161616F) {
		static struct drm_property_blob dummy = {};
		ilut = &dummy;
	}

	/* Recalculate LUT state. */
	memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
	if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
		if (!wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut))) {
			DRM_DEBUG_KMS("Invalid ilut\n");
			return -EINVAL;
		}
		asyw->xlut.handle = wndw->wndw.vram.handle;
		asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
		asyw->set.xlut = true;
	} else {
		asyw->clr.xlut = armw->xlut.handle != 0;
	}

	/* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
	if (wndw->func->olut_core &&
	    (!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
		asyw->set.xlut = true;

	if (wndw->func->csc && asyh->state.ctm) {
		const struct drm_color_ctm *ctm = asyh->state.ctm->data;
		wndw->func->csc(wndw, asyw, ctm);
		asyw->csc.valid = true;
		asyw->set.csc = true;
	} else {
		asyw->csc.valid = false;
		asyw->clr.csc = armw->csc.valid;
	}

	/* Can't do an immediate flip while changing the LUT. */
	asyh->state.async_flip = false;

	return 0;
}
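/* Plane atomic_check: fetch the head state the window is joining (and
 * leaving), validate the new state, and build the set/clr masks that
 * the flush functions later translate into hardware methods.
 */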
static int
nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *harm = NULL, *asyh = NULL;
	bool modeset = false;
	int ret;

	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);

	/* Fetch the assembly state for the head the window will belong to,
	 * and determine whether the window will be visible.
	 */
	if (asyw->state.crtc) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);
		modeset = drm_atomic_crtc_needs_modeset(&asyh->state);
		asyw->visible = asyh->state.active;
	} else {
		asyw->visible = false;
	}

	/* Fetch assembly state for the head the window used to belong to. */
	if (armw->state.crtc) {
		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
		if (IS_ERR(harm))
			return PTR_ERR(harm);
	}

	/* LUT configuration can potentially cause the window to be disabled. */
	if (asyw->visible && wndw->func->xlut_set &&
	    (!armw->visible ||
	     asyh->state.color_mgmt_changed ||
	     asyw->state.fb->format->format !=
	     armw->state.fb->format->format)) {
		ret = nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);
		if (ret)
			return ret;
	}

	/* Calculate new window state. */
	if (asyw->visible) {
		ret = nv50_wndw_atomic_check_acquire(wndw, modeset,
						     armw, asyw, asyh);
		if (ret)
			return ret;

		asyh->wndw.mask |= BIT(wndw->id);
	} else
	if (armw->visible) {
		nv50_wndw_atomic_check_release(wndw, asyw, harm);
		harm->wndw.mask &= ~BIT(wndw->id);
	} else {
		return 0;
	}

	/* Aside from the obvious case where the window is actively being
	 * disabled, we might also need to temporarily disable the window
	 * when performing certain modeset operations.
	 */
	if (!asyw->visible || modeset) {
		asyw->clr.ntfy = armw->ntfy.handle != 0;
		asyw->clr.sema = armw->sema.handle != 0;
		asyw->clr.xlut = armw->xlut.handle != 0;
		asyw->clr.csc  = armw->csc.valid;
		if (wndw->func->image_clr)
			asyw->clr.image = armw->image.handle[0] != 0;
	}

	return 0;
}
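/* Drop the VRAM pin taken by nv50_wndw_prepare_fb() once the old
 * framebuffer is no longer displayed.
 */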
static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);

	NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
	if (!old_state->fb)
		return;

	nouveau_bo_unpin(fb->nvbo);
}
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *asyh;
	struct nv50_wndw_ctxdma *ctxdma;
	int ret;

	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
	if (!asyw->state.fb)
		return 0;

	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
	if (ret)
		return ret;

	if (wndw->ctxdma.parent) {
		ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
		if (IS_ERR(ctxdma)) {
			nouveau_bo_unpin(fb->nvbo);
			return PTR_ERR(ctxdma);
		}

		asyw->image.handle[0] = ctxdma->object.handle;
	}

	asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
	asyw->image.offset[0] = fb->nvbo->bo.offset;

	if (wndw->func->prepare) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);

		wndw->func->prepare(wndw, asyh, asyw);
	}

	return 0;
}
static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
	.prepare_fb = nv50_wndw_prepare_fb,
	.cleanup_fb = nv50_wndw_cleanup_fb,
	.atomic_check = nv50_wndw_atomic_check,
};
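/* nouveau wraps drm_plane_state in nv50_wndw_atom; the duplicate and
 * destroy hooks below keep the extra window bookkeeping consistent
 * across atomic commits.
 */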
static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	__drm_atomic_helper_plane_destroy_state(&asyw->state);
	kfree(asyw);
}
static struct drm_plane_state *
nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
{
	struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
	struct nv50_wndw_atom *asyw;
	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
		return NULL;
	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
	asyw->sema = armw->sema;
	asyw->ntfy = armw->ntfy;
	asyw->ilut = NULL;
	asyw->xlut = armw->xlut;
	asyw->csc  = armw->csc;
	asyw->image = armw->image;
	asyw->point = armw->point;
	asyw->clr.mask = 0;
	asyw->set.mask = 0;
	return &asyw->state;
}
static int
nv50_wndw_zpos_default(struct drm_plane *plane)
{
	return (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 :
	       (plane->type == DRM_PLANE_TYPE_OVERLAY) ? 1 : 255;
}
static void
nv50_wndw_reset(struct drm_plane *plane)
{
	struct nv50_wndw_atom *asyw;

	if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
		return;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	__drm_atomic_helper_plane_reset(plane, &asyw->state);
	plane->state->zpos = nv50_wndw_zpos_default(plane);
	plane->state->normalized_zpos = nv50_wndw_zpos_default(plane);
}
static void
nv50_wndw_destroy(struct drm_plane *plane)
{
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;

	list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
		nv50_wndw_ctxdma_del(ctxdma);
	}

	nvif_notify_fini(&wndw->notify);
	nv50_dmac_destroy(&wndw->wimm);
	nv50_dmac_destroy(&wndw->wndw);

	nv50_lut_fini(&wndw->ilut);

	drm_plane_cleanup(&wndw->plane);
	kfree(wndw);
}
const struct drm_plane_funcs
nv50_wndw = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = nv50_wndw_destroy,
	.reset = nv50_wndw_reset,
	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};
static int
nv50_wndw_notify(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}
void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
	nvif_notify_put(&wndw->notify);
}
void
nv50_wndw_init(struct nv50_wndw *wndw)
{
	nvif_notify_get(&wndw->notify);
}
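/* Common constructor for all window types: registers the DRM plane,
 * counts the zero-terminated format list, and exposes zpos (and,
 * where supported, alpha/blend-mode) properties.
 */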
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
	       enum drm_plane_type type, const char *name, int index,
	       const u32 *format, u32 heads,
	       enum nv50_disp_interlock_type interlock_type, u32 interlock_data,
	       struct nv50_wndw **pwndw)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_mmu *mmu = &drm->client.mmu;
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_wndw *wndw;
	int nformat;
	int ret;

	if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
		return -ENOMEM;
	wndw->func = func;
	wndw->id = index;
	wndw->interlock.type = interlock_type;
	wndw->interlock.data = interlock_data;

	wndw->ctxdma.parent = &wndw->wndw.base.user;
	INIT_LIST_HEAD(&wndw->ctxdma.list);

	/* Count the zero-terminated format list. */
	for (nformat = 0; format[nformat]; nformat++);

	ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
				       format, nformat, NULL,
				       type, "%s-%d", name, index);
	if (ret) {
		kfree(*pwndw);
		*pwndw = NULL;
		return ret;
	}

	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);

	if (wndw->func->ilut) {
		ret = nv50_lut_init(disp, mmu, &wndw->ilut);
		if (ret)
			return ret;
	}

	wndw->notify.func = nv50_wndw_notify;

	if (wndw->func->blend_set) {
		ret = drm_plane_create_zpos_property(&wndw->plane,
				nv50_wndw_zpos_default(&wndw->plane), 0, 254);
		if (ret)
			return ret;

		ret = drm_plane_create_alpha_property(&wndw->plane);
		if (ret)
			return ret;

		ret = drm_plane_create_blend_mode_property(&wndw->plane,
				BIT(DRM_MODE_BLEND_PIXEL_NONE) |
				BIT(DRM_MODE_BLEND_PREMULTI) |
				BIT(DRM_MODE_BLEND_COVERAGE));
		if (ret)
			return ret;
	} else {
		ret = drm_plane_create_zpos_immutable_property(&wndw->plane,
				nv50_wndw_zpos_default(&wndw->plane));
		if (ret)
			return ret;
	}

	return 0;
}
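/* Select the newest window channel class supported by the display,
 * call the matching constructor, then set up the immediate channel.
 */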
int
nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
	      struct nv50_wndw **pwndw)
{
	struct {
		s32 oclass;
		int version;
		int (*new)(struct nouveau_drm *, enum drm_plane_type,
			   int, s32, struct nv50_wndw **);
	} wndws[] = {
		{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
		{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
		{}
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	int cid, ret;

	cid = nvif_mclass(&disp->disp->object, wndws);
	if (cid < 0) {
		NV_ERROR(drm, "No supported window class\n");
		return cid;
	}

	ret = wndws[cid].new(drm, type, index, wndws[cid].oclass, pwndw);
	if (ret)
		return ret;

	return nv50_wimm_init(drm, *pwndw);
}