/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "wndw.h"
#include "wimm.h"

#include <nvif/class.h>
#include <nvif/cl0002.h>

#include <drm/drm_atomic_helper.h>
#include "nouveau_bo.h"
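
/* Context DMA objects describe a framebuffer's memory to the display
 * engine.  They are created on demand, one per memory "kind", and
 * cached on the window's ctxdma.list so that repeated flips between
 * similarly-tiled framebuffers can reuse the same handle.
 */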
static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
{
	nvif_object_fini(&ctxdma->object);
	list_del(&ctxdma->head);
	kfree(ctxdma);
}
static struct nv50_wndw_ctxdma *
nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
{
	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
	struct nv50_wndw_ctxdma *ctxdma;
	const u8    kind = fb->nvbo->kind;
	const u32 handle = 0xfb000000 | kind;
	struct {
		struct nv_dma_v0 base;
		union {
			struct nv50_dma_v0 nv50;
			struct gf100_dma_v0 gf100;
			struct gf119_dma_v0 gf119;
		};
	} args = {};
	u32 argc = sizeof(args.base);
	int ret;

	/* Reuse an existing ctxdma for this memory kind, if possible. */
	list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
		if (ctxdma->object.handle == handle)
			return ctxdma;
	}

	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);
	list_add(&ctxdma->head, &wndw->ctxdma.list);

	args.base.target = NV_DMA_V0_TARGET_VRAM;
	args.base.access = NV_DMA_V0_ACCESS_RDWR;
	args.base.start  = 0;
	args.base.limit  = drm->client.device.info.ram_user - 1;

	/* The chipset-specific tail of the arguments differs between
	 * display generations.
	 */
	if (drm->client.device.info.chipset < 0x80) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xc0) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		args.nv50.kind = kind;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xd0) {
		args.gf100.kind = kind;
		argc += sizeof(args.gf100);
	} else {
		args.gf119.page = GF119_DMA_V0_PAGE_LP;
		args.gf119.kind = kind;
		argc += sizeof(args.gf119);
	}

	ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
			       &args, argc, &ctxdma->object);
	if (ret) {
		nv50_wndw_ctxdma_del(ctxdma);
		return ERR_PTR(ret);
	}

	return ctxdma;
}
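
/* Block until the window's notifier reports that the most recently
 * submitted update has begun execution on the hardware.
 */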
static int
nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	if (asyw->set.ntfy) {
		return wndw->func->ntfy_wait_begun(disp->sync,
						   asyw->ntfy.offset,
						   wndw->wndw.base.device);
	}
	return 0;
}
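
/* Flush the "clear" methods for state that's no longer required.
 * The interlock array accumulates, per channel type, the masks that
 * subsequent core/window/wimm updates must synchronise against.
 */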
static void
nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
		    struct nv50_wndw_atom *asyw)
{
	union nv50_wndw_atom_mask clr = {
		.mask = asyw->clr.mask & ~(flush ? 0 : asyw->set.mask),
	};
	if (clr.sema ) wndw->func-> sema_clr(wndw);
	if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
	if (clr.xlut ) wndw->func-> xlut_clr(wndw);
	if (clr.image) wndw->func->image_clr(wndw);

	interlock[wndw->interlock.type] |= wndw->interlock.data;
}
static void
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
		    struct nv50_wndw_atom *asyw)
{
	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		asyw->image.mode = 0;
		asyw->image.interval = 1;
	}

	if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
	if (asyw->set.image) wndw->func->image_set(wndw, asyw);

	if (asyw->set.xlut ) {
		if (asyw->ilut) {
			asyw->xlut.i.offset =
				nv50_lut_load(&wndw->ilut, asyw->xlut.i.buffer,
					      asyw->ilut, asyw->xlut.i.load);
		}
		wndw->func->xlut_set(wndw, asyw);
	}

	if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
	if (asyw->set.point) {
		/* Clear the point flag first, then check whether any other
		 * state still needs to go out via the window channel.
		 */
		asyw->set.point = false;
		if (asyw->set.mask)
			interlock[wndw->interlock.type] |=
				wndw->interlock.data;
		interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data;

		wndw->immd->point(wndw, asyw);
		wndw->immd->update(wndw, interlock);
	} else {
		interlock[wndw->interlock.type] |= wndw->interlock.data;
	}
}
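
/* Arm the window's completion notifier: point it at this window's slot
 * in the shared sync buffer, reset the notifier memory, and flag it for
 * programming with the next update.
 */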
void
nv50_wndw_ntfy_enable(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);

	asyw->ntfy.handle = wndw->wndw.sync.handle;
	asyw->ntfy.offset = wndw->ntfy;
	asyw->ntfy.awaken = false;
	asyw->set.ntfy = true;

	wndw->func->ntfy_reset(disp->sync, wndw->ntfy);
	/* Alternate between the window's two notifier slots. */
	wndw->ntfy ^= 0x10;
}
static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
	wndw->func->release(wndw, asyw, asyh);
	asyw->ntfy.handle = 0;
	asyw->sema.handle = 0;
}
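
/* Translate the framebuffer's DRM fourcc into the hardware surface
 * format code.  The colorspace field selects YUV (1) vs RGB (0).
 */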
static int
nv50_wndw_atomic_check_acquire_yuv(struct nv50_wndw_atom *asyw)
{
	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_YUYV: asyw->image.format = 0x28; break;
	case DRM_FORMAT_UYVY: asyw->image.format = 0x29; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}
	asyw->image.colorspace = 1;
	return 0;
}
static int
nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
{
	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
	case DRM_FORMAT_XRGB8888   :
	case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
	case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
	case DRM_FORMAT_XRGB1555   :
	case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
	case DRM_FORMAT_XBGR8888   :
	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010: asyw->image.format = 0xdf; break;
	default:
		return -EINVAL;
	}
	asyw->image.colorspace = 0;
	return 0;
}
static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
			       struct nv50_wndw_atom *armw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	int ret;

	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);

	if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
		asyw->image.w = fb->base.width;
		asyw->image.h = fb->base.height;
		asyw->image.kind = fb->nvbo->kind;

		/* Try RGB first, falling back to YUV. */
		ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
		if (ret) {
			ret = nv50_wndw_atomic_check_acquire_yuv(asyw);
			if (ret)
				return ret;
		}

		if (asyw->image.kind) {
			asyw->image.layout = 0;
			if (drm->client.device.info.chipset >= 0xc0)
				asyw->image.blockh = fb->nvbo->mode >> 4;
			else
				asyw->image.blockh = fb->nvbo->mode;
			asyw->image.blocks[0] = fb->base.pitches[0] / 64;
			asyw->image.pitch[0] = 0;
		} else {
			asyw->image.layout = 1;
			asyw->image.blockh = 0;
			asyw->image.blocks[0] = 0;
			asyw->image.pitch[0] = fb->base.pitches[0];
		}

		if (!(asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
			asyw->image.interval = 1;
		else
			asyw->image.interval = 0;
		asyw->image.mode = asyw->image.interval ? 0 : 1;
		asyw->set.image = wndw->func->image_set != NULL;
	}

	if (wndw->func->scale_set) {
		asyw->scale.sx = asyw->state.src_x >> 16;
		asyw->scale.sy = asyw->state.src_y >> 16;
		asyw->scale.sw = asyw->state.src_w >> 16;
		asyw->scale.sh = asyw->state.src_h >> 16;
		asyw->scale.dw = asyw->state.crtc_w;
		asyw->scale.dh = asyw->state.crtc_h;
		if (memcmp(&armw->scale, &asyw->scale, sizeof(asyw->scale)))
			asyw->set.scale = true;
	}

	asyw->point.x = asyw->state.crtc_x;
	asyw->point.y = asyw->state.crtc_y;
	if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
		asyw->set.point = true;

	return wndw->func->acquire(wndw, asyw, asyh);
}
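
/* Determine input (and, for legacy gamma, output) LUT requirements for
 * the new window state.  This may force the window invisible.
 */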
static void
nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
			   struct nv50_wndw_atom *armw,
			   struct nv50_wndw_atom *asyw,
			   struct nv50_head_atom *asyh)
{
	struct drm_property_blob *ilut = asyh->state.degamma_lut;

	/* I8 format without an input LUT makes no sense, and the
	 * HW error-checks for this.
	 *
	 * In order to handle legacy gamma, when there's no input
	 * LUT we need to steal the output LUT and use it instead.
	 */
	if (!ilut && asyw->state.fb->format->format == DRM_FORMAT_C8) {
		/* This should be an error, but there's legacy clients
		 * that do a modeset before providing a gamma table.
		 *
		 * We keep the window disabled to avoid angering HW.
		 */
		if (!(ilut = asyh->state.gamma_lut)) {
			asyw->visible = false;
			return;
		}

		if (wndw->func->ilut)
			asyh->wndw.olut |= BIT(wndw->id);
	} else {
		asyh->wndw.olut &= ~BIT(wndw->id);
	}

	/* Hardware that can only bypass the input LUT via an identity
	 * table still needs an ilut to be programmed.
	 */
	if (!ilut && wndw->func->ilut_identity) {
		static struct drm_property_blob dummy = {};
		ilut = &dummy;
	}

	/* Recalculate LUT state. */
	memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
	if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
		wndw->func->ilut(wndw, asyw);
		asyw->xlut.handle = wndw->wndw.vram.handle;
		asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
		asyw->set.xlut = true;
	}

	/* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
	if (wndw->func->olut_core &&
	    (!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
		asyw->set.xlut = true;

	/* Can't do an immediate flip while changing the LUT. */
	asyh->state.pageflip_flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
}
static int
nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *harm = NULL, *asyh = NULL;
	bool modeset = false;
	int ret;

	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);

	/* Fetch the assembly state for the head the window will belong to,
	 * and determine whether the window will be visible.
	 */
	if (asyw->state.crtc) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);
		modeset = drm_atomic_crtc_needs_modeset(&asyh->state);
		asyw->visible = asyh->state.active;
	} else {
		asyw->visible = false;
	}

	/* Fetch assembly state for the head the window used to belong to. */
	if (armw->state.crtc) {
		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
		if (IS_ERR(harm))
			return PTR_ERR(harm);
	}

	/* LUT configuration can potentially cause the window to be disabled. */
	if (asyw->visible && wndw->func->xlut_set &&
	    (!armw->visible ||
	     asyh->state.color_mgmt_changed ||
	     asyw->state.fb->format->format !=
	     armw->state.fb->format->format))
		nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);

	/* Calculate new window state. */
	if (asyw->visible) {
		ret = nv50_wndw_atomic_check_acquire(wndw, modeset,
						     armw, asyw, asyh);
		if (ret)
			return ret;

		asyh->wndw.mask |= BIT(wndw->id);
	} else
	if (armw->visible) {
		nv50_wndw_atomic_check_release(wndw, asyw, harm);
		harm->wndw.mask &= ~BIT(wndw->id);
	} else {
		return 0;
	}

	/* Aside from the obvious case where the window is actively being
	 * disabled, we might also need to temporarily disable the window
	 * when performing certain modeset operations.
	 */
	if (!asyw->visible || modeset) {
		asyw->clr.ntfy = armw->ntfy.handle != 0;
		asyw->clr.sema = armw->sema.handle != 0;
		asyw->clr.xlut = armw->xlut.handle != 0;
		if (wndw->func->image_clr)
			asyw->clr.image = armw->image.handle[0] != 0;
	}

	return 0;
}
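
/* Framebuffer lifecycle: buffers are pinned into VRAM for scanout in
 * .prepare_fb() and unpinned again in .cleanup_fb() once the atomic
 * commit has completed.
 */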
static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);

	NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
	if (!old_state->fb)
		return;

	nouveau_bo_unpin(fb->nvbo);
}
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *asyh;
	struct nv50_wndw_ctxdma *ctxdma;
	int ret;

	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
	if (!asyw->state.fb)
		return 0;

	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
	if (ret)
		return ret;

	if (wndw->ctxdma.parent) {
		ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
		if (IS_ERR(ctxdma)) {
			nouveau_bo_unpin(fb->nvbo);
			return PTR_ERR(ctxdma);
		}

		asyw->image.handle[0] = ctxdma->object.handle;
	}

	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
	asyw->image.offset[0] = fb->nvbo->bo.offset;

	if (wndw->func->prepare) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);

		wndw->func->prepare(wndw, asyh, asyw);
	}

	return 0;
}
static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
	.prepare_fb = nv50_wndw_prepare_fb,
	.cleanup_fb = nv50_wndw_cleanup_fb,
	.atomic_check = nv50_wndw_atomic_check,
};
static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	__drm_atomic_helper_plane_destroy_state(&asyw->state);
	kfree(asyw);
}
static struct drm_plane_state *
nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
{
	struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
	struct nv50_wndw_atom *asyw;
	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
		return NULL;
	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
	asyw->sema = armw->sema;
	asyw->ntfy = armw->ntfy;
	asyw->ilut = NULL;
	asyw->xlut = armw->xlut;
	asyw->image = armw->image;
	asyw->point = armw->point;
	asyw->clr.mask = 0;
	asyw->set.mask = 0;
	return &asyw->state;
}
static void
nv50_wndw_reset(struct drm_plane *plane)
{
	struct nv50_wndw_atom *asyw;

	if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
		return;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);
	plane->state = &asyw->state;
	plane->state->plane = plane;
	plane->state->rotation = DRM_MODE_ROTATE_0;
}
static void
nv50_wndw_destroy(struct drm_plane *plane)
{
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;

	list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
		nv50_wndw_ctxdma_del(ctxdma);
	}

	nvif_notify_fini(&wndw->notify);
	nv50_dmac_destroy(&wndw->wimm);
	nv50_dmac_destroy(&wndw->wndw);

	nv50_lut_fini(&wndw->ilut);

	drm_plane_cleanup(&wndw->plane);
	kfree(wndw);
}
const struct drm_plane_funcs
nv50_wndw = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = nv50_wndw_destroy,
	.reset = nv50_wndw_reset,
	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};
static int
nv50_wndw_notify(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}

void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
	nvif_notify_put(&wndw->notify);
}

void
nv50_wndw_init(struct nv50_wndw *wndw)
{
	nvif_notify_get(&wndw->notify);
}
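
/* Common constructor: initialises the DRM plane, interlock data, and
 * (where the hardware supports one) the window's input LUT.
 */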
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
	       enum drm_plane_type type, const char *name, int index,
	       const u32 *format, u32 heads,
	       enum nv50_disp_interlock_type interlock_type, u32 interlock_data,
	       struct nv50_wndw **pwndw)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_mmu *mmu = &drm->client.mmu;
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_wndw *wndw;
	int nformat;
	int ret;

	if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
		return -ENOMEM;
	wndw->func = func;
	wndw->id = index;
	wndw->interlock.type = interlock_type;
	wndw->interlock.data = interlock_data;

	wndw->ctxdma.parent = &wndw->wndw.base.user;
	INIT_LIST_HEAD(&wndw->ctxdma.list);

	/* Count the entries in the zero-terminated format list. */
	for (nformat = 0; format[nformat]; nformat++);

	ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
				       format, nformat, NULL,
				       type, "%s-%d", name, index);
	if (ret) {
		kfree(*pwndw);
		*pwndw = NULL;
		return ret;
	}

	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);

	if (wndw->func->ilut) {
		ret = nv50_lut_init(disp, mmu, &wndw->ilut);
		if (ret)
			return ret;
	}

	wndw->notify.func = nv50_wndw_notify;
	return 0;
}
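
/* Pick the most capable window class supported by both the kernel and
 * the hardware; nvif_mclass() returns the index of the first entry in
 * the (newest-first) list that the display object supports.
 */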
int
nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
	      struct nv50_wndw **pwndw)
{
	struct {
		s32 oclass;
		int version;
		int (*new)(struct nouveau_drm *, enum drm_plane_type,
			   int, s32, struct nv50_wndw **);
	} wndws[] = {
		{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
		{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
		{}
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	int cid, ret;

	cid = nvif_mclass(&disp->disp->object, wndws);
	if (cid < 0) {
		NV_ERROR(drm, "No supported window class\n");
		return cid;
	}

	ret = wndws[cid].new(drm, type, index, wndws[cid].oclass, pwndw);
	if (ret)
		return ret;

	return nv50_wimm_init(drm, *pwndw);
}