2 * Copyright 2005-2006 Stephane Marchesin
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
27 #include "nouveau_drv.h"
28 #include "nouveau_drm.h"
31 /* returns the size of fifo context */
/*
 * nouveau_fifo_ctx_size() - report the per-channel FIFO context size for
 * this card generation, chosen by comparing dev_priv->card_type against
 * the NV_40 and NV_17 generation thresholds.
 *
 * NOTE(review): this extract is truncated — the function's opening brace
 * and the return statements selected by each branch below are missing
 * (original lines 33/35/37/39+); confirm the returned values against the
 * full source before relying on them.
 */
32 int nouveau_fifo_ctx_size(struct drm_device
*dev
)
34 struct drm_nouveau_private
*dev_priv
=dev
->dev_private
;
/* Newer generations get larger contexts; exact sizes not visible here. */
36 if (dev_priv
->card_type
>= NV_40
)
38 else if (dev_priv
->card_type
>= NV_17
)
44 /***********************************
45 * functions doing the actual work
46 ***********************************/
/*
 * nouveau_fifo_instmem_configure() - program PFIFO with the instance-memory
 * locations of RAMHT (object hash table), RAMRO and RAMFC, using offsets
 * kept in dev_priv.  RAMFC layout differs per card generation, selected by
 * the nested switches on card_type / chipset.
 *
 * NOTE(review): this extract is truncated — the function braces, the case
 * labels of both switch statements, several OR-ed value fragments and the
 * return statement are missing from the visible source.
 */
48 static int nouveau_fifo_instmem_configure(struct drm_device
*dev
)
50 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
/* RAMHT: search depth 128, table size encoded as (ramht_bits - 9),
 * base offset stored right-shifted by 8. */
52 NV_WRITE(NV03_PFIFO_RAMHT
,
53 (0x03 << 24) /* search 128 */ |
54 ((dev_priv
->ramht_bits
- 9) << 16) |
55 (dev_priv
->ramht_offset
>> 8)
/* RAMRO base offset, also stored >> 8. */
58 NV_WRITE(NV03_PFIFO_RAMRO
, dev_priv
->ramro_offset
>>8);
/* RAMFC: register and encoding vary per generation (case labels for the
 * switches below are not visible in this extract). */
60 switch(dev_priv
->card_type
)
63 switch (dev_priv
->chipset
) {
72 NV_WRITE(NV40_PFIFO_RAMFC
, 0x30002);
75 NV_WRITE(NV40_PFIFO_RAMFC
, ((nouveau_mem_fb_amount(dev
)-512*1024+dev_priv
->ramfc_offset
)>>16) |
81 NV_WRITE(NV03_PFIFO_RAMFC
, (dev_priv
->ramfc_offset
>>8) |
82 (1 << 16) /* 64 Bytes entry*/);
83 /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
88 NV_WRITE(NV03_PFIFO_RAMFC
, dev_priv
->ramfc_offset
>>8);
/*
 * nouveau_fifo_init() - bring up the PFIFO engine at driver load: reset it
 * via PMC, enable error interrupts, point it at instance memory, program
 * default CACHE1/CACHE0 register values, then enable the FIFO caches.
 *
 * NOTE(review): this extract is truncated — function braces, the error
 * return after the instmem-configure failure, parts of the DMA_FETCH value
 * and the final return are missing from the visible source.
 */
95 int nouveau_fifo_init(struct drm_device
*dev
)
97 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
/* Reset PFIFO by clearing then setting its PMC enable bit. */
100 NV_WRITE(NV03_PMC_ENABLE
, NV_READ(NV03_PMC_ENABLE
) &
101 ~NV_PMC_ENABLE_PFIFO
);
102 NV_WRITE(NV03_PMC_ENABLE
, NV_READ(NV03_PMC_ENABLE
) |
103 NV_PMC_ENABLE_PFIFO
);
105 /* Enable PFIFO error reporting */
106 NV_WRITE(NV03_PFIFO_INTR_0
, 0xFFFFFFFF);
107 NV_WRITE(NV03_PFIFO_INTR_EN_0
, 0xFFFFFFFF);
/* Caches off while reprogramming. */
109 NV_WRITE(NV03_PFIFO_CACHES
, 0x00000000);
111 ret
= nouveau_fifo_instmem_configure(dev
);
/* NOTE(review): the error-path branch around this message is missing. */
113 DRM_ERROR("Failed to configure instance memory\n");
117 /* FIXME remove all the stuff that's done in nouveau_fifo_alloc */
119 DRM_DEBUG("Setting defaults for remaining PFIFO regs\n");
121 /* All channels into PIO mode */
122 NV_WRITE(NV04_PFIFO_MODE
, 0x00000000);
124 NV_WRITE(NV03_PFIFO_CACHE1_PUSH0
, 0x00000000);
125 NV_WRITE(NV04_PFIFO_CACHE1_PULL0
, 0x00000000);
126 /* Channel 0 active, PIO mode */
127 NV_WRITE(NV03_PFIFO_CACHE1_PUSH1
, 0x00000000);
128 /* PUT and GET to 0 */
129 NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT
, 0x00000000);
130 NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET
, 0x00000000);
131 /* No cmdbuf object */
132 NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE
, 0x00000000);
133 NV_WRITE(NV03_PFIFO_CACHE0_PUSH0
, 0x00000000);
134 NV_WRITE(NV04_PFIFO_CACHE0_PULL0
, 0x00000000);
135 NV_WRITE(NV04_PFIFO_SIZE
, 0x0000FFFF);
136 NV_WRITE(NV04_PFIFO_CACHE1_HASH
, 0x0000FFFF);
137 NV_WRITE(NV04_PFIFO_CACHE0_PULL1
, 0x00000001);
138 NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL
, 0x00000000);
139 NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE
, 0x00000000);
140 NV_WRITE(NV04_PFIFO_CACHE1_ENGINE
, 0x00000000);
/* DMA fetch parameters; NOTE(review): trailing OR terms are missing. */
142 NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH
, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES
|
143 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES
|
144 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4
|
146 NV_PFIFO_CACHE1_BIG_ENDIAN
|
/* Re-enable pusher/puller for CACHE1. */
150 NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH
, 0x00000001);
151 NV_WRITE(NV03_PFIFO_CACHE1_PUSH0
, 0x00000001);
152 NV_WRITE(NV04_PFIFO_CACHE1_PULL0
, 0x00000001);
153 NV_WRITE(NV04_PFIFO_CACHE1_PULL1
, 0x00000001);
/* Context-control defaults differ between NV10+ (and NV40+) and pre-NV10. */
156 if (dev_priv
->card_type
>= NV_10
) {
157 NV_WRITE(NV10_PGRAPH_CTX_USER
, 0x0);
158 NV_WRITE(NV04_PFIFO_DELAY_0
, 0xff /* retrycount*/ );
159 if (dev_priv
->card_type
>= NV_40
)
160 NV_WRITE(NV10_PGRAPH_CTX_CONTROL
, 0x00002001);
162 NV_WRITE(NV10_PGRAPH_CTX_CONTROL
, 0x10110000);
164 NV_WRITE(NV04_PGRAPH_CTX_USER
, 0x0);
165 NV_WRITE(NV04_PFIFO_DELAY_0
, 0xff /* retrycount*/ );
166 NV_WRITE(NV04_PGRAPH_CTX_CONTROL
, 0x10110000);
169 NV_WRITE(NV04_PFIFO_DMA_TIMESLICE
, 0x001fffff);
170 NV_WRITE(NV03_PFIFO_CACHES
, 0x00000001);
/*
 * nouveau_fifo_pushbuf_ctxdma_init() - create a DMA (ctxdma) object
 * covering the channel's push buffer, choosing the DMA target by where the
 * buffer lives: AGP (via the GART helper), PCI non-linear memory, VRAM, or
 * the NV04-specific PCI BAR1 read-only mapping.  The resulting object is
 * then referenced onto the channel.
 *
 * NOTE(review): this extract is truncated — the return-type line, several
 * argument-list fragments, brace/else lines and the final return are
 * missing from the visible source.
 */
175 nouveau_fifo_pushbuf_ctxdma_init(struct nouveau_channel
*chan
)
177 struct drm_device
*dev
= chan
->dev
;
178 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
179 struct mem_block
*pb
= chan
->pushbuf_mem
;
180 struct nouveau_gpuobj
*pushbuf
= NULL
;
/* Branch on the push buffer's memory-type flags. */
183 if (pb
->flags
& NOUVEAU_MEM_AGP
) {
184 ret
= nouveau_gpuobj_gart_dma_new(chan
, pb
->start
, pb
->size
,
187 &chan
->pushbuf_base
);
189 if (pb
->flags
& NOUVEAU_MEM_PCI
) {
190 ret
= nouveau_gpuobj_dma_new(chan
, NV_CLASS_DMA_IN_MEMORY
,
193 NV_DMA_TARGET_PCI_NONLINEAR
,
195 chan
->pushbuf_base
= 0;
196 } else if (dev_priv
->card_type
!= NV_04
) {
197 ret
= nouveau_gpuobj_dma_new(chan
, NV_CLASS_DMA_IN_MEMORY
,
200 NV_DMA_TARGET_VIDMEM
, &pushbuf
);
201 chan
->pushbuf_base
= 0;
203 /* NV04 cmdbuf hack, from original ddx.. not sure of it's
204 * exact reason for existing :) PCI access to cmdbuf in
207 ret
= nouveau_gpuobj_dma_new(chan
, NV_CLASS_DMA_IN_MEMORY
,
209 drm_get_resource_start(dev
, 1),
210 pb
->size
, NV_DMA_ACCESS_RO
,
211 NV_DMA_TARGET_PCI
, &pushbuf
);
212 chan
->pushbuf_base
= 0;
/* Reference the ctxdma onto the channel; on failure, delete it unless it
 * is the shared GART scatter-gather ctxdma owned elsewhere. */
215 if ((ret
= nouveau_gpuobj_ref_add(dev
, chan
, 0, pushbuf
,
217 DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret
);
218 if (pushbuf
!= dev_priv
->gart_info
.sg_ctxdma
)
219 nouveau_gpuobj_del(dev
, &pushbuf
);
/*
 * nouveau_fifo_user_pushbuf_alloc() - allocate the DMA push buffer for a
 * userspace channel, applying defaults (framebuffer location, minimum size
 * max(NV03_FIFO_SIZE, PAGE_SIZE)) to any unconfigured cmdbuf settings.
 * The (struct drm_file *)-2 owner marks the allocation as driver-internal.
 *
 * NOTE(review): this extract is truncated — function braces, the
 * null-check branch around the error message and the return statement(s)
 * are missing from the visible source.
 */
226 static struct mem_block
*
227 nouveau_fifo_user_pushbuf_alloc(struct drm_device
*dev
)
229 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
230 struct nouveau_config
*config
= &dev_priv
->config
;
231 struct mem_block
*pb
;
232 int pb_min_size
= max(NV03_FIFO_SIZE
,PAGE_SIZE
);
234 /* Defaults for unconfigured values */
235 if (!config
->cmdbuf
.location
)
236 config
->cmdbuf
.location
= NOUVEAU_MEM_FB
;
237 if (!config
->cmdbuf
.size
|| config
->cmdbuf
.size
< pb_min_size
)
238 config
->cmdbuf
.size
= pb_min_size
;
240 pb
= nouveau_mem_alloc(dev
, 0, config
->cmdbuf
.size
,
241 config
->cmdbuf
.location
| NOUVEAU_MEM_MAPPED
,
242 (struct drm_file
*)-2);
244 DRM_ERROR("Couldn't allocate DMA push buffer.\n");
249 /* allocates and initializes a fifo for user space consumption */
/*
 * nouveau_fifo_alloc() - find a free hardware channel slot, allocate and
 * populate a nouveau_channel for it (user control-register window chosen
 * by card generation), set up notifiers, default gpuobjs, the push-buffer
 * ctxdma, then graphics and FIFO contexts; for the first channel on
 * pre-NV50 it also loads the contexts into PFIFO directly.
 *
 * NOTE(review): this extract is truncated — the return-type line, braces,
 * `break`/`return` statements in the search loop, the drm_calloc flags
 * argument, every error-path return after the nouveau_fifo_free() calls,
 * and the final *chan_ret assignment/return are missing from the visible
 * source.
 */
251 nouveau_fifo_alloc(struct drm_device
*dev
, struct nouveau_channel
**chan_ret
,
252 struct drm_file
*file_priv
, struct mem_block
*pushbuf
,
253 uint32_t vram_handle
, uint32_t tt_handle
)
256 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
257 struct nouveau_engine
*engine
= &dev_priv
->Engine
;
258 struct nouveau_channel
*chan
;
262 * Alright, here is the full story
263 * Nvidia cards have multiple hw fifo contexts (praise them for that,
264 * no complicated crash-prone context switches)
265 * We allocate a new context for each app and let it write to it directly
266 * (woo, full userspace command submission !)
267 * When there are no more contexts, you lost
/* Linear scan for the first unused channel slot. */
269 for (channel
= 0; channel
< engine
->fifo
.channels
; channel
++) {
270 if (dev_priv
->fifos
[channel
] == NULL
)
274 /* no more fifos. you lost. */
275 if (channel
== engine
->fifo
.channels
)
278 dev_priv
->fifos
[channel
] = drm_calloc(1, sizeof(struct nouveau_channel
),
280 if (!dev_priv
->fifos
[channel
])
282 dev_priv
->fifo_alloc_count
++;
283 chan
= dev_priv
->fifos
[channel
];
286 chan
->file_priv
= file_priv
;
287 chan
->pushbuf_mem
= pushbuf
;
289 DRM_INFO("Allocating FIFO number %d\n", channel
);
291 /* Locate channel's user control regs */
292 if (dev_priv
->card_type
< NV_40
) {
293 chan
->user
= NV03_USER(channel
);
294 chan
->user_size
= NV03_USER_SIZE
;
295 chan
->put
= NV03_USER_DMA_PUT(channel
);
296 chan
->get
= NV03_USER_DMA_GET(channel
);
297 chan
->ref_cnt
= NV03_USER_REF_CNT(channel
);
299 if (dev_priv
->card_type
< NV_50
) {
300 chan
->user
= NV40_USER(channel
);
301 chan
->user_size
= NV40_USER_SIZE
;
302 chan
->put
= NV40_USER_DMA_PUT(channel
);
303 chan
->get
= NV40_USER_DMA_GET(channel
);
304 chan
->ref_cnt
= NV40_USER_REF_CNT(channel
);
306 chan
->user
= NV50_USER(channel
);
307 chan
->user_size
= NV50_USER_SIZE
;
308 chan
->put
= NV50_USER_DMA_PUT(channel
);
309 chan
->get
= NV50_USER_DMA_GET(channel
);
310 chan
->ref_cnt
= NV50_USER_REF_CNT(channel
);
313 /* Allocate space for per-channel fixed notifier memory */
314 ret
= nouveau_notifier_init_channel(chan
);
316 nouveau_fifo_free(chan
);
320 /* Setup channel's default objects */
321 ret
= nouveau_gpuobj_channel_init(chan
, vram_handle
, tt_handle
);
323 nouveau_fifo_free(chan
);
327 /* Create a dma object for the push buffer */
328 ret
= nouveau_fifo_pushbuf_ctxdma_init(chan
);
330 nouveau_fifo_free(chan
);
334 nouveau_wait_for_idle(dev
);
336 /* disable the fifo caches */
337 NV_WRITE(NV03_PFIFO_CACHES
, 0x00000000);
338 NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH
, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH
)&(~0x1));
339 NV_WRITE(NV03_PFIFO_CACHE1_PUSH0
, 0x00000000);
340 NV_WRITE(NV04_PFIFO_CACHE1_PULL0
, 0x00000000);
342 /* Create a graphics context for new channel */
343 ret
= engine
->graph
.create_context(chan
);
345 nouveau_fifo_free(chan
);
349 /* Construct inital RAMFC for new channel */
350 ret
= engine
->fifo
.create_context(chan
);
352 nouveau_fifo_free(chan
);
356 /* setup channel's default get/put values
357 * XXX: quite possibly extremely pointless..
359 NV_WRITE(chan
->get
, chan
->pushbuf_base
);
360 NV_WRITE(chan
->put
, chan
->pushbuf_base
);
362 /* If this is the first channel, setup PFIFO ourselves. For any
363 * other case, the GPU will handle this when it switches contexts.
365 if (dev_priv
->card_type
< NV_50
&&
366 dev_priv
->fifo_alloc_count
== 1) {
367 ret
= engine
->fifo
.load_context(chan
);
369 nouveau_fifo_free(chan
);
373 ret
= engine
->graph
.load_context(chan
);
375 nouveau_fifo_free(chan
);
/* Turn the pusher/puller back on. */
380 NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH
,
381 NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH
) | 1);
382 NV_WRITE(NV03_PFIFO_CACHE1_PUSH0
, 0x00000001);
383 NV_WRITE(NV04_PFIFO_CACHE1_PULL0
, 0x00000001);
384 NV_WRITE(NV04_PFIFO_CACHE1_PULL1
, 0x00000001);
386 /* reenable the fifo caches */
387 NV_WRITE(NV03_PFIFO_CACHES
, 1);
389 DRM_INFO("%s: initialised FIFO %d\n", __func__
, channel
);
/*
 * nouveau_channel_idle() - check whether a channel has drained: pauses the
 * FIFO caches, then compares DMA GET/PUT either from the channel's RAMFC
 * (if it is not the one currently resident in PFIFO) or from the live
 * CACHE1 registers, restoring the caches state before returning.
 *
 * NOTE(review): this extract is truncated — the return-type line, braces
 * and the return statement(s) carrying the idle result are missing from
 * the visible source.
 */
395 nouveau_channel_idle(struct nouveau_channel
*chan
)
397 struct drm_device
*dev
= chan
->dev
;
398 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
399 struct nouveau_engine
*engine
= &dev_priv
->Engine
;
/* Save caches enable, then pause by clearing bit 0. */
403 caches
= NV_READ(NV03_PFIFO_CACHES
);
404 NV_WRITE(NV03_PFIFO_CACHES
, caches
& ~1);
/* Channel not resident: compare GET/PUT words stored in its RAMFC. */
406 if (engine
->fifo
.channel_id(dev
) != chan
->id
) {
407 struct nouveau_gpuobj
*ramfc
= chan
->ramfc
->gpuobj
;
409 if (INSTANCE_RD(ramfc
, 0) != INSTANCE_RD(ramfc
, 1))
/* Resident channel: compare the live CACHE1 GET/PUT registers. */
414 idle
= (NV_READ(NV04_PFIFO_CACHE1_DMA_GET
) ==
415 NV_READ(NV04_PFIFO_CACHE1_DMA_PUT
));
418 NV_WRITE(NV03_PFIFO_CACHES
, caches
);
/*
 * nouveau_fifo_free() - tear down a channel: wait up to ~2s (via the timer
 * engine) for it to idle, pause the FIFO caches, destroy the FIFO and
 * PGRAPH contexts, re-enable the caches, release the push buffer and all
 * channel-owned objects/notifiers, then clear the slot and free the
 * channel struct.
 *
 * NOTE(review): this extract is truncated — braces, the timeout-loop
 * break, and several closing lines are missing from the visible source.
 */
423 void nouveau_fifo_free(struct nouveau_channel
*chan
)
425 struct drm_device
*dev
= chan
->dev
;
426 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
427 struct nouveau_engine
*engine
= &dev_priv
->Engine
;
430 DRM_INFO("%s: freeing fifo %d\n", __func__
, chan
->id
);
432 /* Give the channel a chance to idle, wait 2s (hopefully) */
433 t_start
= engine
->timer
.read(dev
);
434 while (!nouveau_channel_idle(chan
)) {
435 if (engine
->timer
.read(dev
) - t_start
> 2000000000ULL) {
436 DRM_ERROR("Failed to idle channel %d before destroy."
437 "Prepare for strangeness..\n", chan
->id
);
442 /*XXX: Maybe should wait for PGRAPH to finish with the stuff it fetched
446 /* disable the fifo caches */
447 NV_WRITE(NV03_PFIFO_CACHES
, 0x00000000);
448 NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH
, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH
)&(~0x1));
449 NV_WRITE(NV03_PFIFO_CACHE1_PUSH0
, 0x00000000);
450 NV_WRITE(NV04_PFIFO_CACHE1_PULL0
, 0x00000000);
452 // FIXME XXX needs more code
/* Destroy the per-channel FIFO (RAMFC) context. */
454 engine
->fifo
.destroy_context(chan
);
456 /* Cleanup PGRAPH state */
457 engine
->graph
.destroy_context(chan
);
459 /* reenable the fifo caches */
460 NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH
,
461 NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH
) | 1);
462 NV_WRITE(NV03_PFIFO_CACHE1_PUSH0
, 0x00000001);
463 NV_WRITE(NV04_PFIFO_CACHE1_PULL0
, 0x00000001);
464 NV_WRITE(NV03_PFIFO_CACHES
, 0x00000001);
466 /* Deallocate push buffer */
467 nouveau_gpuobj_ref_del(dev
, &chan
->pushbuf
);
468 if (chan
->pushbuf_mem
) {
469 nouveau_mem_free(dev
, chan
->pushbuf_mem
);
470 chan
->pushbuf_mem
= NULL
;
473 /* Destroy objects belonging to the channel */
474 nouveau_gpuobj_channel_takedown(chan
);
476 nouveau_notifier_takedown_channel(chan
);
/* Release the slot and the channel struct itself. */
478 dev_priv
->fifos
[chan
->id
] = NULL
;
479 dev_priv
->fifo_alloc_count
--;
480 drm_free(chan
, sizeof(*chan
), DRM_MEM_DRIVER
);
483 /* cleanups all the fifos from file_priv */
/*
 * nouveau_fifo_cleanup() - on file close, free every channel whose
 * file_priv matches the departing client.
 *
 * NOTE(review): this extract is truncated — function braces and the loop
 * variable declaration for `i` are missing from the visible source.
 */
484 void nouveau_fifo_cleanup(struct drm_device
*dev
, struct drm_file
*file_priv
)
486 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
487 struct nouveau_engine
*engine
= &dev_priv
->Engine
;
490 DRM_DEBUG("clearing FIFO enables from file_priv\n");
491 for(i
= 0; i
< engine
->fifo
.channels
; i
++) {
492 struct nouveau_channel
*chan
= dev_priv
->fifos
[i
];
494 if (chan
&& chan
->file_priv
== file_priv
)
495 nouveau_fifo_free(chan
);
/*
 * nouveau_fifo_owner() - report whether `channel` is valid, allocated and
 * owned by the given file_priv; out-of-range or unallocated channels are
 * rejected before the ownership comparison.
 *
 * NOTE(review): this extract is truncated — the return-type line, the
 * `channel` parameter declaration, braces, and the early-return statements
 * after the two guard checks are missing from the visible source.
 */
500 nouveau_fifo_owner(struct drm_device
*dev
, struct drm_file
*file_priv
,
503 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
504 struct nouveau_engine
*engine
= &dev_priv
->Engine
;
506 if (channel
>= engine
->fifo
.channels
)
508 if (dev_priv
->fifos
[channel
] == NULL
)
510 return (dev_priv
->fifos
[channel
]->file_priv
== file_priv
);
513 /***********************************
514 * ioctls wrapping the functions
515 ***********************************/
/*
 * nouveau_ioctl_fifo_alloc() - CHANNEL_ALLOC ioctl handler: validates the
 * ctxdma handles, allocates a push buffer and a channel, then fills the
 * request struct with everything userspace needs to drive the FIFO
 * (control-register map, push-buffer map, notifier map).
 *
 * NOTE(review): this extract is truncated — braces, the error returns
 * after the handle check / pushbuf alloc / fifo alloc / addmap / map
 * lookup, the drm_addmap trailing arguments, and the final return are
 * missing from the visible source.
 */
517 static int nouveau_ioctl_fifo_alloc(struct drm_device
*dev
, void *data
,
518 struct drm_file
*file_priv
)
520 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
521 struct drm_nouveau_channel_alloc
*init
= data
;
522 struct drm_map_list
*entry
;
523 struct nouveau_channel
*chan
;
524 struct mem_block
*pushbuf
;
527 NOUVEAU_CHECK_INITIALISED_WITH_RETURN
;
/* Reject the reserved ~0 handle values. */
529 if (init
->fb_ctxdma_handle
== ~0 || init
->tt_ctxdma_handle
== ~0)
532 pushbuf
= nouveau_fifo_user_pushbuf_alloc(dev
);
536 res
= nouveau_fifo_alloc(dev
, &chan
, file_priv
, pushbuf
,
537 init
->fb_ctxdma_handle
,
538 init
->tt_ctxdma_handle
);
541 init
->channel
= chan
->id
;
542 init
->put_base
= chan
->pushbuf_base
;
544 /* make the fifo available to user space */
545 /* first, the fifo control regs */
546 init
->ctrl
= dev_priv
->mmio
->offset
+ chan
->user
;
547 init
->ctrl_size
= chan
->user_size
;
548 res
= drm_addmap(dev
, init
->ctrl
, init
->ctrl_size
, _DRM_REGISTERS
,
/* Hand the user-space token for the new register map back via ctrl. */
553 entry
= drm_find_matching_map(dev
, chan
->regs
);
556 init
->ctrl
= entry
->user_token
;
558 /* pass back FIFO map info to the caller */
559 init
->cmdbuf
= chan
->pushbuf_mem
->map_handle
;
560 init
->cmdbuf_size
= chan
->pushbuf_mem
->size
;
562 /* and the notifier block */
563 init
->notifier
= chan
->notifier_block
->map_handle
;
564 init
->notifier_size
= chan
->notifier_block
->size
;
/*
 * nouveau_ioctl_fifo_free() - CHANNEL_FREE ioctl handler: looks up the
 * caller-owned channel named in the request and frees it.
 *
 * NOTE(review): this extract is truncated — function braces and the final
 * return statement are missing from the visible source.
 */
569 static int nouveau_ioctl_fifo_free(struct drm_device
*dev
, void *data
,
570 struct drm_file
*file_priv
)
572 struct drm_nouveau_channel_free
*cfree
= data
;
573 struct nouveau_channel
*chan
;
575 NOUVEAU_CHECK_INITIALISED_WITH_RETURN
;
/* Validates ownership and resolves cfree->channel into chan. */
576 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree
->channel
, file_priv
, chan
);
578 nouveau_fifo_free(chan
);
582 /***********************************
583 * finally, the ioctl table
584 ***********************************/
/*
 * Driver ioctl dispatch table.  All entries require DRM_AUTH; SETPARAM
 * additionally requires master and root.  nouveau_max_ioctl exports the
 * entry count to the DRM core.
 *
 * NOTE(review): this extract appears truncated — the closing `};` of the
 * initializer (original line ~600) is not visible here.
 */
586 struct drm_ioctl_desc nouveau_ioctls
[] = {
587 DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT
, nouveau_ioctl_card_init
, DRM_AUTH
),
588 DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM
, nouveau_ioctl_getparam
, DRM_AUTH
),
589 DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM
, nouveau_ioctl_setparam
, DRM_AUTH
|DRM_MASTER
|DRM_ROOT_ONLY
),
590 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC
, nouveau_ioctl_fifo_alloc
, DRM_AUTH
),
591 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE
, nouveau_ioctl_fifo_free
, DRM_AUTH
),
592 DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC
, nouveau_ioctl_grobj_alloc
, DRM_AUTH
),
593 DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC
, nouveau_ioctl_notifier_alloc
, DRM_AUTH
),
594 DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE
, nouveau_ioctl_gpuobj_free
, DRM_AUTH
),
595 DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC
, nouveau_ioctl_mem_alloc
, DRM_AUTH
),
596 DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE
, nouveau_ioctl_mem_free
, DRM_AUTH
),
597 DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_TILE
, nouveau_ioctl_mem_tile
, DRM_AUTH
),
598 DRM_IOCTL_DEF(DRM_NOUVEAU_SUSPEND
, nouveau_ioctl_suspend
, DRM_AUTH
),
599 DRM_IOCTL_DEF(DRM_NOUVEAU_RESUME
, nouveau_ioctl_resume
, DRM_AUTH
),
602 int nouveau_max_ioctl
= DRM_ARRAY_SIZE(nouveau_ioctls
);