/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
/* Create the DMA object through which the FIFO fetches the push buffer,
 * and record where pushes start relative to it.
 */
static int
nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *pb = chan->pushbuf_bo;
	struct nouveau_gpuobj *pushbuf = NULL;
	int ret = 0;

	if (dev_priv->card_type >= NV_50) {
		if (dev_priv->card_type < NV_C0) {
			ret = nouveau_gpuobj_dma_new(chan,
						     NV_CLASS_DMA_IN_MEMORY, 0,
						     (1ULL << 40),
						     NV_MEM_ACCESS_RO,
						     NV_MEM_TARGET_VM,
						     &pushbuf);
		}
		chan->pushbuf_base = pb->bo.offset;
	} else
	if (pb->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_GART, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_VRAM, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
	} else {
		/* NV04 cmdbuf hack, carried over from the original DDX; its
		 * exact reason for existing is unclear.  PCI access to the
		 * cmdbuf in VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     pci_resource_start(dev->pdev, 1),
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_PCI, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
	}

	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
	nouveau_gpuobj_ref(NULL, &pushbuf);
	return ret;
}
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
	struct nouveau_bo *pushbuf = NULL;
	int location, ret;

	/* The push buffer lives in VRAM only when the nouveau_vram_pushbuf
	 * module option asks for it; otherwise it goes in GART memory.
	 */
	if (nouveau_vram_pushbuf)
		location = TTM_PL_FLAG_VRAM;
	else
		location = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
	}

	ret = nouveau_bo_pin(pushbuf, location);
	if (ret) {
		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	ret = nouveau_bo_map(pushbuf);
	if (ret) {
		nouveau_bo_unpin(pushbuf);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	return pushbuf;
}
/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);

	/* allocate hw channel id */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->nvsw.flip);
	INIT_LIST_HEAD(&chan->fence.pending);

	/* Allocate DMA push buffer */
	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
	if (!chan->pushbuf_bo) {
		ret = -ENOMEM;
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_dma_pre_init(chan);
	chan->user_put = 0x40;
	chan->user_get = 0x44;

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Setup channel's default objects */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Create a dma object for the push buffer */
	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
	if (ret) {
		NV_ERROR(dev, "pbctxdma %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Construct initial RAMFC for new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_channel_init(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	*chan_ret = chan;
	return 0;
}
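/*
 * A note on channel lifetimes, as implemented by the helpers below: each
 * channel carries two counters.  chan->users counts active holders (the
 * userspace reference plus anyone currently inside a get/put pair); when it
 * reaches zero, nouveau_channel_put_unlocked() tears the hardware context
 * down.  chan->ref is the kref that keeps the structure itself alive, and
 * only once every nouveau_channel_ref()'d pointer has been dropped does
 * nouveau_channel_del() free the memory.
 */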
struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *ref)
{
	struct nouveau_channel *chan = NULL;

	if (likely(ref && atomic_inc_not_zero(&ref->users)))
		nouveau_channel_ref(ref, &chan);

	return chan;
}
struct nouveau_channel *
nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;

	if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (unlikely(!chan))
		return ERR_PTR(-EINVAL);

	/* only the owning client may look the channel up by id */
	if (unlikely(file_priv && chan->file_priv != file_priv)) {
		nouveau_channel_put_unlocked(&chan);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&chan->mutex);
	return chan;
}
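/*
 * Typical get/put pairing (illustrative sketch, not a call site from this
 * file): the channel comes back with chan->mutex held and an extra user
 * reference, and nouveau_channel_put() undoes both.
 *
 *	chan = nouveau_channel_get(dev, file_priv, id);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	... touch per-channel state ...
 *	nouveau_channel_put(&chan);
 */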
void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
	unsigned long flags;

	/* decrement the refcount, and we're done if there's still refs */
	if (likely(!atomic_dec_and_test(&chan->users))) {
		nouveau_channel_ref(NULL, pchan);
		return;
	}

	/* no one wants the channel anymore */
	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
	nouveau_debugfs_channel_fini(chan);

	/* give it a chance to idle */
	nouveau_channel_idle(chan);

	/* ensure all outstanding fences are signaled.  they should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_channel_fini(chan);

	/* boot it off the hardware */
	pfifo->reassign(dev, false);

	/* We want to give pgraph a chance to idle and get rid of all
	 * potential errors.  We need to do this without the context
	 * switch lock held, otherwise the irq handler is unable to
	 * process them.
	 */
	if (pgraph->channel(dev) == chan)
		nouveau_wait_for_idle(dev);

	/* destroy the engine specific contexts */
	pfifo->destroy_context(chan);
	pgraph->destroy_context(chan);
	if (pcrypt->destroy_context)
		pcrypt->destroy_context(chan);

	pfifo->reassign(dev, true);

	/* aside from its resources, the channel should now be dead,
	 * remove it from the channel list
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* destroy any resources the channel owned */
	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_unmap(chan->pushbuf_bo);
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_gpuobj_channel_takedown(chan);
	nouveau_notifier_takedown_channel(chan);

	nouveau_channel_ref(NULL, pchan);
}
void
nouveau_channel_put(struct nouveau_channel **pchan)
{
	mutex_unlock(&(*pchan)->mutex);
	nouveau_channel_put_unlocked(pchan);
}
static void
nouveau_channel_del(struct kref *ref)
{
	struct nouveau_channel *chan =
		container_of(ref, struct nouveau_channel, ref);

	kfree(chan);
}
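/*
 * nouveau_channel_ref() behaves like a ref-counted pointer assignment.
 * A small sketch of how the rest of this file uses it:
 *
 *	struct nouveau_channel *mine = NULL;
 *	nouveau_channel_ref(chan, &mine);	(mine = chan, takes a kref)
 *	nouveau_channel_ref(NULL, &mine);	(drops the kref, mine = NULL)
 */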
void
nouveau_channel_ref(struct nouveau_channel *chan,
		    struct nouveau_channel **pchan)
{
	if (chan)
		kref_get(&chan->ref);

	if (*pchan)
		kref_put(&(*pchan)->ref, nouveau_channel_del);

	*pchan = chan;
}
void
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *fence = NULL;
	int ret;

	nouveau_fence_update(chan);

	if (chan->fence.sequence != chan->fence.sequence_ack) {
		/* emit one more fence and wait for it, so everything queued
		 * before it has completed by the time we return
		 */
		ret = nouveau_fence_new(chan, &fence, true);
		if (!ret) {
			ret = nouveau_fence_wait(fence, false, false);
			nouveau_fence_unref(&fence);
		}

		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	}
}
/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_channel *chan;
	int i;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < engine->fifo.channels; i++) {
		chan = nouveau_channel_get(dev, file_priv, i);
		if (IS_ERR(chan))
			continue;

		/* drop the userspace reference in addition to our own */
		atomic_dec(&chan->users);
		nouveau_channel_put(&chan);
	}
}
/***********************************
 * ioctls wrapping the functions
 ***********************************/
static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (dev_priv->engine.graph.accel_blocked)
		return -ENODEV;

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel = chan->id;

	if (chan->dma.ib_max)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (dev_priv->card_type < NV_C0) {
		init->subchan[0].handle  = NvM2MF;
		if (dev_priv->card_type < NV_50)
			init->subchan[0].grclass = 0x0039;
		else
			init->subchan[0].grclass = 0x5039;
		init->subchan[1].handle  = NvSw;
		init->subchan[1].grclass = NV_SW;
		init->nr_subchan = 2;
	} else {
		init->subchan[0].handle  = 0x9039;
		init->subchan[0].grclass = 0x9039;
		init->nr_subchan = 1;
	}

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);

	if (ret == 0)
		atomic_inc(&chan->users); /* userspace reference */
	nouveau_channel_put(&chan);
	return ret;
}
static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_channel *chan;

	chan = nouveau_channel_get(dev, file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* drop the reference userspace took at allocation time */
	atomic_dec(&chan->users);
	nouveau_channel_put(&chan);
	return 0;
}
/***********************************
 * finally, the ioctl table
 ***********************************/
struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);