/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

static int
nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *pb = chan->pushbuf_bo;
	struct nouveau_gpuobj *pushbuf = NULL;
	int ret = 0;

	if (dev_priv->card_type >= NV_50) {
		if (dev_priv->card_type < NV_C0) {
			ret = nouveau_gpuobj_dma_new(chan,
						     NV_CLASS_DMA_IN_MEMORY, 0,
						     (1ULL << 40),
						     NV_MEM_ACCESS_RO,
						     NV_MEM_TARGET_VM,
						     &pushbuf);
		}
		chan->pushbuf_base = pb->bo.offset;
	} else
	if (pb->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_GART, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_VRAM, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
	} else {
		/* NV04 cmdbuf hack, from original ddx.. not sure of its
		 * exact reason for existing :)  PCI access to cmdbuf in
		 * VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     pci_resource_start(dev->pdev, 1),
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_PCI, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
	}

	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
	nouveau_gpuobj_ref(NULL, &pushbuf);
	return ret;
}
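
/* Allocate and map the 64KiB buffer object that backs a channel's
 * user-visible DMA push buffer.  It normally lives in GART, or in VRAM
 * when nouveau_vram_pushbuf is set.
 */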
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
	struct nouveau_bo *pushbuf = NULL;
	int location, ret;

	if (nouveau_vram_pushbuf)
		location = TTM_PL_FLAG_VRAM;
	else
		location = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
	}

	ret = nouveau_bo_pin(pushbuf, location);
	if (ret) {
		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	ret = nouveau_bo_map(pushbuf);
	if (ret) {
		nouveau_bo_unpin(pushbuf);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	return pushbuf;
}

/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);

	/* allocate hw channel id */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->nvsw.flip);
	INIT_LIST_HEAD(&chan->fence.pending);

	/* Allocate DMA push buffer */
	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
	if (!chan->pushbuf_bo) {
		ret = -ENOMEM;
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_dma_pre_init(chan);
	chan->user_put = 0x40;
	chan->user_get = 0x44;

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Setup channel's default objects */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Create a dma object for the push buffer */
	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
	if (ret) {
		NV_ERROR(dev, "pbctxdma %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Construct initial RAMFC for new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_channel_init(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	*chan_ret = chan;
	return 0;
}
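
/* Channel reference handling: chan->users counts active users of the
 * channel (it must be non-zero for nouveau_channel_get() to succeed),
 * while chan->ref is a kref that keeps the structure itself alive until
 * the final nouveau_channel_ref(NULL, ...) drops it.
 */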
struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *ref)
{
	struct nouveau_channel *chan = NULL;

	if (likely(ref && atomic_inc_not_zero(&ref->users)))
		nouveau_channel_ref(ref, &chan);

	return chan;
}

struct nouveau_channel *
nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;

	if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (unlikely(!chan))
		return ERR_PTR(-EINVAL);

	if (unlikely(file_priv && chan->file_priv != file_priv)) {
		nouveau_channel_put_unlocked(&chan);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&chan->mutex);
	return chan;
}
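
/* Drop a 'users' reference without touching the channel mutex.  When the
 * last user disappears the channel is torn down: it is idled, its fences
 * finalized, its FIFO and engine contexts destroyed, and its push buffer,
 * gpuobjs and notifier memory released.
 */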
void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;
	int i;

	/* decrement the refcount, and we're done if there's still refs */
	if (likely(!atomic_dec_and_test(&chan->users))) {
		nouveau_channel_ref(NULL, pchan);
		return;
	}

	/* no one wants the channel anymore */
	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
	nouveau_debugfs_channel_fini(chan);

	/* give it chance to idle */
	nouveau_channel_idle(chan);

	/* ensure all outstanding fences are signaled.  they should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_channel_fini(chan);

	/* boot it off the hardware */
	pfifo->reassign(dev, false);

	/* destroy the engine specific contexts */
	pfifo->destroy_context(chan);
	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
		if (chan->engctx[i])
			dev_priv->eng[i]->context_del(chan, i);
	}

	pfifo->reassign(dev, true);

	/* aside from its resources, the channel should now be dead,
	 * remove it from the channel list
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* destroy any resources the channel owned */
	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_unmap(chan->pushbuf_bo);
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_gpuobj_channel_takedown(chan);
	nouveau_notifier_takedown_channel(chan);

	nouveau_channel_ref(NULL, pchan);
}
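
/* Locked variant: releases the channel mutex taken by nouveau_channel_get()
 * before dropping the reference.
 */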
void
nouveau_channel_put(struct nouveau_channel **pchan)
{
	mutex_unlock(&(*pchan)->mutex);
	nouveau_channel_put_unlocked(pchan);
}

static void
nouveau_channel_del(struct kref *ref)
{
	struct nouveau_channel *chan =
		container_of(ref, struct nouveau_channel, ref);

	kfree(chan);
}

void
nouveau_channel_ref(struct nouveau_channel *chan,
		    struct nouveau_channel **pchan)
{
	if (chan)
		kref_get(&chan->ref);

	if (*pchan)
		kref_put(&(*pchan)->ref, nouveau_channel_del);

	*pchan = chan;
}

void
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *fence = NULL;
	int ret;

	nouveau_fence_update(chan);

	if (chan->fence.sequence != chan->fence.sequence_ack) {
		ret = nouveau_fence_new(chan, &fence, true);
		if (!ret) {
			ret = nouveau_fence_wait(fence, false, false);
			nouveau_fence_unref(&fence);
		}

		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	}
}

/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_channel *chan;
	int i;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < engine->fifo.channels; i++) {
		chan = nouveau_channel_get(dev, file_priv, i);
		if (IS_ERR(chan))
			continue;

		atomic_dec(&chan->users);
		nouveau_channel_put(&chan);
	}
}

/***********************************
 * ioctls wrapping the functions
 ***********************************/
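
/* The CHANNEL_ALLOC ioctl hands userspace everything it needs to start
 * submitting commands: the channel id, the memory domains the push buffer
 * may live in, the pre-bound subchannel classes, and a GEM handle for the
 * notifier area.  As a rough, hypothetical sketch (using libdrm's generic
 * command wrapper, not code from this file), the userspace side might
 * look something like:
 *
 *	struct drm_nouveau_channel_alloc req = {
 *		.fb_ctxdma_handle = vram_handle,
 *		.tt_ctxdma_handle = gart_handle,
 *	};
 *	int ret = drmCommandWriteRead(fd, DRM_NOUVEAU_CHANNEL_ALLOC,
 *				      &req, sizeof(req));
 *	// on success, req.channel, req.pushbuf_domains, req.subchan[] and
 *	// req.notifier_handle describe the new FIFO
 */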
static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (!dev_priv->eng[NVOBJ_ENGINE_GR])
		return -ENODEV;

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel = chan->id;

	if (chan->dma.ib_max)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (dev_priv->card_type < NV_C0) {
		init->subchan[0].handle  = NvM2MF;
		if (dev_priv->card_type < NV_50)
			init->subchan[0].grclass = 0x0039;
		else
			init->subchan[0].grclass = 0x5039;
		init->subchan[1].handle  = NvSw;
		init->subchan[1].grclass = NV_SW;
		init->nr_subchan = 2;
	} else {
		init->subchan[0].handle  = 0x9039;
		init->subchan[0].grclass = 0x9039;
		init->nr_subchan = 1;
	}

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);

	if (ret == 0)
		atomic_inc(&chan->users); /* userspace reference */
	nouveau_channel_put(&chan);
	return ret;
}
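
/* CHANNEL_FREE only drops the userspace reference taken at allocation
 * time; the channel is actually destroyed once all remaining users have
 * called nouveau_channel_put().
 */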
static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_channel *chan;

	chan = nouveau_channel_get(dev, file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	atomic_dec(&chan->users);
	nouveau_channel_put(&chan);
	return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);