/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

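/*
 * Build the DMA object ("ctxdma") through which the FIFO fetches the
 * channel's push buffer, and attach it to the channel.  The target of the
 * DMA object depends on where the push buffer actually lives: the channel
 * VM on NV50+, GART, VRAM, or a PCI window for the NV04 special case.
 */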
static int
nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *pb = chan->pushbuf_bo;
	struct nouveau_gpuobj *pushbuf = NULL;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->vm_end, NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_AGP, &pushbuf);
		chan->pushbuf_base = pb->bo.offset;
	} else
	if (pb->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RO, &pushbuf,
						  NULL);
		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_VIDMEM, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
	} else {
		/* NV04 cmdbuf hack, from original ddx.. not sure of its
		 * exact reason for existing :)  PCI access to cmdbuf in
		 * VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     drm_get_resource_start(dev, 1),
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_PCI, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
	if (ret) {
		NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
		if (pushbuf != dev_priv->gart_info.sg_ctxdma)
			nouveau_gpuobj_del(dev, &pushbuf);
		return ret;
	}

	return 0;
}

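/*
 * Allocate and pin the buffer object userspace will fill with commands.
 * Placement follows the nouveau_vram_pushbuf module option: VRAM when it
 * is set, GART-mappable system memory otherwise.
 */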
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
	struct nouveau_bo *pushbuf = NULL;
	int location, ret;

	if (nouveau_vram_pushbuf)
		location = TTM_PL_FLAG_VRAM;
	else
		location = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
			     true, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
	}

	ret = nouveau_bo_pin(pushbuf, location);
	if (ret) {
		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	return pushbuf;
}

/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t tt_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	int channel, user;
	int ret;

	/*
	 * Alright, here is the full story
	 * Nvidia cards have multiple hw fifo contexts (praise them for that,
	 * no complicated crash-prone context switches)
	 * We allocate a new context for each app and let it write to it
	 * directly (woo, full userspace command submission !)
	 * When there are no more contexts, you lost
	 */
	for (channel = 0; channel < pfifo->channels; channel++) {
		if (dev_priv->fifos[channel] == NULL)
			break;
	}

	/* no more fifos. you lost. */
	if (channel == pfifo->channels)
		return -EINVAL;

	dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
					   GFP_KERNEL);
	if (!dev_priv->fifos[channel])
		return -ENOMEM;
	dev_priv->fifo_alloc_count++;
	chan = dev_priv->fifos[channel];
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->fence.pending);
	chan->dev = dev;
	chan->id = channel;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = tt_handle;

	NV_INFO(dev, "Allocating FIFO number %d\n", channel);

	/* Allocate DMA push buffer */
	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
	if (!chan->pushbuf_bo) {
		ret = -ENOMEM;
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	nouveau_dma_pre_init(chan);

	/* Locate channel's user control regs */
	if (dev_priv->card_type < NV_40)
		user = NV03_USER(channel);
	else
	if (dev_priv->card_type < NV_50)
		user = NV40_USER(channel);
	else
		user = NV50_USER(channel);

	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
			     PAGE_SIZE);
	if (!chan->user) {
		NV_ERROR(dev, "ioremap of regs failed.\n");
		nouveau_channel_free(chan);
		return -ENOMEM;
	}
	chan->user_put = 0x40;
	chan->user_get = 0x44;

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* Setup channel's default objects */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* Create a dma object for the push buffer */
	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
	if (ret) {
		NV_ERROR(dev, "pbctxdma %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Create a graphics context for new channel */
	ret = pgraph->create_context(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	/* Construct initial RAMFC for new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_init(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
	*chan_ret = chan;
	return 0;
}

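/*
 * Tear down a channel: wait for outstanding work via a final fence,
 * detach its pgraph/pfifo contexts under the context switch lock, then
 * release the push buffer, notifier and gpuobj resources.
 */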
void
nouveau_channel_free(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;
	int ret;

	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);

	nouveau_debugfs_channel_fini(chan);

	/* Give outstanding push buffers a chance to complete */
	spin_lock_irqsave(&chan->fence.lock, flags);
	nouveau_fence_update(chan);
	spin_unlock_irqrestore(&chan->fence.lock, flags);
	if (chan->fence.sequence != chan->fence.sequence_ack) {
		struct nouveau_fence *fence = NULL;

		ret = nouveau_fence_new(chan, &fence, true);
		if (ret == 0) {
			ret = nouveau_fence_wait(fence, NULL, false, false);
			nouveau_fence_unref((void *)&fence);
		}

		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	}

	/* Ensure all outstanding fences are signaled.  They should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_fini(chan);

	/* This will prevent pfifo from switching channels. */
	pfifo->reassign(dev, false);

	/* We want to give pgraph a chance to idle and get rid of all potential
	 * errors. We need to do this before the lock, otherwise the irq handler
	 * is unable to process them.
	 */
	if (pgraph->channel(dev) == chan)
		nouveau_wait_for_idle(dev);

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	pgraph->fifo_access(dev, false);
	if (pgraph->channel(dev) == chan)
		pgraph->unload_context(dev);
	pgraph->destroy_context(chan);
	pgraph->fifo_access(dev, true);

	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
		pfifo->unload_context(dev);
		pfifo->enable(dev);
	}
	pfifo->destroy_context(chan);

	pfifo->reassign(dev, true);

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Release the channel's resources */
	nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_gpuobj_channel_takedown(chan);
	nouveau_notifier_takedown_channel(chan);
	if (chan->user)
		iounmap(chan->user);

	dev_priv->fifos[chan->id] = NULL;
	dev_priv->fifo_alloc_count--;
	kfree(chan);
}

/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int i;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < engine->fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->fifos[i];

		if (chan && chan->file_priv == file_priv)
			nouveau_channel_free(chan);
	}
}

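/* returns non-zero if the given channel index exists and is owned by file_priv */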
int
nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
		      int channel)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	if (channel >= engine->fifo.channels)
		return 0;
	if (dev_priv->fifos[channel] == NULL)
		return 0;

	return (dev_priv->fifos[channel]->file_priv == file_priv);
}

/***********************************
 * ioctls wrapping the functions
 ***********************************/

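/*
 * CHANNEL_ALLOC ioctl: creates a channel for the caller and fills the
 * request with the channel id, push buffer placement domains, the default
 * M2MF/SW subchannel bindings and a GEM handle for the notifier memory.
 */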
static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (dev_priv->engine.graph.accel_blocked)
		return -ENODEV;

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel = chan->id;

	if (chan->dma.ib_max)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	init->subchan[0].handle = NvM2MF;
	if (dev_priv->card_type < NV_50)
		init->subchan[0].grclass = 0x0039;
	else
		init->subchan[0].grclass = 0x5039;
	init->subchan[1].handle = NvSw;
	init->subchan[1].grclass = NV_SW;
	init->nr_subchan = 2;

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	return 0;
}

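/* CHANNEL_FREE ioctl: looks up the caller-owned channel and frees it */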
static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_channel_free *cfree = data;
	struct nouveau_channel *chan;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);

	nouveau_channel_free(chan);
	return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);