/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
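
/*
 * Channel (FIFO) management: per-client DMA push buffer allocation,
 * hardware channel setup and teardown, and the ioctls wrapping them.
 */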

static int
nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *pb = chan->pushbuf_bo;
	struct nouveau_gpuobj *pushbuf = NULL;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->vm_end, NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_AGP, &pushbuf);
		chan->pushbuf_base = pb->bo.offset;
	} else
	if (pb->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RO, &pushbuf,
						  NULL);
		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_VIDMEM, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
	} else {
		/* NV04 cmdbuf hack, from the original ddx.. not sure of its
		 * exact reason for existing :)  PCI access to cmdbuf in
		 * VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     drm_get_resource_start(dev, 1),
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_PCI, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
	if (ret) {
		NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
		if (pushbuf != dev_priv->gart_info.sg_ctxdma)
			nouveau_gpuobj_del(dev, &pushbuf);
		return ret;
	}

	return 0;
}
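
/*
 * The push buffer itself: a 64KiB buffer object the client maps and writes
 * commands into.  Placement defaults to system memory (GART); the
 * nouveau_vram_pushbuf module option forces it into VRAM instead.
 */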
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
	struct nouveau_bo *pushbuf = NULL;
	int location, ret;

	if (nouveau_vram_pushbuf)
		location = TTM_PL_FLAG_VRAM;
	else
		location = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000,
			     false, true, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
	}

	ret = nouveau_bo_pin(pushbuf, location);
	if (ret) {
		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	return pushbuf;
}

/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t tt_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	int channel, user;
	int ret;

	/*
	 * Alright, here is the full story:
	 * Nvidia cards have multiple hw fifo contexts (praise them for that,
	 * no complicated crash-prone context switches).
	 * We allocate a new context for each app and let it write to it
	 * directly (woo, full userspace command submission!).
	 * When there are no more contexts, you lost.
	 */
	for (channel = 0; channel < pfifo->channels; channel++) {
		if (dev_priv->fifos[channel] == NULL)
			break;
	}

	/* no more fifos. you lost. */
	if (channel == pfifo->channels)
		return -EINVAL;

	dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
					   GFP_KERNEL);
	if (!dev_priv->fifos[channel])
		return -ENOMEM;

	chan = dev_priv->fifos[channel];
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->fence.pending);
	chan->dev = dev;
	chan->id = channel;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = tt_handle;

	NV_INFO(dev, "Allocating FIFO number %d\n", channel);

	/* Allocate DMA push buffer */
	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
	if (!chan->pushbuf_bo) {
		ret = -ENOMEM;
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	nouveau_dma_pre_init(chan);
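
	/*
	 * Presumably split out of nouveau_dma_init(): this primes the
	 * chan->dma bookkeeping from the new pushbuf so it is valid before
	 * the pushbuf ctxdma and channel objects are created below.
	 */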

	/* Locate channel's user control regs */
	if (dev_priv->card_type < NV_40)
		user = NV03_USER(channel);
	else
	if (dev_priv->card_type < NV_50)
		user = NV40_USER(channel);
	else
		user = NV50_USER(channel);

	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
								PAGE_SIZE);
	if (!chan->user) {
		NV_ERROR(dev, "ioremap of regs failed.\n");
		nouveau_channel_free(chan);
		return -ENOMEM;
	}
	chan->user_put = 0x40;
	chan->user_get = 0x44;
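
	/*
	 * 0x40/0x44 are the offsets of the DMA_PUT/DMA_GET registers inside
	 * the control window mapped above; nouveau_dma uses them to submit
	 * commands and track how far the hardware has fetched.
	 */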

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* Setup channel's default objects */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* Create a dma object for the push buffer */
	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
	if (ret) {
		NV_ERROR(dev, "pbctxdma %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Create a graphics context for the new channel */
	ret = pgraph->create_context(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	/* Construct initial RAMFC for the new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_init(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
	*chan_ret = chan;
	return 0;
}

/* stops a fifo */
void
nouveau_channel_free(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;
	int ret;
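
	/* Teardown order matters here: idle the channel, detach its pgraph
	 * and pfifo contexts under the context switch lock, and only then
	 * release the objects backing it.
	 */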

	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);

	nouveau_debugfs_channel_fini(chan);

	/* Give outstanding push buffers a chance to complete */
	spin_lock_irqsave(&chan->fence.lock, flags);
	nouveau_fence_update(chan);
	spin_unlock_irqrestore(&chan->fence.lock, flags);
	if (chan->fence.sequence != chan->fence.sequence_ack) {
		struct nouveau_fence *fence = NULL;

		ret = nouveau_fence_new(chan, &fence, true);
		if (ret == 0) {
			ret = nouveau_fence_wait(fence, NULL, false, false);
			nouveau_fence_unref((void *)&fence);
		}

		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n",
				 chan->id);
	}

	/* Ensure all outstanding fences are signaled.  They should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_fini(chan);

	/* This will prevent pfifo from switching channels. */
	pfifo->reassign(dev, false);

	/* We want to give pgraph a chance to idle and get rid of all
	 * potential errors. We need to do this before taking the lock,
	 * otherwise the irq handler is unable to process them.
	 */
	if (pgraph->channel(dev) == chan)
		nouveau_wait_for_idle(dev);

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	pgraph->fifo_access(dev, false);
	if (pgraph->channel(dev) == chan)
		pgraph->unload_context(dev);
	pgraph->destroy_context(chan);
	pgraph->fifo_access(dev, true);
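
	/*
	 * Same dance for pfifo: evict the channel's context if it is the one
	 * currently resident on the hardware, then destroy its RAMFC.
	 */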
	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
		pfifo->unload_context(dev);
		pfifo->enable(dev);
	}
	pfifo->destroy_context(chan);

	pfifo->reassign(dev, true);

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Release the channel's resources */
	nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_gpuobj_channel_takedown(chan);
	nouveau_notifier_takedown_channel(chan);
	if (chan->user)
		iounmap(chan->user);

	dev_priv->fifos[chan->id] = NULL;
	kfree(chan);
}

/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int i;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < engine->fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->fifos[i];

		if (chan && chan->file_priv == file_priv)
			nouveau_channel_free(chan);
	}
}

int
nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
		      int channel)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	if (channel >= engine->fifo.channels)
		return 0;
	if (dev_priv->fifos[channel] == NULL)
		return 0;

	return (dev_priv->fifos[channel]->file_priv == file_priv);
}

/***********************************
 * ioctls wrapping the functions
 ***********************************/

static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (dev_priv->engine.graph.accel_blocked)
		return -ENODEV;

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel = chan->id;
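
	/* Tell the client where pushbuf data may be placed: with indirect
	 * buffers (ib_max != 0) either domain works; otherwise it has to
	 * match wherever the push buffer itself ended up.
	 */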
	if (chan->dma.ib_max)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
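
	/* Advertise the objects userspace is expected to bind on the first
	 * two subchannels: an M2MF object (class 0x0039, or 0x5039 on NV50+)
	 * and the software object used for fencing.
	 */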
	init->subchan[0].handle = NvM2MF;
	if (dev_priv->card_type < NV_50)
		init->subchan[0].grclass = 0x0039;
	else
		init->subchan[0].grclass = 0x5039;
	init->subchan[1].handle = NvSw;
	init->subchan[1].grclass = NV_SW;
	init->nr_subchan = 2;

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	return 0;
}
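
/*
 * Illustrative sketch only, not driver code: a userspace client would
 * typically reach this ioctl via libdrm, roughly as below.  vram_handle
 * and gart_handle stand in for whatever ctxdma handles the client set up:
 *
 *	struct drm_nouveau_channel_alloc init = { 0 };
 *	init.fb_ctxdma_handle = vram_handle;
 *	init.tt_ctxdma_handle = gart_handle;
 *	if (drmCommandWriteRead(fd, DRM_NOUVEAU_CHANNEL_ALLOC,
 *				&init, sizeof(init)) == 0)
 *		printf("got channel %d\n", init.channel);
 */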

static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_channel_free *cfree = data;
	struct nouveau_channel *chan;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);

	nouveau_channel_free(chan);
	return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/
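
/* Every entry requires an authenticated client (DRM_AUTH); SETPARAM is
 * additionally restricted to the root-privileged master.
 */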
struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);