/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

static int
nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *pb = chan->pushbuf_bo;
	struct nouveau_gpuobj *pushbuf = NULL;
	uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
	int ret;

	if (pb->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RO, &pushbuf,
						  NULL);
		chan->pushbuf_base = start;
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_VIDMEM, &pushbuf);
		chan->pushbuf_base = start;
	} else {
		/* NV04 cmdbuf hack, from original ddx.. not sure of its
		 * exact reason for existing :)  PCI access to cmdbuf in
		 * VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     drm_get_resource_start(dev, 1),
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_PCI, &pushbuf);
		chan->pushbuf_base = start;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
	if (ret) {
		NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
		if (pushbuf != dev_priv->gart_info.sg_ctxdma)
			nouveau_gpuobj_del(dev, &pushbuf);
		return ret;
	}

	return 0;
}

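/*
 * Editorial sketch (not part of the original file): the three branches
 * above differ only in where the pushbuf ctxdma window points.  Roughly:
 *
 *	TT pushbuf       window spans the GART aperture
 *	VRAM, not NV04   window spans fb_available_size of VRAM
 *	VRAM on NV04     window spans the BAR1 PCI range, so the card
 *	                 fetches the cmdbuf over PCI instead
 *
 * In every case chan->pushbuf_base is the buffer's offset inside that
 * window, which is why the DMA code adds pushbuf_base when it programs
 * PUT rather than using an absolute address.
 */
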
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
	struct nouveau_bo *pushbuf = NULL;
	int location, ret;

	if (nouveau_vram_pushbuf)
		location = TTM_PL_FLAG_VRAM;
	else
		location = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
			     true, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
	}

	ret = nouveau_bo_pin(pushbuf, location);
	if (ret) {
		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	return pushbuf;
}

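/*
 * Editorial note (hedged): nouveau_vram_pushbuf is the driver's
 * "vram_pushbuf" module option.  By default the 64KiB push buffer is
 * placed in GART-visible system memory; setting the option forces it
 * into VRAM instead.
 */
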
/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t tt_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	int channel, user;
	int ret;

	/*
	 * Alright, here is the full story.
	 * Nvidia cards have multiple hw fifo contexts (praise them for that,
	 * no complicated crash-prone context switches).
	 * We allocate a new context for each app and let it write to it
	 * directly (woo, full userspace command submission!)
	 * When there are no more contexts, you lose.
	 */
	for (channel = 0; channel < pfifo->channels; channel++) {
		if (dev_priv->fifos[channel] == NULL)
			break;
	}

	/* no more fifos. you lose. */
	if (channel == pfifo->channels)
		return -EINVAL;

	dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
					   GFP_KERNEL);
	if (!dev_priv->fifos[channel])
		return -ENOMEM;
	dev_priv->fifo_alloc_count++;
	chan = dev_priv->fifos[channel];
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->fence.pending);
	chan->dev = dev;
	chan->id = channel;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = tt_handle;

	NV_INFO(dev, "Allocating FIFO number %d\n", channel);

	/* Allocate DMA push buffer */
	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
	if (!chan->pushbuf_bo) {
		ret = -ENOMEM;
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	nouveau_dma_pre_init(chan);

	/* Locate channel's user control regs */
	if (dev_priv->card_type < NV_40)
		user = NV03_USER(channel);
	else
	if (dev_priv->card_type < NV_50)
		user = NV40_USER(channel);
	else
		user = NV50_USER(channel);

	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
							PAGE_SIZE);
	if (!chan->user) {
		NV_ERROR(dev, "ioremap of regs failed.\n");
		nouveau_channel_free(chan);
		return -ENOMEM;
	}
	chan->user_put = 0x40;
	chan->user_get = 0x44;

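	/*
	 * Editorial sketch (not from the original file; the authoritative
	 * code lives in nouveau_dma.[ch]): 0x40/0x44 are the DMA_PUT/DMA_GET
	 * register offsets within the per-channel USER area mapped above,
	 * so kicking the GPU amounts to something like
	 *
	 *	iowrite32(put, chan->user + chan->user_put);
	 *
	 * and polling progress to reading chan->user + chan->user_get,
	 * assuming chan->user is treated as a byte-addressed __iomem base.
	 */
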
	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* Setup channel's default objects */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* Create a dma object for the push buffer */
	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
	if (ret) {
		NV_ERROR(dev, "pbctxdma %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Create a graphics context for the new channel */
	ret = pgraph->create_context(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	/* Construct initial RAMFC for the new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_init(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
	*chan_ret = chan;
	return 0;
}

/* stops a fifo */
void
nouveau_channel_free(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;
	int ret;

	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);

	nouveau_debugfs_channel_fini(chan);

	/* Give outstanding push buffers a chance to complete */
	spin_lock_irqsave(&chan->fence.lock, flags);
	nouveau_fence_update(chan);
	spin_unlock_irqrestore(&chan->fence.lock, flags);
	if (chan->fence.sequence != chan->fence.sequence_ack) {
		struct nouveau_fence *fence = NULL;

		ret = nouveau_fence_new(chan, &fence, true);
		if (ret == 0) {
			ret = nouveau_fence_wait(fence, NULL, false, false);
			nouveau_fence_unref((void *)&fence);
		}

		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	}

	/* Ensure all outstanding fences are signaled.  They should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_fini(chan);

	/* Ensure the channel is no longer active on the GPU */
	pfifo->reassign(dev, false);

	if (pgraph->channel(dev) == chan) {
		pgraph->fifo_access(dev, false);
		pgraph->unload_context(dev);
		pgraph->fifo_access(dev, true);
	}
	pgraph->destroy_context(chan);

	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
		pfifo->unload_context(dev);
		pfifo->enable(dev);
	}
	pfifo->destroy_context(chan);

	pfifo->reassign(dev, true);

	/* Release the channel's resources */
	nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_gpuobj_channel_takedown(chan);
	nouveau_notifier_takedown_channel(chan);
	if (chan->user)
		iounmap(chan->user);

	dev_priv->fifos[chan->id] = NULL;
	dev_priv->fifo_alloc_count--;
	kfree(chan);
}

/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int i;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < engine->fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->fifos[i];

		if (chan && chan->file_priv == file_priv)
			nouveau_channel_free(chan);
	}
}

int
nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
		      int channel)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	if (channel >= engine->fifo.channels)
		return 0;
	if (dev_priv->fifos[channel] == NULL)
		return 0;

	return (dev_priv->fifos[channel]->file_priv == file_priv);
}

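/*
 * Editorial note (assumption): this helper is what backs the ownership
 * check in NOUVEAU_GET_USER_CHANNEL_WITH_RETURN, used by the ioctls
 * below to reject channel ids that belong to another file descriptor.
 */
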
/***********************************
 * ioctls wrapping the functions
 ***********************************/

static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (dev_priv->engine.graph.accel_blocked)
		return -ENODEV;

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel = chan->id;

	init->subchan[0].handle = NvM2MF;
	if (dev_priv->card_type < NV_50)
		init->subchan[0].grclass = 0x0039;
	else
		init->subchan[0].grclass = 0x5039;
	init->subchan[1].handle = NvSw;
	init->subchan[1].grclass = NV_SW;
	init->nr_subchan = 2;

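	/*
	 * Editorial note (hedged): 0x0039 and 0x5039 are the pre-NV50 and
	 * NV50 memory-to-memory-format (M2MF) object classes, and NV_SW is
	 * the driver's software object class, used for things like vblank
	 * waits (see chan->nvsw.vbl_wait above).
	 */
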
	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	return 0;
}

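/*
 * Userspace usage sketch (editorial; assumes libdrm's drmCommandWriteRead
 * and the DRM_NOUVEAU_CHANNEL_ALLOC command number from nouveau_drm.h):
 *
 *	struct drm_nouveau_channel_alloc init = {
 *		.fb_ctxdma_handle = vram_handle,
 *		.tt_ctxdma_handle = gart_handle,
 *	};
 *
 *	if (drmCommandWriteRead(fd, DRM_NOUVEAU_CHANNEL_ALLOC,
 *				&init, sizeof(init)) == 0)
 *		printf("channel %d, %d subchannels\n",
 *		       init.channel, init.nr_subchan);
 */
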
static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_channel_free *cfree = data;
	struct nouveau_channel *chan;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);

	nouveau_channel_free(chan);
	return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);