/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
#include "nouveau_fence.h"
#include "nouveau_software.h"

static int
nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
{
	u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	/* allocate buffer object */
	ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL,
			     &chan->pushbuf_bo);
	if (ret)
		goto out;

	ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
	if (ret)
		goto out;

	ret = nouveau_bo_map(chan->pushbuf_bo);
	if (ret)
		goto out;

	/* create DMA object covering the entire memtype where the push
	 * buffer resides, userspace can submit its own push buffers from
	 * anywhere within the same memtype.
	 */
	chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
					 &chan->pushbuf_vma);
		if (ret)
			goto out;

		if (dev_priv->card_type < NV_C0) {
			ret = nouveau_gpuobj_dma_new(chan,
						     NV_CLASS_DMA_IN_MEMORY, 0,
						     (1ULL << 40),
						     NV_MEM_ACCESS_RO,
						     NV_MEM_TARGET_VM,
						     &chan->pushbuf);
		}

		chan->pushbuf_base = chan->pushbuf_vma.offset;
	} else
	if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_GART,
					     &chan->pushbuf);
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_VRAM,
					     &chan->pushbuf);
	} else {
		/* NV04 cmdbuf hack, from original ddx.. not sure of its
		 * exact reason for existing :)  PCI access to cmdbuf in
		 * VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     pci_resource_start(dev->pdev, 1),
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_PCI,
					     &chan->pushbuf);
	}
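
	/* Whichever branch was taken above, chan->pushbuf now covers the
	 * whole memory type the push buffer lives in (channel VM on NV50+,
	 * the GART aperture, VRAM, or the PCI BAR on NV04), and
	 * chan->pushbuf_base is the buffer's offset within that object.
	 */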

out:
	if (ret) {
		NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
		nouveau_gpuobj_ref(NULL, &chan->pushbuf);
		if (chan->pushbuf_bo) {
			nouveau_bo_unmap(chan->pushbuf_bo);
			nouveau_bo_ref(NULL, &chan->pushbuf_bo);
		}
	}

	return ret;
}

/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE);
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret, i;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);
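
	/* Note: a channel carries two reference counts.  chan->users counts
	 * "active" holders that may still submit work; when it hits zero,
	 * nouveau_channel_put_unlocked() tears the hardware state down.
	 * chan->ref is a plain kref on the structure itself, managed via
	 * nouveau_channel_ref(), and only frees the memory once the last
	 * pointer is dropped.
	 */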

	/* allocate hw channel id */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);

	/* setup channel's memory and vm */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate DMA push buffer */
	ret = nouveau_channel_pushbuf_init(chan);
	if (ret) {
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}
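
	/* The offsets below are those of the DMA_PUT/DMA_GET registers in
	 * the channel's USER control window; NV50 and later additionally
	 * expose a word holding the high bits of the wider get pointer.
	 * The exact layout is hardware-defined; see the per-generation
	 * FIFO code for details.
	 */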
	nouveau_dma_init(chan);
	chan->user_put = 0x40;
	chan->user_get = 0x44;
	if (dev_priv->card_type >= NV_50)
		chan->user_get_hi = 0x60;

	/* create fifo context */
	ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Insert NOPs for NOUVEAU_DMA_SKIPS */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING  (chan, 0x00000000);

	ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}
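
	/* On pre-Fermi hardware the software object created above must also
	 * be bound to a subchannel before software methods can be submitted;
	 * the NV01_SUBCHAN_OBJECT method below performs that binding.
	 */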
	if (dev_priv->card_type < NV_C0) {
		ret = RING_SPACE(chan, 2);
		if (ret) {
			nouveau_channel_put(&chan);
			return ret;
		}

		BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
		OUT_RING  (chan, NvSw);
		FIRE_RING (chan);
	}

	FIRE_RING(chan);

	ret = fence->context_new(chan, NVOBJ_ENGINE_FENCE);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	if (fpriv) {
		spin_lock(&fpriv->lock);
		list_add(&chan->list, &fpriv->channels);
		spin_unlock(&fpriv->lock);
	}

	*chan_ret = chan;
	return 0;
}

struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *ref)
{
	struct nouveau_channel *chan = NULL;

	if (likely(ref && atomic_inc_not_zero(&ref->users)))
		nouveau_channel_ref(ref, &chan);

	return chan;
}
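
/* Typical caller pattern, as an illustrative sketch only ("req" here is a
 * stand-in for an ioctl argument, not something defined in this file):
 *
 *	chan = nouveau_channel_get(file_priv, req->channel);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	... the channel mutex is now held, work may be submitted ...
 *	nouveau_channel_put(&chan);
 */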

struct nouveau_channel *
nouveau_channel_get(struct drm_file *file_priv, int id)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;

	spin_lock(&fpriv->lock);
	list_for_each_entry(chan, &fpriv->channels, list) {
		if (chan->id == id) {
			chan = nouveau_channel_get_unlocked(chan);
			spin_unlock(&fpriv->lock);
			mutex_lock(&chan->mutex);
			return chan;
		}
	}
	spin_unlock(&fpriv->lock);

	return ERR_PTR(-EINVAL);
}

void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;
	int i;

	/* decrement the refcount, and we're done if there's still refs */
	if (likely(!atomic_dec_and_test(&chan->users))) {
		nouveau_channel_ref(NULL, pchan);
		return;
	}

	/* no one wants the channel anymore */
	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
	nouveau_debugfs_channel_fini(chan);

	/* give it a chance to idle */
	nouveau_channel_idle(chan);

	/* destroy the engine specific contexts */
	for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
		if (chan->engctx[i])
			dev_priv->eng[i]->context_del(chan, i);
	}

	/* aside from its resources, the channel should now be dead,
	 * remove it from the channel list
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* destroy any resources the channel owned */
	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
		nouveau_bo_unmap(chan->pushbuf_bo);
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}

	nouveau_ramht_ref(NULL, &chan->ramht, chan);
	nouveau_notifier_takedown_channel(chan);
	nouveau_gpuobj_channel_takedown(chan);

	nouveau_channel_ref(NULL, pchan);
}
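
/* The teardown above runs in roughly the reverse order of
 * nouveau_channel_alloc(): engine contexts first, then removal from the
 * global channel table, then the push buffer and per-channel objects.
 */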

void
nouveau_channel_put(struct nouveau_channel **pchan)
{
	mutex_unlock(&(*pchan)->mutex);
	nouveau_channel_put_unlocked(pchan);
}

static void
nouveau_channel_del(struct kref *ref)
{
	struct nouveau_channel *chan =
		container_of(ref, struct nouveau_channel, ref);

	kfree(chan);
}

void
nouveau_channel_ref(struct nouveau_channel *chan,
		    struct nouveau_channel **pchan)
{
	if (chan)
		kref_get(&chan->ref);

	if (*pchan)
		kref_put(&(*pchan)->ref, nouveau_channel_del);

	*pchan = chan;
}
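
/* Passing chan == NULL simply drops the reference currently held in *pchan;
 * that is the idiom used throughout this file to release channel pointers.
 */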

int
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *fence = NULL;
	int ret;

	/* emit one more fence on the channel and block until it signals */
	ret = nouveau_fence_new(chan, &fence);
	if (!ret) {
		ret = nouveau_fence_wait(fence, false, false);
		nouveau_fence_unref(&fence);
	}

	if (ret)
		NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	return ret;
}

/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_channel *chan;
	int i;

	if (!pfifo)
		return;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < pfifo->channels; i++) {
		chan = nouveau_channel_get(file_priv, i);
		if (IS_ERR(chan))
			continue;

		list_del(&chan->list);
		atomic_dec(&chan->users);
		nouveau_channel_put(&chan);
	}
}