/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/cl006b.h>
#include <nvif/cl506f.h>
#include <nvif/cl906f.h>
#include <nvif/cla06f.h>
#include <nvif/clc36f.h>
#include <nvif/ioctl.h>

#include <core/client.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_bo.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_svm.h"

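/* Module option: when set, DMA push buffers are allocated in VRAM rather
 * than in system memory (GART).
 */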
MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);

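/* Notifier callback: the host has killed the channel.  Mark it dead and
 * fail its fence context so that any waiters are unblocked with -ENODEV.
 */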
static int
nouveau_channel_killed(struct nvif_notify *ntfy)
{
	struct nouveau_channel *chan = container_of(ntfy, typeof(*chan), kill);
	struct nouveau_cli *cli = (void *)chan->user.client;

	NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
	atomic_set(&chan->killed, 1);
	if (chan->fence)
		nouveau_fence_context_kill(chan->fence, -ENODEV);

	return NVIF_NOTIFY_DROP;
}

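/* Wait for all work submitted to the channel to complete, by emitting one
 * final fence and waiting on it.
 */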
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
	if (likely(chan && chan->fence && !atomic_read(&chan->killed))) {
		struct nouveau_cli *cli = (void *)chan->user.client;
		struct nouveau_fence *fence = NULL;
		int ret;

		ret = nouveau_fence_new(chan, false, &fence);
		if (!ret) {
			ret = nouveau_fence_wait(fence, false, false);
			nouveau_fence_unref(&fence);
		}

		if (ret) {
			NV_PRINTK(err, cli, "failed to idle channel %d [%s]\n",
				  chan->chid, nvxx_client(&cli->base)->name);
			return ret;
		}
	}
	return 0;
}

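/* Tear down a channel: delete its fence context, detach it from SVM,
 * release every nvif object it owns, and free the push buffer.
 */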
void
nouveau_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	if (chan) {
		struct nouveau_cli *cli = (void *)chan->user.client;
		bool super;

		if (cli) {
			super = cli->base.super;
			cli->base.super = true;
		}

		if (chan->fence)
			nouveau_fence(chan->drm)->context_del(chan);

		if (cli)
			nouveau_svmm_part(chan->vmm->svmm, chan->inst);

		nvif_object_fini(&chan->nvsw);
		nvif_object_fini(&chan->gart);
		nvif_object_fini(&chan->vram);
		nvif_notify_fini(&chan->kill);
		nvif_object_fini(&chan->user);
		nvif_object_fini(&chan->push.ctxdma);
		nouveau_vma_del(&chan->push.vma);
		nouveau_bo_unmap(chan->push.buffer);
		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
			nouveau_bo_unpin(chan->push.buffer);
		nouveau_bo_ref(NULL, &chan->push.buffer);
		kfree(chan);

		if (cli)
			cli->base.super = super;
	}
	*pchan = NULL;
}

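/* Stage one of channel creation, common to DMA and GPFIFO channels:
 * allocate the channel struct and its DMA push buffer, then create a
 * ctxdma spanning the memory space the push buffer lives in.
 */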
static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
		     u32 size, struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)device->object.client;
	struct nv_dma_v0 args = {};
	struct nouveau_channel *chan;
	u32 target;
	int ret;

	chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->device = device;
	chan->drm = drm;
	chan->vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	atomic_set(&chan->killed, 0);

	/* allocate memory for dma push buffer */
	target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
	if (nouveau_vram_pushbuf)
		target = TTM_PL_FLAG_VRAM;

	ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
			     &chan->push.buffer);
	if (ret == 0) {
		ret = nouveau_bo_pin(chan->push.buffer, target, false);
		if (ret == 0)
			ret = nouveau_bo_map(chan->push.buffer);
	}

	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	/* create dma object covering the *entire* memory space that the
	 * pushbuf lives in, this is because the GEM code requires that
	 * we be able to call out to other (indirect) push buffers
	 */
	chan->push.addr = chan->push.buffer->bo.offset;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_vma_new(chan->push.buffer, chan->vmm,
				      &chan->push.vma);
		if (ret) {
			nouveau_channel_del(pchan);
			return ret;
		}

		chan->push.addr = chan->push.vma->addr;

		if (device->info.family >= NV_DEVICE_INFO_V0_FERMI)
			return 0;

		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start = 0;
		args.limit = chan->vmm->vmm.limit - 1;
	} else
	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
			/* nv04 vram pushbuf hack, retarget to its location in
			 * the framebuffer bar rather than direct vram access..
			 * nfi why this exists, it came from the -nv ddx.
			 */
			args.target = NV_DMA_V0_TARGET_PCI;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = nvxx_device(device)->func->
				resource_addr(nvxx_device(device), 1);
			args.limit = args.start + device->info.ram_user - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}
	} else {
		if (chan->drm->agp.bridge) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		}
	}

	ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
			       &args, sizeof(args), &chan->push.ctxdma);
	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}

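/* Create a GPFIFO (indirect buffer) channel, trying channel classes from
 * newest to oldest until the device accepts one.
 */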
static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
		    u64 runlist, bool priv, struct nouveau_channel **pchan)
{
	static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A,
					VOLTA_CHANNEL_GPFIFO_A,
					PASCAL_CHANNEL_GPFIFO_A,
					MAXWELL_CHANNEL_GPFIFO_A,
					KEPLER_CHANNEL_GPFIFO_B,
					KEPLER_CHANNEL_GPFIFO_A,
					FERMI_CHANNEL_GPFIFO,
					G82_CHANNEL_GPFIFO,
					NV50_CHANNEL_GPFIFO,
					0 };
	const u16 *oclass = oclasses;
	union {
		struct nv50_channel_gpfifo_v0 nv50;
		struct fermi_channel_gpfifo_v0 fermi;
		struct kepler_channel_gpfifo_a_v0 kepler;
		struct volta_channel_gpfifo_a_v0 volta;
	} args;
	struct nouveau_channel *chan;
	u32 size;
	int ret;

	/* allocate dma push buffer */
	ret = nouveau_channel_prep(drm, device, 0x12000, &chan);
	*pchan = chan;
	if (ret)
		return ret;

	/* create channel object */
	do {
		if (oclass[0] >= VOLTA_CHANNEL_GPFIFO_A) {
			args.volta.version = 0;
			args.volta.ilength = 0x02000;
			args.volta.ioffset = 0x10000 + chan->push.addr;
			args.volta.runlist = runlist;
			args.volta.vmm = nvif_handle(&chan->vmm->vmm.object);
			args.volta.priv = priv;
			size = sizeof(args.volta);
		} else
		if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
			args.kepler.version = 0;
			args.kepler.ilength = 0x02000;
			args.kepler.ioffset = 0x10000 + chan->push.addr;
			args.kepler.runlist = runlist;
			args.kepler.vmm = nvif_handle(&chan->vmm->vmm.object);
			args.kepler.priv = priv;
			size = sizeof(args.kepler);
		} else
		if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
			args.fermi.version = 0;
			args.fermi.ilength = 0x02000;
			args.fermi.ioffset = 0x10000 + chan->push.addr;
			args.fermi.vmm = nvif_handle(&chan->vmm->vmm.object);
			size = sizeof(args.fermi);
		} else {
			args.nv50.version = 0;
			args.nv50.ilength = 0x02000;
			args.nv50.ioffset = 0x10000 + chan->push.addr;
			args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
			args.nv50.vmm = nvif_handle(&chan->vmm->vmm.object);
			size = sizeof(args.nv50);
		}

		ret = nvif_object_init(&device->object, 0, *oclass++,
				       &args, size, &chan->user);
		if (ret == 0) {
			if (chan->user.oclass >= VOLTA_CHANNEL_GPFIFO_A) {
				chan->chid = args.volta.chid;
				chan->inst = args.volta.inst;
				chan->token = args.volta.token;
			} else
			if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A) {
				chan->chid = args.kepler.chid;
				chan->inst = args.kepler.inst;
			} else
			if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
				chan->chid = args.fermi.chid;
			} else {
				chan->chid = args.nv50.chid;
			}
			return ret;
		}
	} while (*oclass);

	nouveau_channel_del(pchan);
	return ret;
}

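/* Create an old-style DMA push buffer channel for pre-G80 hardware, again
 * trying channel classes from newest to oldest.
 */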
static int
nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
		    struct nouveau_channel **pchan)
{
	static const u16 oclasses[] = { NV40_CHANNEL_DMA,
					NV17_CHANNEL_DMA,
					NV10_CHANNEL_DMA,
					NV03_CHANNEL_DMA,
					0 };
	const u16 *oclass = oclasses;
	struct nv03_channel_dma_v0 args;
	struct nouveau_channel *chan;
	int ret;

	/* allocate dma push buffer */
	ret = nouveau_channel_prep(drm, device, 0x10000, &chan);
	*pchan = chan;
	if (ret)
		return ret;

	/* create channel object */
	args.version = 0;
	args.pushbuf = nvif_handle(&chan->push.ctxdma);
	args.offset = chan->push.addr;

	do {
		ret = nvif_object_init(&device->object, 0, *oclass++,
				       &args, sizeof(args), &chan->user);
		if (ret == 0) {
			chan->chid = args.chid;
			return ret;
		}
	} while (ret && *oclass);

	nouveau_channel_del(pchan);
	return ret;
}

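/* Stage two of channel creation: map the channel's control area, hook up
 * the kill notifier, create the vram/gart ctxdmas, and initialise the DMA
 * submission state and fence context.
 */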
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
	struct nvif_device *device = chan->device;
	struct nouveau_drm *drm = chan->drm;
	struct nv_dma_v0 args = {};
	int ret, i;

	nvif_object_map(&chan->user, NULL, 0);

	if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
		ret = nvif_notify_init(&chan->user, nouveau_channel_killed,
				       true, NV906F_V0_NTFY_KILLED,
				       NULL, 0, 0, &chan->kill);
		if (ret == 0)
			ret = nvif_notify_get(&chan->kill);
		if (ret) {
			NV_ERROR(drm, "Failed to request channel kill "
				      "notification: %d\n", ret);
			return ret;
		}
	}

	/* allocate dma objects to cover all allowed vram, and gart */
	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}

		ret = nvif_object_init(&chan->user, vram, NV_DMA_IN_MEMORY,
				       &args, sizeof(args), &chan->vram);
		if (ret)
			return ret;

		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		} else
		if (chan->drm->agp.bridge) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		}

		ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
				       &args, sizeof(args), &chan->gart);
		if (ret)
			return ret;
	}

	/* initialise dma tracking parameters */
	switch (chan->user.oclass & 0x00ff) {
	case 0x006b:
	case 0x006e:
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->dma.max = (0x10000 / 4) - 2;
		break;
	default:
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->user_get_hi = 0x60;
		chan->dma.ib_base =  0x10000 / 4;
		chan->dma.ib_max  = (0x02000 / 8) - 1;
		chan->dma.ib_put  = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
		chan->dma.max = chan->dma.ib_base;
		break;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0x00000000);

	/* allocate software object class (used for fences on <= nv05) */
	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		ret = nvif_object_init(&chan->user, 0x006e,
				       NVIF_CLASS_SW_NV04,
				       NULL, 0, &chan->nvsw);
		if (ret)
			return ret;

		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
		OUT_RING  (chan, chan->nvsw.handle);
		FIRE_RING (chan);
	}

	/* initialise synchronisation */
	return nouveau_fence(chan->drm)->context_new(chan);
}

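/* Create and initialise a channel, preferring a GPFIFO channel and falling
 * back to a DMA channel on older hardware, then join it to the client's
 * SVMM if one is in use.
 */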
int
nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
		    u32 arg0, u32 arg1, bool priv,
		    struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)device->object.client;
	bool super;
	int ret;

	/* hack until fencenv50 is fixed, and agp access relaxed */
	super = cli->base.super;
	cli->base.super = true;

	ret = nouveau_channel_ind(drm, device, arg0, priv, pchan);
	if (ret) {
		NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
		ret = nouveau_channel_dma(drm, device, pchan);
		if (ret) {
			NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
			goto done;
		}
	}

	ret = nouveau_channel_init(*pchan, arg0, arg1);
	if (ret) {
		NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
		nouveau_channel_del(pchan);
		goto done;
	}

	ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
	if (ret)
		nouveau_channel_del(pchan);

done:
	cli->base.super = super;
	return ret;
}

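/* Query the number of channels the device supports and allocate a block of
 * dma_fence contexts, one per channel.
 */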
int
nouveau_channels_init(struct nouveau_drm *drm)
{
	struct {
		struct nv_device_info_v1 m;
		struct {
			struct nv_device_info_v1_data channels;
		} v;
	} args = {
		.m.version = 1,
		.m.count = sizeof(args.v) / sizeof(args.v.channels),
		.v.channels.mthd = NV_DEVICE_FIFO_CHANNELS,
	};
	struct nvif_object *device = &drm->client.device.object;
	int ret;

	ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
	if (ret || args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
		return -ENODEV;

	drm->chan.nr = args.v.channels.data;
	drm->chan.context_base = dma_fence_context_alloc(drm->chan.nr);
	return 0;
}