/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_fifo.h"
static void nvc0_fifo_isr(struct drm_device *);
struct nvc0_fifo_priv {
	struct nouveau_fifo_priv base;
	struct nouveau_gpuobj *playlist[2];
	int cur_playlist;
	struct nouveau_vma user_vma;
	int spoon_nr;
};
struct nvc0_fifo_chan {
	struct nouveau_fifo_chan base;
	struct nouveau_gpuobj *user;
};
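
/* Rebuild the channel runlist ("playlist") and hand it to PFIFO.  Two
 * buffers are alternated so the hardware never sees a half-written list;
 * each entry is two 32-bit words (channel id plus what appears to be a
 * flag word), and the entry count is written along with the list address. */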
static void
nvc0_fifo_playlist_update(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_gpuobj *cur;
	int i, p;

	/* flip to the other playlist buffer, fill it with every channel
	 * whose enable bit is set in its 0x003004 control register */
	cur = priv->playlist[priv->cur_playlist];
	priv->cur_playlist = !priv->cur_playlist;

	for (i = 0, p = 0; i < 128; i++) {
		if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
			continue;
		nv_wo32(cur, p + 0, i);
		nv_wo32(cur, p + 4, 0x00000004);
		p += 8;
	}
	pinstmem->flush(dev);

	/* point PFIFO at the new list and wait for it to be taken up */
	nv_wr32(dev, 0x002270, cur->vinst >> 12);
	nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
	if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
		NV_ERROR(dev, "PFIFO - playlist update failed\n");
}
static int
nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
	struct nvc0_fifo_chan *fctx;
	u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
	int ret, i;

	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return -ENOMEM;

	/* map this channel's slice of the BAR1 polling area */
	chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
				priv->user_vma.offset + (chan->id * 0x1000),
				PAGE_SIZE);
	if (!chan->user) {
		ret = -ENOMEM;
		goto error;
	}

	/* allocate vram for control regs, map into polling area */
	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &fctx->user);
	if (ret)
		goto error;

	nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
			  *(struct nouveau_mem **)fctx->user->node);

	/* initialise the channel's instance block */
	for (i = 0; i < 0x100; i += 4)
		nv_wo32(chan->ramin, i, 0x00000000);
	nv_wo32(chan->ramin, 0x08, lower_32_bits(fctx->user->vinst));
	nv_wo32(chan->ramin, 0x0c, upper_32_bits(fctx->user->vinst));
	nv_wo32(chan->ramin, 0x10, 0x0000face);
	nv_wo32(chan->ramin, 0x30, 0xfffff902);
	nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
	nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
				   upper_32_bits(ib_virt));
	nv_wo32(chan->ramin, 0x54, 0x00000002);
	nv_wo32(chan->ramin, 0x84, 0x20400000);
	nv_wo32(chan->ramin, 0x94, 0x30000001);
	nv_wo32(chan->ramin, 0x9c, 0x00000100);
	nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
	nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
	nv_wo32(chan->ramin, 0xac, 0x0000001f);
	nv_wo32(chan->ramin, 0xb8, 0xf8000000);
	nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
	pinstmem->flush(dev);

	/* activate the channel and add it to the playlist */
	nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
					(chan->ramin->vinst >> 12));
	nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
	nvc0_fifo_playlist_update(dev);

error:
	if (ret)
		priv->base.base.context_del(chan, engine);
	return ret;
}
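
/* Per-channel FIFO context teardown: disable the channel, kick it off the
 * hardware via the 0x002634 handshake, drop it from the playlist and
 * release its USER page and BAR1 mapping. */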
static void
nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
{
	struct nvc0_fifo_chan *fctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;

	nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
	nv_wr32(dev, 0x002634, chan->id);
	if (!nv_wait(dev, 0x002634, 0xffffffff, chan->id))
		NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
	nvc0_fifo_playlist_update(dev);
	nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);

	nouveau_gpuobj_ref(NULL, &fctx->user);
	if (chan->user) {
		iounmap(chan->user);
		chan->user = NULL;
	}

	chan->engctx[engine] = NULL;
	kfree(fctx);
}
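
/* Engine init: reset PFIFO, enable every advertised PSUBFIFO, route engines
 * to subfifos, point the hardware at the BAR1 polling area, and re-register
 * any channels that already have a FIFO context (resume path). */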
static int
nvc0_fifo_init(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
	struct nouveau_channel *chan;
	int i;

	/* reset PFIFO, enable all available PSUBFIFO areas */
	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(dev, 0x000204, 0xffffffff);
	nv_wr32(dev, 0x002204, 0xffffffff);

	priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
	NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);

	/* assign engines to subfifos */
	if (priv->spoon_nr >= 3) {
		nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
		nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
		nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
		nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
		nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
		nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PSUBFIFO[n] */
	for (i = 0; i < priv->spoon_nr; i++) {
		nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
	}

	nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
	nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);

	nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xbfffffff);

	/* restore PFIFO context table */
	for (i = 0; i < 128; i++) {
		chan = dev_priv->channels.ptr[i];
		if (!chan || !chan->engctx[engine])
			continue;

		nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
					(chan->ramin->vinst >> 12));
		nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
	}

	nvc0_fifo_playlist_update(dev);
	return 0;
}
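
/* Engine fini: kick all still-active channels off the hardware and mask
 * every PFIFO interrupt source. */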
static int
nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
	int i;

	for (i = 0; i < 128; i++) {
		if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
			continue;

		nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
		nv_wr32(dev, 0x002634, i);
		if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
			NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
				i, nv_rd32(dev, 0x002634));
			return -EBUSY;
		}
	}

	nv_wr32(dev, 0x002140, 0x00000000);
	return 0;
}
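
/* Decode tables used by the fault and subfifo interrupt handlers below;
 * values without an entry fall back to a numeric printout. */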
struct nouveau_enum nvc0_fifo_fault_unit[] = {
	{ 0x03, "PEEPHOLE" },
	{ 0x13, "PCOUNTER" },
	{}
};
struct nouveau_enum nvc0_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};
struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
	{ 0x04, "DISPATCH" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x11, "PCOUNTER" },
	{ 0x15, "CCACHE_POST" },
	{}
};
struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
	{}
};
struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};
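
/* Decode and log an MMU fault: read the per-unit fault descriptor
 * registers, then print reason, reporting unit and client using the
 * tables above. */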
static void
nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
{
	u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
	u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
	u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
	u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
	u32 client = (stat & 0x00001f00) >> 8;

	NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
		(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
	nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
	printk("] from ");
	nouveau_enum_print(nvc0_fifo_fault_unit, unit);
	if (stat & 0x00000040) {
		printk("/");
		nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
	} else {
		printk("/GPC%d/", (stat & 0x1f000000) >> 24);
		nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
	}
	printk(" on channel 0x%010llx\n", (u64)inst << 12);
}
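
/* Complete a software-signalled page flip on the given channel; runs from
 * interrupt context, hence the channel-list spinlock. */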
static int
nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
{
	struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (likely(chid >= 0 && chid < priv->base.channels)) {
		chan = dev_priv->channels.ptr[chid];
		if (likely(chan))
			ret = nouveau_finish_page_flip(chan, NULL);
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return ret;
}
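
/* PSUBFIFO interrupt: an ILLEGAL_MTHD on method 0x0054 is how page-flip
 * completion is signalled, so try to complete the flip quietly before
 * logging whatever remains of the status. */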
static void
nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
{
	u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
	u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
	u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
	u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000);
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;

	if (stat & 0x00200000) {
		if (mthd == 0x0054) {
			if (!nvc0_fifo_page_flip(dev, chid))
				show &= ~0x00200000;
		}
	}

	if (show) {
		NV_INFO(dev, "PFIFO%d:", unit);
		nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
		printk("\n");
		NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
			unit, chid, subc, mthd, data);
	}

	nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
}
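
/* Top-level PFIFO interrupt handler: bit 28 carries per-unit MMU faults,
 * bit 29 per-unit PSUBFIFO interrupts; anything left unhandled is logged
 * and masked off to avoid an interrupt storm. */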
static void
nvc0_fifo_isr(struct drm_device *dev)
{
	u32 mask = nv_rd32(dev, 0x002140);
	u32 stat = nv_rd32(dev, 0x002100) & mask;

	if (stat & 0x00000100) {
		NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
		nv_wr32(dev, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x10000000) {
		u32 units = nv_rd32(dev, 0x00259c);
		u32 u = units;

		while (u) {
			int i = ffs(u) - 1;
			nvc0_fifo_isr_vm_fault(dev, i);
			u &= ~(1 << i);
		}

		nv_wr32(dev, 0x00259c, units);
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 units = nv_rd32(dev, 0x0025a0);
		u32 u = units;

		while (u) {
			int i = ffs(u) - 1;
			nvc0_fifo_isr_subfifo_intr(dev, i);
			u &= ~(1 << i);
		}

		nv_wr32(dev, 0x0025a0, units);
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
		nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
		stat &= ~0x40000000;
	}

	if (stat) {
		NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
		nv_wr32(dev, 0x002100, stat);
		nv_wr32(dev, 0x002140, 0);
	}
}
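
/* Engine destructor: release the BAR1 polling area and both playlist
 * buffers before dropping the engine. */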
static void
nvc0_fifo_destroy(struct drm_device *dev, int engine)
{
	struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_vm_put(&priv->user_vma);
	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);

	dev_priv->eng[engine] = NULL;
	kfree(priv);
}
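
/* Engine constructor: allocate the private state, hook up the engine
 * methods, allocate the two playlist buffers and the BAR1 polling area,
 * and register the PFIFO interrupt handler (irq source 8). */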
int
nvc0_fifo_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_fifo_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.base.destroy = nvc0_fifo_destroy;
	priv->base.base.init = nvc0_fifo_init;
	priv->base.base.fini = nvc0_fifo_fini;
	priv->base.base.context_new = nvc0_fifo_context_new;
	priv->base.base.context_del = nvc0_fifo_context_del;
	priv->base.channels = 128;
	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;

	ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
	if (ret)
		goto error;

	ret = nouveau_vm_get(dev_priv->bar1_vm, priv->base.channels * 0x1000,
			     12, NV_MEM_ACCESS_RW, &priv->user_vma);
	if (ret)
		goto error;

	nouveau_irq_register(dev, 8, nvc0_fifo_isr);
error:
	if (ret)
		priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
	return ret;
}