/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"
#include "channv04.h"
#include "regsnv04.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>
#include <engine/sw.h>
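/* RAMFC field layout for NV04: each entry ties a PFIFO CACHE1 register to
 * a field in the channel's RAMFC context slot, saved/restored on channel
 * switch. Columns are assumed to follow the nv04_fifo_ramfc definition in
 * nv04.h, { bits, ctxs, ctxp, regs, regp }: field width, shift and byte
 * offset within RAMFC, register shift, and the register itself.
 */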
static const struct nv04_fifo_ramfc
nv04_fifo_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};
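/* Stop PFIFO processing commands: disable cache reassignment and the
 * CACHE1 puller, then wait (up to 2ms) for any in-flight hash lookup to
 * finish. Acquires fifo->base.lock; the saved IRQ flags are returned via
 * *pflags and must be passed back to nv04_fifo_start() to resume.
 */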
void
nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
__acquires(fifo->base.lock)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&fifo->base.lock, flags);
	*pflags = flags;

	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently, sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
			break;
	);

	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}
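/* Restart PFIFO after nv04_fifo_pause(): re-enable the puller and cache
 * reassignment, then release the lock with the IRQ flags saved by pause.
 */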
void
nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
__releases(fifo->base.lock)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags = *pflags;

	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}
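/* Decode the error code in bits 31:29 of NV04_PFIFO_CACHE1_DMA_STATE into
 * a string for the DMA_PUSHER error message below.
 */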
static const char *
nv_dma_state_err(u32 state)
{
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}
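/* Try to execute a method in software. A set bit in 0x003280 routes the
 * corresponding subchannel to a hardware engine; a bind (method 0x0000)
 * clears it here so the subchannel goes to the SW engine instead, and
 * object methods 0x0180-0x01fc have their handle replaced with the
 * instance the puller already resolved (0x003258) before being passed
 * down. Returns true if the SW engine consumed the method.
 */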
static bool
nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
{
	struct nvkm_sw *sw = device->sw;
	const int subc = (addr & 0x0000e000) >> 13;
	const int mthd = (addr & 0x00001ffc);
	const u32 mask = 0x0000000f << (subc * 4);
	u32 engine = nvkm_rd32(device, 0x003280);
	bool handled = false;

	switch (mthd) {
	case 0x0000 ... 0x0000: /* subchannel's engine -> software */
		nvkm_wr32(device, 0x003280, (engine &= ~mask));
		fallthrough;
	case 0x0180 ... 0x01fc: /* handle -> instance */
		data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
		fallthrough;
	case 0x0100 ... 0x017c:
	case 0x0200 ... 0x1ffc: /* pass method down to sw */
		if (!(engine & mask) && sw)
			handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
		break;
	default:
		break;
	}

	return handled;
}
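/* CACHE_ERROR: the puller could not process the method at CACHE1 GET,
 * usually because the object handle missed in RAMHT. Recover the method
 * and data from the cache, try it as a software method, log it on
 * failure, then step GET past the bad entry and restart the cache.
 */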
static void
nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	u32 pull0 = nvkm_rd32(device, 0x003250);
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
	 * show that it wraps around to the start at GET=0x800.. No clue as
	 * to why..
	 */
	ptr = (get & 0x7ff) >> 2;

	if (device->card_type < NV_40) {
		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!(pull0 & 0x00000100) ||
	    !nv04_fifo_swmthd(device, chid, mthd, data)) {
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "CACHE_ERROR - "
			   "ch %d [%s] subc %d mthd %04x data %08x\n",
			   chid, chan ? chan->object.client->name : "unknown",
			   (mthd >> 13) & 7, mthd & 0x1ffc, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}
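/* DMA_PUSHER: the pusher hit an error while fetching commands from the
 * pushbuffer. Log the state, then force GET to catch up to PUT (plus the
 * IB pointers on G80) so the channel can make progress, and clear the
 * error state.
 */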
static void
nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 dma_get = nvkm_rd32(device, 0x003244);
	u32 dma_put = nvkm_rd32(device, 0x003240);
	u32 push = nvkm_rd32(device, 0x003220);
	u32 state = nvkm_rd32(device, 0x003228);
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	const char *name;

	chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
	name = chan ? chan->object.client->name : "unknown";
	if (device->card_type == NV_50) {
		u32 ho_get = nvkm_rd32(device, 0x003328);
		u32 ho_put = nvkm_rd32(device, 0x003320);
		u32 ib_get = nvkm_rd32(device, 0x003334);
		u32 ib_put = nvkm_rd32(device, 0x003330);

		nvkm_error(subdev, "DMA_PUSHER - "
			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
			   "ib_put %08x state %08x (err: %s) push %08x\n",
			   chid, name, ho_get, dma_get, ho_put, dma_put,
			   ib_get, ib_put, state, nv_dma_state_err(state),
			   push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nvkm_wr32(device, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nvkm_wr32(device, 0x003244, dma_put);
			nvkm_wr32(device, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nvkm_wr32(device, 0x003334, ib_put);
	} else {
		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
				   "state %08x (err: %s) push %08x\n",
			   chid, name, dma_get, dma_put, state,
			   nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nvkm_wr32(device, 0x003244, dma_put);
	}
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);

	nvkm_wr32(device, 0x003228, 0x00000000);
	nvkm_wr32(device, 0x003220, 0x00000001);
	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}
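/* Top-level PFIFO interrupt handler. Cache reassignment is held off while
 * the pending sources are serviced; anything left unhandled is reported
 * once and then masked to avoid an interrupt storm.
 */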
void
nv04_fifo_intr(struct nvkm_fifo *base)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1);
	get  = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(fifo, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(fifo, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nvkm_wr32(device, 0x002100, 0x00000010);
		}

		if (stat & 0x40000000) {
			nvkm_wr32(device, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&fifo->base);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		nvkm_warn(subdev, "intr %08x\n", stat);
		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
	}

	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}
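/* Bring up PFIFO: program the RAMHT/RAMRO/RAMFC apertures, point CACHE1
 * at the reserved inactive channel, then enable all interrupt sources
 * and start the cache.
 */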
void
nv04_fifo_init(struct nvkm_fifo *base)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_ramht *ramht = imem->ramht;
	struct nvkm_memory *ramro = imem->ramro;
	struct nvkm_memory *ramfc = imem->ramfc;

	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
					    ((ramht->bits - 9) << 16) |
					    (ramht->gpuobj->addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);

	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
}
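/* Common constructor shared by all NV04-style FIFOs. nr is the total
 * channel count; the last channel is reserved as the "no channel"
 * placeholder, hence the set_bit marking it in-use below.
 */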
int
nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       int index, int nr, const struct nv04_fifo_ramfc *ramfc,
	       struct nvkm_fifo **pfifo)
{
	struct nv04_fifo *fifo;
	int ret;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->ramfc = ramfc;
	*pfifo = &fifo->base;

	ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
	if (ret)
		return ret;

	set_bit(nr - 1, fifo->base.mask); /* inactive channel */
	return 0;
}
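/* Method table for original NV04; the later NV04-style chips (nv10, nv17,
 * nv40, ...) reuse these entry points through nv04_fifo_new_() with their
 * own RAMFC layouts.
 */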
static const struct nvkm_fifo_func
nv04_fifo = {
	.init = nv04_fifo_init,
	.intr = nv04_fifo_intr,
	.pause = nv04_fifo_pause,
	.start = nv04_fifo_start,
	.chan = {
		&nv04_fifo_dma_oclass,
		NULL
	},
};
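/* NV04: 16 channels, using the RAMFC layout defined at the top of this
 * file.
 */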
int
nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return nv04_fifo_new_(&nv04_fifo, device, index, 16,
			      nv04_fifo_ramfc, pfifo);
}