/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"
#include "channv04.h"
#include "regsnv04.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>
#include <engine/sw.h>
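/* RAMFC layout for NV04: each entry below ties a PFIFO CACHE1 register to
 * its saved copy in a channel's RAMFC context. Reading the columns against
 * the nv04_fifo_ramfc struct definition in nv04.h, they appear to be: field
 * width in bits, shift within the RAMFC word, byte offset of that word in
 * RAMFC, shift within the hardware register, and the register itself; e.g.
 * DMA_DCOUNT occupies the upper 16 bits of the 0x08 word, which it shares
 * with DMA_INSTANCE.
 */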
static const struct nv04_fifo_ramfc
nv04_fifo_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};
void
nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
__acquires(fifo->base.lock)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&fifo->base.lock, flags);
	*pflags = flags;

	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently; sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
			break;
	);

	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}
void
nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
__releases(fifo->base.lock)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags = *pflags;

	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}
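/* Minimal usage sketch (assuming a valid struct nvkm_fifo pointer): the
 * pause/start pair brackets direct PFIFO pokes so the puller can't race
 * with them:
 *
 *	unsigned long flags;
 *	nv04_fifo_pause(fifo, &flags);
 *	// ... safely inspect/modify CACHE1 state here ...
 *	nv04_fifo_start(fifo, &flags);
 */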
static const char *
nv_dma_state_err(u32 state)
{
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}
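/* Worked example: DMA_STATE bits 31:29 hold the error code, so a state
 * word of 0x20000000 decodes to desc[1] == "CALL_SUBR_ACTIVE", while
 * 0x00000000 decodes to "NONE".
 */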
static bool
nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
{
	struct nvkm_sw *sw = device->sw;
	const int subc = (addr & 0x0000e000) >> 13;
	const int mthd = (addr & 0x00001ffc);
	const u32 mask = 0x0000000f << (subc * 4);
	u32 engine = nvkm_rd32(device, 0x003280);
	bool handled = false;

	switch (mthd) {
	case 0x0000 ... 0x0000: /* subchannel's engine -> software */
		nvkm_wr32(device, 0x003280, (engine &= ~mask));
		fallthrough;
	case 0x0180 ... 0x01fc: /* handle -> instance */
		data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
		fallthrough;
	case 0x0100 ... 0x017c:
	case 0x0200 ... 0x1ffc: /* pass method down to sw */
		if (!(engine & mask) && sw)
			handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
		break;
	default:
		break;
	}

	return handled;
}
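/* CACHE_ERROR recovery: if the faulting method wasn't a software method
 * that nv04_fifo_swmthd() could consume, log it against the owning
 * channel; either way, kick the pusher, ack the interrupt, step GET past
 * the offending entry while PUSH0 is disabled, and re-enable the puller.
 */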
static void
nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	u32 pull0 = nvkm_rd32(device, 0x003250);
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
	 * show that it wraps around to the start at GET=0x800.. No clue as to
	 * why..
	 */
	ptr = (get & 0x7ff) >> 2;

	if (device->card_type < NV_40) {
		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!(pull0 & 0x00000100) ||
	    !nv04_fifo_swmthd(device, chid, mthd, data)) {
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "CACHE_ERROR - "
			   "ch %d [%s] subc %d mthd %04x data %08x\n",
			   chid, chan ? chan->object.client->name : "unknown",
			   (mthd >> 13) & 7, mthd & 0x1ffc, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}
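/* DMA_PUSHER recovery: report the stall, then skip the pusher ahead to
 * PUT (and, on NV50, the high GET/PUT words and IB pointers) so the
 * channel can make progress, before clearing DMA_STATE and re-arming
 * the pusher.
 */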
static void
nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 dma_get = nvkm_rd32(device, 0x003244);
	u32 dma_put = nvkm_rd32(device, 0x003240);
	u32 push = nvkm_rd32(device, 0x003220);
	u32 state = nvkm_rd32(device, 0x003228);
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	const char *name;

	chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
	name = chan ? chan->object.client->name : "unknown";
	if (device->card_type == NV_50) {
		u32 ho_get = nvkm_rd32(device, 0x003328);
		u32 ho_put = nvkm_rd32(device, 0x003320);
		u32 ib_get = nvkm_rd32(device, 0x003334);
		u32 ib_put = nvkm_rd32(device, 0x003330);

		nvkm_error(subdev, "DMA_PUSHER - "
			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
			   "ib_put %08x state %08x (err: %s) push %08x\n",
			   chid, name, ho_get, dma_get, ho_put, dma_put,
			   ib_get, ib_put, state, nv_dma_state_err(state),
			   push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nvkm_wr32(device, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nvkm_wr32(device, 0x003244, dma_put);
			nvkm_wr32(device, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nvkm_wr32(device, 0x003334, ib_put);
	} else {
		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
				   "state %08x (err: %s) push %08x\n",
			   chid, name, dma_get, dma_put, state,
			   nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nvkm_wr32(device, 0x003244, dma_put);
	}
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);

	nvkm_wr32(device, 0x003228, 0x00000000);
	nvkm_wr32(device, 0x003220, 0x00000001);
	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}
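/* Top-level PFIFO interrupt handler: caches are disabled while the status
 * bits are dispatched (the previous REASSIGN state is restored on exit),
 * and any unrecognised bits are logged once and masked off so they can't
 * storm.
 */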
void
nv04_fifo_intr(struct nvkm_fifo *base)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1);
	get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(fifo, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(fifo, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nvkm_wr32(device, 0x002100, 0x00000010);
		}

		if (stat & 0x40000000) {
			nvkm_wr32(device, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&fifo->base);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		nvkm_warn(subdev, "intr %08x\n", stat);
		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
	}

	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}
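/* NV03_PFIFO_RAMHT packs search depth, table size and base address into
 * one word. A hypothetical example, assuming ramht->bits == 12 and a hash
 * table at instance address 0x10000: (0x03 << 24) | ((12 - 9) << 16) |
 * (0x10000 >> 8) == 0x03030100.
 */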
void
nv04_fifo_init(struct nvkm_fifo *base)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_ramht *ramht = imem->ramht;
	struct nvkm_memory *ramro = imem->ramro;
	struct nvkm_memory *ramfc = imem->ramfc;

	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
					    ((ramht->bits - 9) << 16) |
					    (ramht->gpuobj->addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);

	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
}
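/* Shared constructor for the NV04-family FIFOs: callers pass their channel
 * count and RAMFC layout. The highest channel id is reserved up front
 * (marked busy in base.mask) and serves as PFIFO's "inactive" channel.
 */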
int
nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       int index, int nr, const struct nv04_fifo_ramfc *ramfc,
	       struct nvkm_fifo **pfifo)
{
	struct nv04_fifo *fifo;
	int ret;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->ramfc = ramfc;
	*pfifo = &fifo->base;

	ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
	if (ret)
		return ret;

	set_bit(nr - 1, fifo->base.mask); /* inactive channel */
	return 0;
}
static const struct nvkm_fifo_func
nv04_fifo = {
	.init = nv04_fifo_init,
	.intr = nv04_fifo_intr,
	.pause = nv04_fifo_pause,
	.start = nv04_fifo_start,
	.chan = {
		&nv04_fifo_dma_oclass,
		NULL
	},
};
int
nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return nv04_fifo_new_(&nv04_fifo, device, index, 16,
			      nv04_fifo_ramfc, pfifo);
}