/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
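
/* Per-channel fixed-function context (RAMFC) layout: one record per
 * channel, 32 bytes on pre-NV17 chipsets and 64 bytes from NV17 on,
 * where the larger record also holds the acquire/semaphore state
 * saved and restored below.
 */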
#define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE))
#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
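
/* Return the id of the channel whose context is currently resident in
 * PFIFO's CACHE1.
 */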
int
nv10_fifo_channel_id(struct drm_device *dev)
{
        return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
                        NV10_PFIFO_CACHE1_PUSH1_CHID_MASK;
}
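
/* Allocate a channel's RAMFC entry and map its USER control window,
 * then seed the RAMFC image to match what dumps of the binary driver
 * show once a channel has been put into DMA mode.
 */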
int
nv10_fifo_create_context(struct nouveau_channel *chan)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
        uint32_t fc = NV10_RAMFC(chan->id);
        int ret;

        ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
                                      NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
                                      NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
        if (ret)
                return ret;

        chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
                             NV03_USER(chan->id), PAGE_SIZE);
        if (!chan->user)
                return -ENOMEM;

        /* Fill entries that are seen filled in dumps of the nvidia driver
         * just after a channel is put into DMA mode
         */
        nv_wi32(dev, fc +  0, chan->pushbuf_base);
        nv_wi32(dev, fc +  4, chan->pushbuf_base);
        nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
        nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
                              NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
                              NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
                              NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
                              0);

        /* enable the fifo dma operation */
        nv_wr32(dev, NV04_PFIFO_MODE,
                nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
        return 0;
}
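
/* Copy a channel's RAMFC image into the live CACHE1 registers.  The
 * entries at offsets 32..48 only exist in the 64-byte RAMFC records
 * used by NV17 and later chipsets.
 */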
static void
nv10_fifo_do_load_context(struct drm_device *dev, int chid)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t fc = NV10_RAMFC(chid), tmp;

        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
        nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));

        tmp = nv_ri32(dev, fc + 12);
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);

        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
        nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
        nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));

        if (dev_priv->chipset < 0x17)
                goto out;

        nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
        tmp = nv_ri32(dev, fc + 36);
        nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
        nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
        nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
        nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));

out:
        /* Reset CACHE1's internal get/put pointers */
        nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
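
/* Make a channel's context current in CACHE1 and re-enable pusher
 * fetches for it.
 */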
int
nv10_fifo_load_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        uint32_t tmp;

        nv10_fifo_do_load_context(dev, chan->id);

        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
                     NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);

        /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
        tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);

        return 0;
}
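
/* Save the resident channel's CACHE1 state back into its RAMFC entry,
 * then park PFIFO on the reserved null channel (channels - 1).
 */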
int
nv10_fifo_unload_context(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        uint32_t fc, tmp;
        int chid;

        chid = pfifo->channel_id(dev);
        if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
                return 0;
        fc = NV10_RAMFC(chid);

        nv_wi32(dev, fc +  0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
        nv_wi32(dev, fc +  4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
        nv_wi32(dev, fc +  8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
        tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
        tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
        nv_wi32(dev, fc + 12, tmp);
        nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
        nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
        nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
        nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));

        if (dev_priv->chipset < 0x17)
                goto out;

        nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
        tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
        nv_wi32(dev, fc + 36, tmp);
        nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
        nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
        nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));

out:
        nv10_fifo_do_load_context(dev, pfifo->channels - 1);
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
        return 0;
}
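
/* Pulse PFIFO's bit in PMC_ENABLE to reset the engine, then write
 * initial values into a set of PFIFO registers the driver only knows
 * by raw offset.
 */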
static void
nv10_fifo_init_reset(struct drm_device *dev)
{
        nv_wr32(dev, NV03_PMC_ENABLE,
                nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
        nv_wr32(dev, NV03_PMC_ENABLE,
                nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);

        nv_wr32(dev, 0x003224, 0x000f0078);
        nv_wr32(dev, 0x002044, 0x0101ffff);
        nv_wr32(dev, 0x002040, 0x000000ff);
        nv_wr32(dev, 0x002500, 0x00000000);
        nv_wr32(dev, 0x003000, 0x00000000);
        nv_wr32(dev, 0x003050, 0x00000000);

        nv_wr32(dev, 0x003258, 0x00000000);
        nv_wr32(dev, 0x003210, 0x00000000);
        nv_wr32(dev, 0x003270, 0x00000000);
}
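
/* Point PFIFO at the RAMHT, RAMRO and RAMFC instance memory, encoding
 * the hash-table search depth and size alongside the RAMHT address.
 */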
static void
nv10_fifo_init_ramxx(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
                                       ((dev_priv->ramht->bits - 9) << 16) |
                                       (dev_priv->ramht->gpuobj->pinst >> 8));
        nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);

        if (dev_priv->chipset < 0x17) {
                nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
        } else {
                nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) |
                                               (1 << 16) /* 64 byte entry */);
                /* XXX nvidia blob sets bits 18, 21, 23 for nv20 & nv30 */
        }
}
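
/* Hook the shared nv04 FIFO interrupt handler to PMC interrupt line 8
 * (PFIFO), then ack any pending PFIFO interrupts and unmask all
 * interrupt sources.
 */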
static void
nv10_fifo_init_intr(struct drm_device *dev)
{
        nouveau_irq_register(dev, 8, nv04_fifo_isr);
        nv_wr32(dev, 0x002100, 0xffffffff);
        nv_wr32(dev, 0x002140, 0xffffffff);
}
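
/* Bring PFIFO up from scratch: reset it, reprogram the RAM* pointers,
 * park CACHE1 on the null channel, enable interrupts, and switch any
 * already-created channels back into DMA mode.
 */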
int
nv10_fifo_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        int i;

        nv10_fifo_init_reset(dev);
        nv10_fifo_init_ramxx(dev);

        nv10_fifo_do_load_context(dev, pfifo->channels - 1);
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);

        nv10_fifo_init_intr(dev);
        pfifo->enable(dev);
        pfifo->reassign(dev, true);

        for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
                if (dev_priv->channels.ptr[i]) {
                        uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
                        nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
                }
        }

        return 0;
}