/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "channv50.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>
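
/* Byte offset of an engine's context pointers within the channel's
 * engine context table (chan->eng), or -1 for engines that have no
 * per-channel context on nv50 (DMAOBJ, SW).
 */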
static int
nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
{
	switch (engine->subdev.index) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW    : return -1;
	case NVKM_ENGINE_GR    : return 0x0000;
	case NVKM_ENGINE_MPEG  : return 0x0060;
	default:
		WARN_ON(1);
		return -1;
	}
}
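
/* Kick the channel's context off the hardware and, on success, clear the
 * engine's entry in the channel's context table.
 */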
static int
nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine, bool suspend)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int offset, ret = 0;
	u32 me;

	offset = nv50_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;

	/* HW bug workaround:
	 *
	 * PFIFO will hang forever if the connected engines don't report
	 * that they've processed the context switch request.
	 *
	 * In order for the kickoff to work, we need to ensure all the
	 * connected engines are in a state where they can answer.
	 *
	 * Newer chipsets don't seem to suffer from this issue, and well,
	 * there's also an "ignore these engines" bitmask reg we can use
	 * if we hit the issue there..
	 */
	me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);

	/* do the kickoff... */
	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
			   chan->base.chid, chan->base.object.client->name);
		if (suspend)
			ret = -EBUSY;
	}
	nvkm_wr32(device, 0x00b860, me);

	if (ret == 0) {
		nvkm_kmap(chan->eng);
		nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
		nvkm_done(chan->eng);
	}

	return ret;
}
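
/* Program the engine's context table entry with the start/limit addresses
 * of the context object bound in nv50_fifo_chan_engine_ctor().
 */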
static int
nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
	u64 limit, start;
	int offset;

	offset = nv50_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;
	limit = engn->addr + engn->size - 1;
	start = engn->addr;

	nvkm_kmap(chan->eng);
	nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
	nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
	nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
	nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
					    upper_32_bits(start));
	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
	nvkm_done(chan->eng);
	return 0;
}
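
/* Release the per-engine context object for this channel. */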
static void
nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
}
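
/* Bind @object as @engine's per-channel context; engines without a
 * context slot are silently skipped.
 */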
static int
nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine,
			   struct nvkm_object *object)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	int engn = engine->subdev.index;

	if (nv50_fifo_chan_engine_addr(engine) < 0)
		return 0;

	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
}
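
/* Remove an object from the channel's hash table using the cookie
 * returned by nvkm_ramht_insert().
 */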
static void
nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *base, int cookie)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	nvkm_ramht_remove(chan->ramht, cookie);
}
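
/* Hash an object's handle into RAMHT; the context value encodes which
 * engine the object belongs to.
 */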
static int
nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
			   struct nvkm_object *object)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	u32 handle = object->handle;
	u32 context;

	switch (object->engine->subdev.index) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW    : context = 0x00000000; break;
	case NVKM_ENGINE_GR    : context = 0x00100000; break;
	case NVKM_ENGINE_MPEG  : context = 0x00200000; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
}
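
/* Remove the channel from the runlist and clear its PFIFO channel-table
 * entry; the hardware unloads the context as a side effect.
 */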
static void
nv50_fifo_chan_fini(struct nvkm_fifo_chan *base)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	/* remove channel from runlist, fifo will unload context */
	nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
	nv50_fifo_runlist_update(fifo);
	nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
}
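
/* Enable the channel: point its PFIFO channel-table entry at RAMFC and
 * rebuild the runlist.
 */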
static void
nv50_fifo_chan_init(struct nvkm_fifo_chan *base)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u64 addr = chan->ramfc->addr >> 12;
	u32 chid = chan->base.chid;

	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
	nv50_fifo_runlist_update(fifo);
}
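
/* Tear down the hash table and instance objects created at construction. */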
static void *
nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	nvkm_ramht_del(&chan->ramht);
	nvkm_gpuobj_del(&chan->pgd);
	nvkm_gpuobj_del(&chan->eng);
	nvkm_gpuobj_del(&chan->cache);
	nvkm_gpuobj_del(&chan->ramfc);
	return chan;
}

static const struct nvkm_fifo_chan_func
nv50_fifo_chan_func = {
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.engine_ctor = nv50_fifo_chan_engine_ctor,
	.engine_dtor = nv50_fifo_chan_engine_dtor,
	.engine_init = nv50_fifo_chan_engine_init,
	.engine_fini = nv50_fifo_chan_engine_fini,
	.object_ctor = nv50_fifo_chan_object_ctor,
	.object_dtor = nv50_fifo_chan_object_dtor,
};
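
/* Shared constructor for nv50 channel classes: creates the core channel,
 * then allocates RAMFC, the engine context table, the page directory and
 * RAMHT within the channel's instance block.
 */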
int
nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
		    const struct nvkm_oclass *oclass,
		    struct nv50_fifo_chan *chan)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	if (!vmm)
		return -EINVAL;

	ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
				  0x10000, 0x1000, false, vmm, push,
				  (1ULL << NVKM_ENGINE_DMAOBJ) |
				  (1ULL << NVKM_ENGINE_SW) |
				  (1ULL << NVKM_ENGINE_GR) |
				  (1ULL << NVKM_ENGINE_MPEG),
				  0, 0xc00000, 0x2000, oclass, &chan->base);
	chan->fifo = fifo;
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->base.inst,
			      &chan->ramfc);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->base.inst,
			      &chan->eng);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
			      &chan->pgd);
	if (ret)
		return ret;

	return nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
}