/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nv31.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/fifo.h>

#include <nvif/class.h>
/*******************************************************************************
 * MPEG object classes
 ******************************************************************************/
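/*
 * Bind the MPEG class object into instance memory: a 16-byte gpuobj is
 * allocated and its first word set to the object class, with the remaining
 * words cleared.
 */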
static int
nv31_mpeg_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
                      int align, struct nvkm_gpuobj **pgpuobj)
{
        int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
                                  false, parent, pgpuobj);
        if (ret == 0) {
                nvkm_kmap(*pgpuobj);
                nvkm_wo32(*pgpuobj, 0x00, object->oclass);
                nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
                nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
                nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
                nvkm_done(*pgpuobj);
        }
        return ret;
}
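/*
 * Object class functions; non-static so related PMPEG implementations can
 * reference the same object layout.
 */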
const struct nvkm_object_func
nv31_mpeg_object = {
        .bind = nv31_mpeg_object_bind,
};
/*******************************************************************************
 * PMPEG context
 ******************************************************************************/
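/*
 * Channel destructor: detach this context from the engine under the engine
 * lock before handing the allocation back to the caller for freeing.
 */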
static void *
nv31_mpeg_chan_dtor(struct nvkm_object *object)
{
        struct nv31_mpeg_chan *chan = nv31_mpeg_chan(object);
        struct nv31_mpeg *mpeg = chan->mpeg;
        unsigned long flags;

        spin_lock_irqsave(&mpeg->engine.lock, flags);
        if (mpeg->chan == chan)
                mpeg->chan = NULL;
        spin_unlock_irqrestore(&mpeg->engine.lock, flags);
        return chan;
}
static const struct nvkm_object_func
nv31_mpeg_chan = {
        .dtor = nv31_mpeg_chan_dtor,
};
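/*
 * Create a channel context.  Only a single MPEG context can be active at a
 * time, so creation fails if another channel already owns the engine.
 */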
int
nv31_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
                   const struct nvkm_oclass *oclass,
                   struct nvkm_object **pobject)
{
        struct nv31_mpeg *mpeg = nv31_mpeg(oclass->engine);
        struct nv31_mpeg_chan *chan;
        unsigned long flags;
        int ret = -EBUSY;

        if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_object_ctor(&nv31_mpeg_chan, oclass, &chan->object);
        chan->mpeg = mpeg;
        chan->fifo = fifoch;
        *pobject = &chan->object;

        spin_lock_irqsave(&mpeg->engine.lock, flags);
        if (!mpeg->chan) {
                mpeg->chan = chan;
                ret = 0;
        }
        spin_unlock_irqrestore(&mpeg->engine.lock, flags);
        return ret;
}
/*******************************************************************************
 * PMPEG engine/subdev functions
 ******************************************************************************/
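/*
 * Mirror the framebuffer tile region 'i' configuration into PMPEG's own
 * tile registers at 0x00b000 + (i * 0x10).
 */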
void
nv31_mpeg_tile(struct nvkm_engine *engine, int i, struct nvkm_fb_tile *tile)
{
        struct nv31_mpeg *mpeg = nv31_mpeg(engine);
        struct nvkm_device *device = mpeg->engine.subdev.device;

        nvkm_wr32(device, 0x00b008 + (i * 0x10), tile->pitch);
        nvkm_wr32(device, 0x00b004 + (i * 0x10), tile->limit);
        nvkm_wr32(device, 0x00b000 + (i * 0x10), tile->addr);
}
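/*
 * Software method handler for the DMA object methods: the referenced DMA
 * object is read back through the 0x700000 (instance memory) window,
 * validated, and its base/limit programmed into the PMPEG registers.
 */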
static bool
nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
{
        struct nv31_mpeg *mpeg = nv31_mpeg(device->mpeg);
        struct nvkm_subdev *subdev = &mpeg->engine.subdev;
        u32 inst = data << 4;
        u32 dma0 = nvkm_rd32(device, 0x700000 + inst);
        u32 dma1 = nvkm_rd32(device, 0x700004 + inst);
        u32 dma2 = nvkm_rd32(device, 0x700008 + inst);
        u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
        u32 size = dma1 + 1;

        /* only allow linear DMA objects */
        if (!(dma0 & 0x00002000)) {
                nvkm_error(subdev, "inst %08x dma0 %08x dma1 %08x dma2 %08x\n",
                           inst, dma0, dma1, dma2);
                return false;
        }

        if (mthd == 0x0190) {
                /* DMA_CMD */
                nvkm_mask(device, 0x00b300, 0x00010000,
                          (dma0 & 0x00030000) ? 0x00010000 : 0);
                nvkm_wr32(device, 0x00b334, base);
                nvkm_wr32(device, 0x00b324, size);
        } else
        if (mthd == 0x01a0) {
                /* DMA_DATA */
                nvkm_mask(device, 0x00b300, 0x00020000,
                          (dma0 & 0x00030000) ? 0x00020000 : 0);
                nvkm_wr32(device, 0x00b360, base);
                nvkm_wr32(device, 0x00b364, size);
        } else {
                /* DMA_IMAGE, VRAM only */
                if (dma0 & 0x00030000)
                        return false;

                nvkm_wr32(device, 0x00b370, base);
                nvkm_wr32(device, 0x00b374, size);
        }

        return true;
}
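/*
 * Dispatch trapped software methods; only the DMA object methods are
 * handled, via the chipset-specific mthd_dma implementation.
 */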
static bool
nv31_mpeg_mthd(struct nv31_mpeg *mpeg, u32 mthd, u32 data)
{
        struct nvkm_device *device = mpeg->engine.subdev.device;
        switch (mthd) {
        case 0x190:
        case 0x1a0:
        case 0x1b0:
                return mpeg->func->mthd_dma(device, mthd, data);
        default:
                break;
        }
        return false;
}
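/*
 * PMPEG interrupt handler: decode the trapped method, let the software
 * method handler consume what it can, acknowledge the interrupt, and log
 * anything left unhandled.
 */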
static void
nv31_mpeg_intr(struct nvkm_engine *engine)
{
        struct nv31_mpeg *mpeg = nv31_mpeg(engine);
        struct nvkm_subdev *subdev = &mpeg->engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x00b100);
        u32 type = nvkm_rd32(device, 0x00b230);
        u32 mthd = nvkm_rd32(device, 0x00b234);
        u32 data = nvkm_rd32(device, 0x00b238);
        u32 show = stat;
        unsigned long flags;

        spin_lock_irqsave(&mpeg->engine.lock, flags);

        if (stat & 0x01000000) {
                /* happens on initial binding of the object */
                if (type == 0x00000020 && mthd == 0x0000) {
                        nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
                        show &= ~0x01000000;
                }

                if (type == 0x00000010) {
                        if (nv31_mpeg_mthd(mpeg, mthd, data))
                                show &= ~0x01000000;
                }
        }

        nvkm_wr32(device, 0x00b100, stat);
        nvkm_wr32(device, 0x00b230, 0x00000001);

        if (show) {
                nvkm_error(subdev, "ch %d [%s] %08x %08x %08x %08x\n",
                           mpeg->chan ? mpeg->chan->fifo->chid : -1,
                           mpeg->chan ? mpeg->chan->object.client->name :
                           "unknown", stat, type, mthd, data);
        }

        spin_unlock_irqrestore(&mpeg->engine.lock, flags);
}
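/*
 * Engine initialisation: program the VPE/PMPEG control registers, write the
 * interrupt status/enable registers, then wait up to 2ms for bit 0 of
 * 0x00b200 to clear.
 */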
int
nv31_mpeg_init(struct nvkm_engine *mpeg)
{
        struct nvkm_subdev *subdev = &mpeg->subdev;
        struct nvkm_device *device = subdev->device;

        /* VPE init */
        nvkm_wr32(device, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
        nvkm_wr32(device, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */

        /* PMPEG init */
        nvkm_wr32(device, 0x00b32c, 0x00000000);
        nvkm_wr32(device, 0x00b314, 0x00000100);
        nvkm_wr32(device, 0x00b220, 0x00000031);
        nvkm_wr32(device, 0x00b300, 0x02001ec1);
        nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);

        nvkm_wr32(device, 0x00b100, 0xffffffff);
        nvkm_wr32(device, 0x00b140, 0xffffffff);

        if (nvkm_msec(device, 2000,
                if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
                        break;
        ) < 0) {
                nvkm_error(subdev, "timeout %08x\n",
                           nvkm_rd32(device, 0x00b200));
                return -EBUSY;
        }

        return 0;
}
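/*
 * Engine destructor: return the nv31_mpeg allocation so the core can free it.
 */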
static void *
nv31_mpeg_dtor(struct nvkm_engine *engine)
{
        return nv31_mpeg(engine);
}
static const struct nvkm_engine_func
nv31_mpeg_ = {
        .dtor = nv31_mpeg_dtor,
        .init = nv31_mpeg_init,
        .intr = nv31_mpeg_intr,
        .tile = nv31_mpeg_tile,
        .fifo.cclass = nv31_mpeg_chan_new,
        .sclass = {
                { -1, -1, NV31_MPEG, &nv31_mpeg_object },
                {}
        }
};
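/*
 * Common constructor, parameterised by an nv31_mpeg_func so that chipset
 * variants can provide their own DMA method validation.
 */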
int
nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
               int index, struct nvkm_engine **pmpeg)
{
        struct nv31_mpeg *mpeg;

        if (!(mpeg = kzalloc(sizeof(*mpeg), GFP_KERNEL)))
                return -ENOMEM;
        mpeg->func = func;
        *pmpeg = &mpeg->engine;

        return nvkm_engine_ctor(&nv31_mpeg_, device, index,
                                true, &mpeg->engine);
}
static const struct nv31_mpeg_func
nv31_mpeg = {
        .mthd_dma = nv31_mpeg_mthd_dma,
};
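/* NV31 entry point: construct the engine using the NV31 method handlers. */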
int
nv31_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
{
        return nv31_mpeg_new_(&nv31_mpeg, device, index, pmpeg);
}