/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "channv50.h"

#include <core/client.h>
#include <core/notify.h>
#include <core/oproxy.h>
#include <core/ramht.h>
#include <engine/dma.h>

#include <nvif/cl507d.h>
#include <nvif/event.h>
#include <nvif/unpack.h>
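
/*
 * Debug helper: for each method in a nv50_disp_mthd_list, read back what
 * appear to be two hardware copies of its value (at offset 0 and at the
 * caller-supplied offset 'c') and print them at the requested debug level.
 */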
static void
nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c,
		    const struct nv50_disp_mthd_list *list, int inst)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

	for (i = 0; list->data[i].mthd; i++) {
		if (list->data[i].addr) {
			u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
			u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
			u32 mthd = list->data[i].mthd + (list->mthd * inst);
			const char *name = list->data[i].name;
			char mods[16];

			if (prev != next)
				snprintf(mods, sizeof(mods), "-> %08x", next);
			else
				snprintf(mods, sizeof(mods), "%13c", ' ');

			nvkm_printk_(subdev, debug, info,
				     "\t%04x: %08x %s%s%s\n",
				     mthd, prev, mods, name ? " // " : "",
				     name ? name : "");
		}
	}
}
void
nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
{
	struct nv50_disp *disp = chan->disp;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	const struct nv50_disp_chan_mthd *mthd = chan->mthd;
	const struct nv50_disp_mthd_list *list;
	int i, j;

	if (debug > subdev->debug)
		return;
	if (!mthd)
		return;

	for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
		u32 base = chan->head * mthd->addr;
		for (j = 0; j < mthd->data[i].nr; j++, base += list->addr) {
			const char *cname = mthd->name;
			const char *sname = "";
			char cname_[16], sname_[16];

			if (mthd->addr) {
				snprintf(cname_, sizeof(cname_), "%s %d",
					 mthd->name, chan->chid.user);
				cname = cname_;
			}

			if (mthd->data[i].nr > 1) {
				snprintf(sname_, sizeof(sname_), " - %s %d",
					 mthd->data[i].name, j);
				sname = sname_;
			}

			nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
			nv50_disp_mthd_list(disp, debug, base, mthd->prev,
					    list, j);
		}
	}
}
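
/*
 * Channel completion notifier ("uevent") plumbing.  The init/fini hooks
 * below toggle what appear to be per-channel interrupt enable bits in
 * 0x610028 and acknowledge any pending status in 0x610020 for the given
 * channel index.
 */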
static void
nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
	struct nvkm_device *device = disp->base.engine.subdev.device;
	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
	nvkm_wr32(device, 0x610020, 0x00000001 << index);
}
static void
nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
	struct nvkm_device *device = disp->base.engine.subdev.device;
	nvkm_wr32(device, 0x610020, 0x00000001 << index);
	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
}
void
nv50_disp_chan_uevent_send(struct nv50_disp *disp, int chid)
{
	struct nvif_notify_uevent_rep {
	} rep;

	nvkm_event_send(&disp->uevent, 1, chid, &rep, sizeof(rep));
}
int
nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
			   struct nvkm_notify *notify)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	union {
		struct nvif_notify_uevent_req none;
	} *args = data;
	int ret = -ENOSYS;

	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
		notify->size  = sizeof(struct nvif_notify_uevent_rep);
		notify->types = 1;
		notify->index = chan->chid.user;
		return 0;
	}

	return ret;
}
const struct nvkm_event_func
nv50_disp_chan_uevent = {
	.ctor = nv50_disp_chan_uevent_ctor,
	.init = nv50_disp_chan_uevent_init,
	.fini = nv50_disp_chan_uevent_fini,
};
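
/*
 * Each channel exposes a page of user registers; as computed below, these
 * start at 0x640000 and are spaced 0x1000 bytes apart per user channel ID.
 */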
u64
nv50_disp_chan_user(struct nv50_disp_chan *chan, u64 *psize)
{
	*psize = 0x1000;
	return 0x640000 + (chan->chid.user * 0x1000);
}
void
nv50_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
{
	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
	const u32 mask = 0x00010001 << chan->chid.user;
	const u32 data = en ? 0x00010000 << chan->chid.user : 0x00000000;
	nvkm_mask(device, 0x610028, mask, data);
}
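
/*
 * rd32/wr32 forward object register accesses into the channel's user
 * register window, using the base offset reported by the channel's
 * ->user() hook.
 */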
static int
nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
	u64 size, base = chan->func->user(chan, &size);
	*data = nvkm_rd32(device, base + addr);
	return 0;
}
static int
nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
	u64 size, base = chan->func->user(chan, &size);
	nvkm_wr32(device, base + addr, data);
	return 0;
}
static int
nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
		    struct nvkm_event **pevent)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nv50_disp *disp = chan->disp;
	switch (type) {
	case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
		*pevent = &disp->uevent;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}
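
/*
 * Mapping a channel object hands back the physical address of its user
 * register window inside the device's first register resource (BAR0 on
 * PCI devices), mapped as I/O memory.
 */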
static int
nv50_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
		   enum nvkm_object_map *type, u64 *addr, u64 *size)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
	const u64 base = device->func->resource_addr(device, 0);
	*type = NVKM_OBJECT_MAP_IO;
	*addr = base + chan->func->user(chan, size);
	return 0;
}
struct nv50_disp_chan_object {
	struct nvkm_oproxy oproxy;
	struct nv50_disp *disp;
	int hash;
};
static void
nv50_disp_chan_child_del_(struct nvkm_oproxy *base)
{
	struct nv50_disp_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	nvkm_ramht_remove(object->disp->ramht, object->hash);
}
static const struct nvkm_oproxy_func
nv50_disp_chan_child_func_ = {
	.dtor[0] = nv50_disp_chan_child_del_,
};
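
/*
 * Child objects created on a display channel (DMA objects) are wrapped in
 * an oproxy so that the RAMHT entry added by ->bind() can be removed again
 * when the child is destroyed.
 */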
static int
nv50_disp_chan_child_new(const struct nvkm_oclass *oclass,
			 void *argv, u32 argc, struct nvkm_object **pobject)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(oclass->parent);
	struct nv50_disp *disp = chan->disp;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	const struct nvkm_device_oclass *sclass = oclass->priv;
	struct nv50_disp_chan_object *object;
	int ret;

	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_oproxy_ctor(&nv50_disp_chan_child_func_, oclass, &object->oproxy);
	object->disp = disp;
	*pobject = &object->oproxy.base;

	ret = sclass->ctor(device, oclass, argv, argc, &object->oproxy.object);
	if (ret)
		return ret;

	object->hash = chan->func->bind(chan, object->oproxy.object,
					oclass->handle);
	if (object->hash < 0)
		return object->hash;

	return 0;
}
static int
nv50_disp_chan_child_get(struct nvkm_object *object, int index,
			 struct nvkm_oclass *sclass)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
	const struct nvkm_device_oclass *oclass = NULL;

	if (chan->func->bind)
		sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
	else
		sclass->engine = NULL;

	if (sclass->engine && sclass->engine->func->base.sclass) {
		sclass->engine->func->base.sclass(sclass, index, &oclass);
		if (oclass) {
			sclass->ctor = nv50_disp_chan_child_new;
			sclass->priv = oclass;
			return 0;
		}
	}

	return -EINVAL;
}
static int
nv50_disp_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	chan->func->fini(chan);
	chan->func->intr(chan, false);
	return 0;
}
static int
nv50_disp_chan_init(struct nvkm_object *object)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	chan->func->intr(chan, true);
	return chan->func->init(chan);
}
static void *
nv50_disp_chan_dtor(struct nvkm_object *object)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nv50_disp *disp = chan->disp;
	if (chan->chid.user >= 0)
		disp->chan[chan->chid.user] = NULL;
	nvkm_memory_unref(&chan->memory);
	return chan;
}
static const struct nvkm_object_func
nv50_disp_chan = {
	.dtor = nv50_disp_chan_dtor,
	.init = nv50_disp_chan_init,
	.fini = nv50_disp_chan_fini,
	.rd32 = nv50_disp_chan_rd32,
	.wr32 = nv50_disp_chan_wr32,
	.ntfy = nv50_disp_chan_ntfy,
	.map = nv50_disp_chan_map,
	.sclass = nv50_disp_chan_child_get,
};
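
/*
 * Common constructor shared by the nv50-family display channel classes:
 * allocates the channel, records its control/user channel IDs, and claims
 * its slot in disp->chan[], failing with -EBUSY if that slot is taken.
 */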
int
nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
		    const struct nv50_disp_chan_mthd *mthd,
		    struct nv50_disp *disp, int ctrl, int user, int head,
		    const struct nvkm_oclass *oclass,
		    struct nvkm_object **pobject)
{
	struct nv50_disp_chan *chan;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->object;

	nvkm_object_ctor(&nv50_disp_chan, oclass, &chan->object);
	chan->func = func;
	chan->mthd = mthd;
	chan->disp = disp;
	chan->chid.ctrl = ctrl;
	chan->chid.user = user;
	chan->head = head;

	if (disp->chan[chan->chid.user]) {
		chan->chid.user = -1;
		return -EBUSY;
	}
	disp->chan[chan->chid.user] = chan;
	return 0;
}