/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "channv50.h"
#include "rootnv50.h"

#include <core/client.h>
#include <core/notify.h>
#include <core/ramht.h>
#include <engine/dma.h>

#include <nvif/cl507d.h>
#include <nvif/event.h>
#include <nvif/unpack.h>
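/*
 * Debug aid: walk a channel's method list and print, for each method, the
 * value read back at offset +0 ("next") alongside the value at offset +c
 * ("prev"), flagging entries where the two differ.  The interpretation of
 * the two copies is inferred from this code, not from documentation.
 */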
static void
nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c,
		    const struct nv50_disp_mthd_list *list, int inst)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

	for (i = 0; list->data[i].mthd; i++) {
		if (list->data[i].addr) {
			u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
			u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
			u32 mthd = list->data[i].mthd + (list->mthd * inst);
			const char *name = list->data[i].name;
			char mods[16];

			if (prev != next)
				snprintf(mods, sizeof(mods), "-> %08x", next);
			else
				snprintf(mods, sizeof(mods), "%13c", ' ');

			nvkm_printk_(subdev, debug, info,
				     "\t%04x: %08x %s%s%s\n",
				     mthd, prev, mods, name ? " // " : "",
				     name ? name : "");
		}
	}
}
void
nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
{
	struct nv50_disp *disp = chan->root->disp;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	const struct nv50_disp_chan_mthd *mthd = chan->mthd;
	const struct nv50_disp_mthd_list *list;
	int i, j;

	if (debug > subdev->debug)
		return;

	for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
		u32 base = chan->head * mthd->addr;
		for (j = 0; j < mthd->data[i].nr; j++, base += list->addr) {
			const char *cname = mthd->name;
			const char *sname = "";
			char cname_[16], sname_[16];

			if (mthd->addr) {
				snprintf(cname_, sizeof(cname_), "%s %d",
					 mthd->name, chan->chid.user);
				cname = cname_;
			}

			if (mthd->data[i].nr > 1) {
				snprintf(sname_, sizeof(sname_), " - %s %d",
					 mthd->data[i].name, j);
				sname = sname_;
			}

			nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
			nv50_disp_mthd_list(disp, debug, base, mthd->prev,
					    list, j);
		}
	}
}
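/*
 * Channel completion notifications: bit 'index' (the user channel id) in
 * 0x610028 is used here as the event enable mask, and the matching bit in
 * 0x610020 as the pending/acknowledge bit.  That interpretation is inferred
 * from the way the two registers are used below, not from documentation.
 */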
static void
nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
	struct nvkm_device *device = disp->base.engine.subdev.device;
	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
	nvkm_wr32(device, 0x610020, 0x00000001 << index);
}
static void
nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
	struct nvkm_device *device = disp->base.engine.subdev.device;
	nvkm_wr32(device, 0x610020, 0x00000001 << index);
	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
}
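/* Deliver a payload-less completion event to everyone listening on 'chid'. */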
void
nv50_disp_chan_uevent_send(struct nv50_disp *disp, int chid)
{
	struct nvif_notify_uevent_rep {
	} rep;

	nvkm_event_send(&disp->uevent, 1, chid, &rep, sizeof(rep));
}
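/*
 * Notifier constructor: the request carries no payload, so just validate it
 * and bind the notifier to this channel's user id with a single event type.
 */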
int
nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
			   struct nvkm_notify *notify)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	union {
		struct nvif_notify_uevent_req none;
	} *args = data;
	int ret = -ENOSYS;

	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
		notify->size  = sizeof(struct nvif_notify_uevent_rep);
		notify->types = 1;
		notify->index = chan->chid.user;
		return 0;
	}

	return ret;
}
const struct nvkm_event_func
nv50_disp_chan_uevent = {
	.ctor = nv50_disp_chan_uevent_ctor,
	.init = nv50_disp_chan_uevent_init,
	.fini = nv50_disp_chan_uevent_fini,
};
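/*
 * Each display channel exposes its user registers in a window at
 * 0x640000 + chid.user * 0x1000; the rd32/wr32/map hooks below all address
 * that per-channel window.
 */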
static int
nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nv50_disp *disp = chan->root->disp;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	*data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr);
	return 0;
}
static int
nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nv50_disp *disp = chan->root->disp;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data);
	return 0;
}
static int
nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
		    struct nvkm_event **pevent)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nv50_disp *disp = chan->root->disp;
	switch (type) {
	case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
		*pevent = &disp->uevent;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}
static int
nv50_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
		   enum nvkm_object_map *type, u64 *addr, u64 *size)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nv50_disp *disp = chan->root->disp;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	*type = NVKM_OBJECT_MAP_IO;
	*addr = device->func->resource_addr(device, 0) +
		0x640000 + (chan->chid.user * 0x1000);
	*size = 0x001000;
	return 0;
}
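/*
 * Child objects (e.g. DMA objects bound to the channel) are created through
 * the channel implementation's own hooks; child_get advertises them and
 * patches our child_new in as the constructor.
 */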
static int
nv50_disp_chan_child_new(const struct nvkm_oclass *oclass,
			 void *data, u32 size, struct nvkm_object **pobject)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(oclass->parent);
	return chan->func->child_new(chan, oclass, data, size, pobject);
}
static int
nv50_disp_chan_child_get(struct nvkm_object *object, int index,
			 struct nvkm_oclass *oclass)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	if (chan->func->child_get) {
		int ret = chan->func->child_get(chan, index, oclass);
		if (ret == 0)
			oclass->ctor = nv50_disp_chan_child_new;
		return ret;
	}
	return -EINVAL;
}
static int
nv50_disp_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	chan->func->fini(chan);
	return 0;
}
static int
nv50_disp_chan_init(struct nvkm_object *object)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	return chan->func->init(chan);
}
static void *
nv50_disp_chan_dtor(struct nvkm_object *object)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nv50_disp *disp = chan->root->disp;
	if (chan->chid.user >= 0)
		disp->chan[chan->chid.user] = NULL;
	return chan->func->dtor ? chan->func->dtor(chan) : chan;
}
static const struct nvkm_object_func
nv50_disp_chan = {
	.dtor = nv50_disp_chan_dtor,
	.init = nv50_disp_chan_init,
	.fini = nv50_disp_chan_fini,
	.rd32 = nv50_disp_chan_rd32,
	.wr32 = nv50_disp_chan_wr32,
	.ntfy = nv50_disp_chan_ntfy,
	.map = nv50_disp_chan_map,
	.sclass = nv50_disp_chan_child_get,
};
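/*
 * Common constructor: wire up the nvkm_object and claim the per-display
 * channel slot indexed by the user channel id.  If the slot is already
 * taken, the id is set to -1 so the destructor won't clear another
 * channel's slot, and the constructor fails.
 */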
int
nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
		    const struct nv50_disp_chan_mthd *mthd,
		    struct nv50_disp_root *root, int ctrl, int user, int head,
		    const struct nvkm_oclass *oclass,
		    struct nv50_disp_chan *chan)
{
	struct nv50_disp *disp = root->disp;

	nvkm_object_ctor(&nv50_disp_chan, oclass, &chan->object);
	chan->func = func;
	chan->mthd = mthd;
	chan->root = root;
	chan->chid.ctrl = ctrl;
	chan->chid.user = user;
	chan->head = head;

	if (disp->chan[chan->chid.user]) {
		chan->chid.user = -1;
		return -EBUSY;
	}
	disp->chan[chan->chid.user] = chan;
	return 0;
}
int
nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
		    const struct nv50_disp_chan_mthd *mthd,
		    struct nv50_disp_root *root, int ctrl, int user, int head,
		    const struct nvkm_oclass *oclass,
		    struct nvkm_object **pobject)
{
	struct nv50_disp_chan *chan;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->object;

	return nv50_disp_chan_ctor(func, mthd, root, ctrl, user,
				   head, oclass, chan);
}