/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <core/client.h>
#include <core/gpuobj.h>
#include <core/notify.h>
#include <subdev/mc.h>

#include <nvif/event.h>
#include <nvif/unpack.h>
36 nvkm_fifo_recover_chan(struct nvkm_fifo
*fifo
, int chid
)
39 if (WARN_ON(!fifo
->func
->recover_chan
))
41 spin_lock_irqsave(&fifo
->lock
, flags
);
42 fifo
->func
->recover_chan(fifo
, chid
);
43 spin_unlock_irqrestore(&fifo
->lock
, flags
);
47 nvkm_fifo_pause(struct nvkm_fifo
*fifo
, unsigned long *flags
)
49 return fifo
->func
->pause(fifo
, flags
);
53 nvkm_fifo_start(struct nvkm_fifo
*fifo
, unsigned long *flags
)
55 return fifo
->func
->start(fifo
, flags
);
59 nvkm_fifo_chan_put(struct nvkm_fifo
*fifo
, unsigned long flags
,
60 struct nvkm_fifo_chan
**pchan
)
62 struct nvkm_fifo_chan
*chan
= *pchan
;
65 spin_unlock_irqrestore(&fifo
->lock
, flags
);
69 struct nvkm_fifo_chan
*
70 nvkm_fifo_chan_inst_locked(struct nvkm_fifo
*fifo
, u64 inst
)
72 struct nvkm_fifo_chan
*chan
;
73 list_for_each_entry(chan
, &fifo
->chan
, head
) {
74 if (chan
->inst
->addr
== inst
) {
75 list_del(&chan
->head
);
76 list_add(&chan
->head
, &fifo
->chan
);
83 struct nvkm_fifo_chan
*
84 nvkm_fifo_chan_inst(struct nvkm_fifo
*fifo
, u64 inst
, unsigned long *rflags
)
86 struct nvkm_fifo_chan
*chan
;
88 spin_lock_irqsave(&fifo
->lock
, flags
);
89 if ((chan
= nvkm_fifo_chan_inst_locked(fifo
, inst
))) {
93 spin_unlock_irqrestore(&fifo
->lock
, flags
);
97 struct nvkm_fifo_chan
*
98 nvkm_fifo_chan_chid(struct nvkm_fifo
*fifo
, int chid
, unsigned long *rflags
)
100 struct nvkm_fifo_chan
*chan
;
102 spin_lock_irqsave(&fifo
->lock
, flags
);
103 list_for_each_entry(chan
, &fifo
->chan
, head
) {
104 if (chan
->chid
== chid
) {
105 list_del(&chan
->head
);
106 list_add(&chan
->head
, &fifo
->chan
);
111 spin_unlock_irqrestore(&fifo
->lock
, flags
);
116 nvkm_fifo_kevent(struct nvkm_fifo
*fifo
, int chid
)
118 nvkm_event_send(&fifo
->kevent
, 1, chid
, NULL
, 0);
122 nvkm_fifo_kevent_ctor(struct nvkm_object
*object
, void *data
, u32 size
,
123 struct nvkm_notify
*notify
)
125 struct nvkm_fifo_chan
*chan
= nvkm_fifo_chan(object
);
129 notify
->index
= chan
->chid
;
135 static const struct nvkm_event_func
136 nvkm_fifo_kevent_func
= {
137 .ctor
= nvkm_fifo_kevent_ctor
,
141 nvkm_fifo_cevent_ctor(struct nvkm_object
*object
, void *data
, u32 size
,
142 struct nvkm_notify
*notify
)
153 static const struct nvkm_event_func
154 nvkm_fifo_cevent_func
= {
155 .ctor
= nvkm_fifo_cevent_ctor
,
159 nvkm_fifo_cevent(struct nvkm_fifo
*fifo
)
161 nvkm_event_send(&fifo
->cevent
, 1, 0, NULL
, 0);
165 nvkm_fifo_uevent_fini(struct nvkm_event
*event
, int type
, int index
)
167 struct nvkm_fifo
*fifo
= container_of(event
, typeof(*fifo
), uevent
);
168 fifo
->func
->uevent_fini(fifo
);
172 nvkm_fifo_uevent_init(struct nvkm_event
*event
, int type
, int index
)
174 struct nvkm_fifo
*fifo
= container_of(event
, typeof(*fifo
), uevent
);
175 fifo
->func
->uevent_init(fifo
);
179 nvkm_fifo_uevent_ctor(struct nvkm_object
*object
, void *data
, u32 size
,
180 struct nvkm_notify
*notify
)
183 struct nvif_notify_uevent_req none
;
187 if (!(ret
= nvif_unvers(ret
, &data
, &size
, req
->none
))) {
188 notify
->size
= sizeof(struct nvif_notify_uevent_rep
);
196 static const struct nvkm_event_func
197 nvkm_fifo_uevent_func
= {
198 .ctor
= nvkm_fifo_uevent_ctor
,
199 .init
= nvkm_fifo_uevent_init
,
200 .fini
= nvkm_fifo_uevent_fini
,
204 nvkm_fifo_uevent(struct nvkm_fifo
*fifo
)
206 struct nvif_notify_uevent_rep rep
= {
208 nvkm_event_send(&fifo
->uevent
, 1, 0, &rep
, sizeof(rep
));
212 nvkm_fifo_class_new(struct nvkm_device
*device
,
213 const struct nvkm_oclass
*oclass
, void *data
, u32 size
,
214 struct nvkm_object
**pobject
)
216 const struct nvkm_fifo_chan_oclass
*sclass
= oclass
->engn
;
217 struct nvkm_fifo
*fifo
= nvkm_fifo(oclass
->engine
);
218 return sclass
->ctor(fifo
, oclass
, data
, size
, pobject
);
221 static const struct nvkm_device_oclass
223 .ctor
= nvkm_fifo_class_new
,
227 nvkm_fifo_class_get(struct nvkm_oclass
*oclass
, int index
,
228 const struct nvkm_device_oclass
**class)
230 struct nvkm_fifo
*fifo
= nvkm_fifo(oclass
->engine
);
231 const struct nvkm_fifo_chan_oclass
*sclass
;
234 if (fifo
->func
->class_get
) {
235 int ret
= fifo
->func
->class_get(fifo
, index
, &sclass
);
237 oclass
->base
= sclass
->base
;
238 oclass
->engn
= sclass
;
239 *class = &nvkm_fifo_class
;
245 while ((sclass
= fifo
->func
->chan
[c
])) {
247 oclass
->base
= sclass
->base
;
248 oclass
->engn
= sclass
;
249 *class = &nvkm_fifo_class
;
258 nvkm_fifo_intr(struct nvkm_engine
*engine
)
260 struct nvkm_fifo
*fifo
= nvkm_fifo(engine
);
261 fifo
->func
->intr(fifo
);
265 nvkm_fifo_fini(struct nvkm_engine
*engine
, bool suspend
)
267 struct nvkm_fifo
*fifo
= nvkm_fifo(engine
);
268 if (fifo
->func
->fini
)
269 fifo
->func
->fini(fifo
);
274 nvkm_fifo_oneinit(struct nvkm_engine
*engine
)
276 struct nvkm_fifo
*fifo
= nvkm_fifo(engine
);
277 if (fifo
->func
->oneinit
)
278 return fifo
->func
->oneinit(fifo
);
283 nvkm_fifo_preinit(struct nvkm_engine
*engine
)
285 nvkm_mc_reset(engine
->subdev
.device
, NVKM_ENGINE_FIFO
);
289 nvkm_fifo_init(struct nvkm_engine
*engine
)
291 struct nvkm_fifo
*fifo
= nvkm_fifo(engine
);
292 fifo
->func
->init(fifo
);
297 nvkm_fifo_dtor(struct nvkm_engine
*engine
)
299 struct nvkm_fifo
*fifo
= nvkm_fifo(engine
);
301 if (fifo
->func
->dtor
)
302 data
= fifo
->func
->dtor(fifo
);
303 nvkm_event_fini(&fifo
->kevent
);
304 nvkm_event_fini(&fifo
->cevent
);
305 nvkm_event_fini(&fifo
->uevent
);
309 static const struct nvkm_engine_func
311 .dtor
= nvkm_fifo_dtor
,
312 .preinit
= nvkm_fifo_preinit
,
313 .oneinit
= nvkm_fifo_oneinit
,
314 .init
= nvkm_fifo_init
,
315 .fini
= nvkm_fifo_fini
,
316 .intr
= nvkm_fifo_intr
,
317 .base
.sclass
= nvkm_fifo_class_get
,
321 nvkm_fifo_ctor(const struct nvkm_fifo_func
*func
, struct nvkm_device
*device
,
322 int index
, int nr
, struct nvkm_fifo
*fifo
)
327 INIT_LIST_HEAD(&fifo
->chan
);
328 spin_lock_init(&fifo
->lock
);
330 if (WARN_ON(fifo
->nr
> NVKM_FIFO_CHID_NR
))
331 fifo
->nr
= NVKM_FIFO_CHID_NR
;
334 bitmap_clear(fifo
->mask
, 0, fifo
->nr
);
336 ret
= nvkm_engine_ctor(&nvkm_fifo
, device
, index
, true, &fifo
->engine
);
340 if (func
->uevent_init
) {
341 ret
= nvkm_event_init(&nvkm_fifo_uevent_func
, 1, 1,
347 ret
= nvkm_event_init(&nvkm_fifo_cevent_func
, 1, 1, &fifo
->cevent
);
351 return nvkm_event_init(&nvkm_fifo_kevent_func
, 1, nr
, &fifo
->kevent
);