/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
24 #include <core/engine.h>
25 #include <core/device.h>
26 #include <core/option.h>
28 #include <subdev/fb.h>
31 nvkm_engine_chsw_load(struct nvkm_engine
*engine
)
33 if (engine
->func
->chsw_load
)
34 return engine
->func
->chsw_load(engine
);
39 nvkm_engine_unref(struct nvkm_engine
**pengine
)
41 struct nvkm_engine
*engine
= *pengine
;
43 mutex_lock(&engine
->subdev
.mutex
);
44 if (--engine
->usecount
== 0)
45 nvkm_subdev_fini(&engine
->subdev
, false);
46 mutex_unlock(&engine
->subdev
.mutex
);
52 nvkm_engine_ref(struct nvkm_engine
*engine
)
55 mutex_lock(&engine
->subdev
.mutex
);
56 if (++engine
->usecount
== 1) {
57 int ret
= nvkm_subdev_init(&engine
->subdev
);
60 mutex_unlock(&engine
->subdev
.mutex
);
64 mutex_unlock(&engine
->subdev
.mutex
);
70 nvkm_engine_tile(struct nvkm_engine
*engine
, int region
)
72 struct nvkm_fb
*fb
= engine
->subdev
.device
->fb
;
73 if (engine
->func
->tile
)
74 engine
->func
->tile(engine
, region
, &fb
->tile
.region
[region
]);
78 nvkm_engine_intr(struct nvkm_subdev
*subdev
)
80 struct nvkm_engine
*engine
= nvkm_engine(subdev
);
81 if (engine
->func
->intr
)
82 engine
->func
->intr(engine
);
86 nvkm_engine_fini(struct nvkm_subdev
*subdev
, bool suspend
)
88 struct nvkm_engine
*engine
= nvkm_engine(subdev
);
89 if (engine
->func
->fini
)
90 return engine
->func
->fini(engine
, suspend
);
95 nvkm_engine_init(struct nvkm_subdev
*subdev
)
97 struct nvkm_engine
*engine
= nvkm_engine(subdev
);
98 struct nvkm_fb
*fb
= subdev
->device
->fb
;
102 if (!engine
->usecount
) {
103 nvkm_trace(subdev
, "init skipped, engine has no users\n");
107 if (engine
->func
->oneinit
&& !engine
->subdev
.oneinit
) {
108 nvkm_trace(subdev
, "one-time init running...\n");
109 time
= ktime_to_us(ktime_get());
110 ret
= engine
->func
->oneinit(engine
);
112 nvkm_trace(subdev
, "one-time init failed, %d\n", ret
);
116 engine
->subdev
.oneinit
= true;
117 time
= ktime_to_us(ktime_get()) - time
;
118 nvkm_trace(subdev
, "one-time init completed in %lldus\n", time
);
121 if (engine
->func
->init
)
122 ret
= engine
->func
->init(engine
);
124 for (i
= 0; fb
&& i
< fb
->tile
.regions
; i
++)
125 nvkm_engine_tile(engine
, i
);
130 nvkm_engine_preinit(struct nvkm_subdev
*subdev
)
132 struct nvkm_engine
*engine
= nvkm_engine(subdev
);
133 if (engine
->func
->preinit
)
134 engine
->func
->preinit(engine
);
139 nvkm_engine_dtor(struct nvkm_subdev
*subdev
)
141 struct nvkm_engine
*engine
= nvkm_engine(subdev
);
142 if (engine
->func
->dtor
)
143 return engine
->func
->dtor(engine
);
147 static const struct nvkm_subdev_func
149 .dtor
= nvkm_engine_dtor
,
150 .preinit
= nvkm_engine_preinit
,
151 .init
= nvkm_engine_init
,
152 .fini
= nvkm_engine_fini
,
153 .intr
= nvkm_engine_intr
,
157 nvkm_engine_ctor(const struct nvkm_engine_func
*func
,
158 struct nvkm_device
*device
, int index
, bool enable
,
159 struct nvkm_engine
*engine
)
161 nvkm_subdev_ctor(&nvkm_engine_func
, device
, index
, &engine
->subdev
);
164 if (!nvkm_boolopt(device
->cfgopt
, nvkm_subdev_name
[index
], enable
)) {
165 nvkm_debug(&engine
->subdev
, "disabled\n");
169 spin_lock_init(&engine
->lock
);
174 nvkm_engine_new_(const struct nvkm_engine_func
*func
,
175 struct nvkm_device
*device
, int index
, bool enable
,
176 struct nvkm_engine
**pengine
)
178 if (!(*pengine
= kzalloc(sizeof(**pengine
), GFP_KERNEL
)))
180 return nvkm_engine_ctor(func
, device
, index
, enable
, *pengine
);