/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <engine/falcon.h>

#include <core/gpuobj.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <engine/fifo.h>

static int
nvkm_falcon_oclass_get(struct nvkm_oclass *oclass, int index)
{
	struct nvkm_falcon *falcon = nvkm_falcon(oclass->engine);
	int c = 0;

	while (falcon->func->sclass[c].oclass) {
		if (c++ == index) {
			oclass->base = falcon->func->sclass[index];
			return index;
		}
	}

	return c;
}

static int
nvkm_falcon_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
			int align, struct nvkm_gpuobj **pgpuobj)
{
	return nvkm_gpuobj_new(object->engine->subdev.device, 256,
			       align, true, parent, pgpuobj);
}

static const struct nvkm_object_func
nvkm_falcon_cclass = {
	.bind = nvkm_falcon_cclass_bind,
};

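/* Top-level falcon interrupt handler: look up the channel that owns the
 * current context, give the engine-specific handler first shot at the
 * status bits, then ack (or at least log) whatever remains.
 */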
static void
nvkm_falcon_intr(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = falcon->addr;
	u32 dest = nvkm_rd32(device, base + 0x01c);
	u32 intr = nvkm_rd32(device, base + 0x008) & dest & ~(dest >> 16);
	u32 inst = nvkm_rd32(device, base + 0x050) & 0x3fffffff;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;

	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);

	if (intr & 0x00000040) {
		if (falcon->func->intr) {
			falcon->func->intr(falcon, chan);
			nvkm_wr32(device, base + 0x004, 0x00000040);
			intr &= ~0x00000040;
		}
	}

	if (intr & 0x00000010) {
		nvkm_debug(subdev, "ucode halted\n");
		nvkm_wr32(device, base + 0x004, 0x00000010);
		intr &= ~0x00000010;
	}

	if (intr) {
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, base + 0x004, intr);
	}

	nvkm_fifo_chan_put(device->fifo, flags, &chan);
}

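/* On final teardown (!suspend) the VRAM bootstrap copy and any externally
 * loaded firmware images are released; in both cases FIFO access is cut off
 * and interrupts are masked if the engine is still powered up.
 */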
static int
nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_device *device = falcon->engine.subdev.device;
	const u32 base = falcon->addr;

	if (!suspend) {
		nvkm_memory_unref(&falcon->core);
		if (falcon->external) {
			vfree(falcon->data.data);
			vfree(falcon->code.data);
			falcon->code.data = NULL;
		}
	}

	if (nvkm_mc_enabled(device, engine->subdev.index)) {
		nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
		nvkm_wr32(device, base + 0x014, 0xffffffff);
	}
	return 0;
}

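/* Duplicate a firmware blob into vmalloc()ed memory so the copy stays valid
 * after release_firmware().
 */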
static void *
vmemdup(const void *src, size_t len)
{
	void *p = vmalloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

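/* One-time setup: probe the falcon's version and security level, and read
 * back the code/data segment size limits advertised by the hardware.
 */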
static int
nvkm_falcon_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = falcon->addr;
	u32 caps;

	/* determine falcon capabilities */
	if (device->chipset < 0xa3 ||
	    device->chipset == 0xaa || device->chipset == 0xac) {
		falcon->version = 0;
		falcon->secret  = (falcon->addr == 0x087000) ? 1 : 0;
	} else {
		caps = nvkm_rd32(device, base + 0x12c);
		falcon->version = (caps & 0x0000000f);
		falcon->secret  = (caps & 0x00000030) >> 4;
	}

	caps = nvkm_rd32(device, base + 0x108);
	falcon->code.limit = (caps & 0x000001ff) << 8;
	falcon->data.limit = (caps & 0x0003fe00) >> 1;

	nvkm_debug(subdev, "falcon version: %d\n", falcon->version);
	nvkm_debug(subdev, "secret level: %d\n", falcon->secret);
	nvkm_debug(subdev, "code limit: %d\n", falcon->code.limit);
	nvkm_debug(subdev, "data limit: %d\n", falcon->data.limit);
	return 0;
}

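/* Power-on/resume path: locate firmware for the engine (a built-in image, a
 * "self-bootstrapping" blob, or separate code/data segments), upload it, and
 * start the falcon running.
 */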
static int
nvkm_falcon_init(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct firmware *fw;
	char name[32] = "internal";
	const u32 base = falcon->addr;
	int ret, i;

	/* wait for 'uc halted' to be signalled before continuing */
	if (falcon->secret && falcon->version < 4) {
		if (!falcon->version) {
			nvkm_msec(device, 2000,
				if (nvkm_rd32(device, base + 0x008) & 0x00000010)
					break;
			);
		} else {
			nvkm_msec(device, 2000,
				if (!(nvkm_rd32(device, base + 0x180) & 0x80000000))
					break;
			);
		}
		nvkm_wr32(device, base + 0x004, 0x00000010);
	}

	/* disable all interrupts */
	nvkm_wr32(device, base + 0x014, 0xffffffff);

	/* no default ucode provided by the engine implementation, try and
	 * locate a "self-bootstrapping" firmware image for the engine
	 */
	if (!falcon->code.data) {
		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
			 device->chipset, falcon->addr >> 12);

		ret = request_firmware(&fw, name, device->dev);
		if (ret == 0) {
			falcon->code.data = vmemdup(fw->data, fw->size);
			falcon->code.size = fw->size;
			falcon->data.data = NULL;
			falcon->data.size = 0;
			release_firmware(fw);
		}

		falcon->external = true;
	}

	/* next step is to try and load "static code/data segment" firmware
	 * images for the engine
	 */
	if (!falcon->code.data) {
		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
			 device->chipset, falcon->addr >> 12);

		ret = request_firmware(&fw, name, device->dev);
		if (ret) {
			nvkm_error(subdev, "unable to load firmware data\n");
			return -ENODEV;
		}

		falcon->data.data = vmemdup(fw->data, fw->size);
		falcon->data.size = fw->size;
		release_firmware(fw);
		if (!falcon->data.data)
			return -ENOMEM;

		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
			 device->chipset, falcon->addr >> 12);

		ret = request_firmware(&fw, name, device->dev);
		if (ret) {
			nvkm_error(subdev, "unable to load firmware code\n");
			return -ENODEV;
		}

		falcon->code.data = vmemdup(fw->data, fw->size);
		falcon->code.size = fw->size;
		release_firmware(fw);
		if (!falcon->code.data)
			return -ENOMEM;
	}

	nvkm_debug(subdev, "firmware: %s (%s)\n", name, falcon->data.data ?
		   "static code/data segments" : "self-bootstrapping");

	/* ensure any "self-bootstrapping" firmware image is in vram */
	if (!falcon->data.data && !falcon->core) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      falcon->code.size, 256, false,
				      &falcon->core);
		if (ret) {
			nvkm_error(subdev, "core allocation failed, %d\n", ret);
			return ret;
		}

		nvkm_kmap(falcon->core);
		for (i = 0; i < falcon->code.size; i += 4)
			nvkm_wo32(falcon->core, i, falcon->code.data[i / 4]);
		nvkm_done(falcon->core);
	}

	/* upload firmware bootloader (or the full code segments) */
	if (falcon->core) {
		u64 addr = nvkm_memory_addr(falcon->core);
		if (device->card_type < NV_C0)
			nvkm_wr32(device, base + 0x618, 0x04000000);
		else
			nvkm_wr32(device, base + 0x618, 0x00000114);
		nvkm_wr32(device, base + 0x11c, 0);
		nvkm_wr32(device, base + 0x110, addr >> 8);
		nvkm_wr32(device, base + 0x114, 0);
		nvkm_wr32(device, base + 0x118, 0x00006610);
	} else {
		if (falcon->code.size > falcon->code.limit ||
		    falcon->data.size > falcon->data.limit) {
			nvkm_error(subdev, "ucode exceeds falcon limit(s)\n");
			return -EINVAL;
		}

		if (falcon->version < 3) {
			nvkm_wr32(device, base + 0xff8, 0x00100000);
			for (i = 0; i < falcon->code.size / 4; i++)
				nvkm_wr32(device, base + 0xff4, falcon->code.data[i]);
		} else {
			nvkm_wr32(device, base + 0x180, 0x01000000);
			for (i = 0; i < falcon->code.size / 4; i++) {
				if ((i & 0x3f) == 0)
					nvkm_wr32(device, base + 0x188, i >> 6);
				nvkm_wr32(device, base + 0x184, falcon->code.data[i]);
			}
		}
	}

	/* upload data segment (if necessary), zeroing the remainder */
	if (falcon->version < 3) {
		nvkm_wr32(device, base + 0xff8, 0x00000000);
		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
			nvkm_wr32(device, base + 0xff4, falcon->data.data[i]);
		for (; i < falcon->data.limit; i += 4)
			nvkm_wr32(device, base + 0xff4, 0x00000000);
	} else {
		nvkm_wr32(device, base + 0x1c0, 0x01000000);
		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
			nvkm_wr32(device, base + 0x1c4, falcon->data.data[i]);
		for (; i < falcon->data.limit / 4; i++)
			nvkm_wr32(device, base + 0x1c4, 0x00000000);
	}

	/* start it running */
	nvkm_wr32(device, base + 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
	nvkm_wr32(device, base + 0x104, 0x00000000); /* ENTRY */
	nvkm_wr32(device, base + 0x100, 0x00000002); /* TRIGGER */
	nvkm_wr32(device, base + 0x048, 0x00000003); /* FIFO | CHSW */

	if (falcon->func->init)
		falcon->func->init(falcon);
	return 0;
}

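/* No falcon-specific teardown; return the containing structure so the core
 * frees the whole nvkm_falcon allocation.
 */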
static void *
nvkm_falcon_dtor(struct nvkm_engine *engine)
{
	return nvkm_falcon(engine);
}

static const struct nvkm_engine_func
nvkm_falcon = {
	.dtor = nvkm_falcon_dtor,
	.oneinit = nvkm_falcon_oneinit,
	.init = nvkm_falcon_init,
	.fini = nvkm_falcon_fini,
	.intr = nvkm_falcon_intr,
	.fifo.sclass = nvkm_falcon_oclass_get,
	.cclass = &nvkm_falcon_cclass,
};

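/* Common constructor used by the falcon-based engine implementations to hook
 * up the shared engine functions above.
 */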
int
nvkm_falcon_new_(const struct nvkm_falcon_func *func,
		 struct nvkm_device *device, int index, bool enable,
		 u32 addr, struct nvkm_engine **pengine)
{
	struct nvkm_falcon *falcon;

	if (!(falcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
		return -ENOMEM;
	falcon->func = func;
	falcon->addr = addr;
	falcon->code.data = func->code.data;
	falcon->code.size = func->code.size;
	falcon->data.data = func->data.data;
	falcon->data.size = func->data.size;
	*pengine = &falcon->engine;

	return nvkm_engine_ctor(&nvkm_falcon, device, index,
				enable, &falcon->engine);
}