/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <core/memory.h>
#include <subdev/timer.h>

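/*
 * Copy a ucode blob into the falcon's instruction memory (IMEM) through the
 * given port. Each 256-byte page receives an incrementing tag, and the
 * secure flag marks the uploaded pages as a secure payload.
 */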
void
nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
                         u32 size, u16 tag, u8 port, bool secure)
{
        u8 rem = size % 4;
        u32 reg;
        int i;

        size -= rem;

        reg = start | BIT(24) | (secure ? BIT(28) : 0);
        nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
        for (i = 0; i < size / 4; i++) {
                /* write new tag every 256B */
                if ((i & 0x3f) == 0)
                        nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
                nvkm_falcon_wr32(falcon, 0x184 + (port * 16), ((u32 *)data)[i]);
        }

        /*
         * If size is not a multiple of 4, mask the last word to ensure garbage
         * does not get written
         */
        if (rem) {
                u32 extra = ((u32 *)data)[i];

                /* write new tag every 256B */
                if ((i & 0x3f) == 0)
                        nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
                nvkm_falcon_wr32(falcon, 0x184 + (port * 16),
                                 extra & (BIT(rem * 8) - 1));
                ++i;
        }

        /* code must be padded to 0x40 words */
        for (; i & 0x3f; i++)
                nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0);
}

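/*
 * Copy a blob into the falcon's external memory (EMEM) through the given
 * port. Only used on falcons that expose an EMEM window (func->emem_addr);
 * see nvkm_falcon_v1_load_dmem() below.
 */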
static void
nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
                         u32 size, u8 port)
{
        u8 rem = size % 4;
        int i;

        size -= rem;

        nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 24));
        for (i = 0; i < size / 4; i++)
                nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), ((u32 *)data)[i]);

        /*
         * If size is not a multiple of 4, mask the last word to ensure garbage
         * does not get written
         */
        if (rem) {
                u32 extra = ((u32 *)data)[i];

                nvkm_falcon_wr32(falcon, 0xac4 + (port * 8),
                                 extra & (BIT(rem * 8) - 1));
        }
}

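/*
 * Copy a blob into the falcon's data memory (DMEM) through the given port,
 * redirecting to the EMEM helper above when the address falls inside the
 * EMEM window.
 */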
void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
                         u32 size, u8 port)
{
        const struct nvkm_falcon_func *func = falcon->func;
        u8 rem = size % 4;
        int i;

        if (func->emem_addr && start >= func->emem_addr)
                return nvkm_falcon_v1_load_emem(falcon, data,
                                                start - func->emem_addr, size,
                                                port);

        size -= rem;

        nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24));
        for (i = 0; i < size / 4; i++)
                nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), ((u32 *)data)[i]);

        /*
         * If size is not a multiple of 4, mask the last word to ensure garbage
         * does not get written
         */
        if (rem) {
                u32 extra = ((u32 *)data)[i];

                nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8),
                                 extra & (BIT(rem * 8) - 1));
        }
}

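/* Read size bytes of EMEM back into data - the inverse of the EMEM load. */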
static void
nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
                         u8 port, void *data)
{
        u8 rem = size % 4;
        int i;

        size -= rem;

        nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 25));
        for (i = 0; i < size / 4; i++)
                ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

        /*
         * If size is not a multiple of 4, mask the last word to ensure garbage
         * does not get read
         */
        if (rem) {
                u32 extra = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

                for (i = size; i < size + rem; i++) {
                        ((u8 *)data)[i] = (u8)(extra & 0xff);
                        extra >>= 8;
                }
        }
}

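/*
 * Read size bytes of DMEM back into data, redirecting to the EMEM helper
 * above when the address falls inside the EMEM window.
 */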
void
nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
                         u8 port, void *data)
{
        const struct nvkm_falcon_func *func = falcon->func;
        u8 rem = size % 4;
        int i;

        if (func->emem_addr && start >= func->emem_addr)
                return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr,
                                                size, port, data);

        size -= rem;

        nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 25));
        for (i = 0; i < size / 4; i++)
                ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

        /*
         * If size is not a multiple of 4, mask the last word to ensure garbage
         * does not get read
         */
        if (rem) {
                u32 extra = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

                for (i = size; i < size + rem; i++) {
                        ((u8 *)data)[i] = (u8)(extra & 0xff);
                        extra >>= 8;
                }
        }
}

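/*
 * Bind an instance block (or unbind it, when ctx is NULL) so the falcon's
 * DMA engine can reach memory: program the FBIF apertures, then point the
 * context at the instance block's address and target aperture.
 */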
void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
        const u32 fbif = falcon->func->fbif;
        u32 inst_loc;

        /* disable instance block binding */
        if (ctx == NULL) {
                nvkm_falcon_wr32(falcon, 0x10c, 0x0);
                return;
        }

        nvkm_falcon_wr32(falcon, 0x10c, 0x1);

        /* setup apertures - virtual */
        nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_UCODE, 0x4);
        nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_VIRT, 0x0);
        /* setup apertures - physical */
        nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
        nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
        nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);

        /* Set context */
        switch (nvkm_memory_target(ctx)) {
        case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
        case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
        case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
        default:
                WARN_ON(1);
                return;
        }

        /* Enable context */
        nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
        nvkm_falcon_wr32(falcon, 0x054,
                         ((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) |
                         (inst_loc << 28) | (1 << 30));
        nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
        nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
}

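/* Program the address the falcon will start executing from. */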
void
nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
        nvkm_falcon_wr32(falcon, 0x104, start_addr);
}

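/*
 * Kick off execution. Which register takes the start request depends on
 * bit 6 of register 0x100 (presumably a CPUCTL alias-enable flag).
 */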
void
nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
{
        u32 reg = nvkm_falcon_rd32(falcon, 0x100);

        if (reg & BIT(6))
                nvkm_falcon_wr32(falcon, 0x130, 0x2);
        else
                nvkm_falcon_wr32(falcon, 0x100, 0x2);
}

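/* Wait up to ms milliseconds for the falcon to report that it has halted. */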
int
nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
        struct nvkm_device *device = falcon->owner->device;
        int ret;

        ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10);
        if (ret < 0)
                return ret;

        return 0;
}

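/* Clear the interrupts in mask and wait for the hardware to acknowledge. */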
int
nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
        struct nvkm_device *device = falcon->owner->device;
        int ret;

        /* clear interrupt(s) */
        nvkm_falcon_mask(falcon, 0x004, mask, mask);
        /* wait until interrupts are cleared */
        ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0);
        if (ret < 0)
                return ret;

        return 0;
}

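/* Poll the idle status register until the falcon reports it is fully idle. */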
static int
falcon_v1_wait_idle(struct nvkm_falcon *falcon)
{
        struct nvkm_device *device = falcon->owner->device;
        int ret;

        ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0);
        if (ret < 0)
                return ret;

        return 0;
}

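/*
 * Bring the falcon up: wait for memory scrubbing to finish, wait for the
 * unit to go idle, then unmask its IRQs.
 */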
int
nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
{
        struct nvkm_device *device = falcon->owner->device;
        int ret;

        ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0);
        if (ret < 0) {
                nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n");
                return ret;
        }

        ret = falcon_v1_wait_idle(falcon);
        if (ret)
                return ret;

        /* enable IRQs */
        nvkm_falcon_wr32(falcon, 0x010, 0xff);

        return 0;
}

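/* Mask IRQs and wait for any code already running to complete. */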
void
nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
{
        /* disable IRQs and wait for any previous code to complete */
        nvkm_falcon_wr32(falcon, 0x014, 0xff);
        falcon_v1_wait_idle(falcon);
}

static const struct nvkm_falcon_func
nvkm_falcon_v1 = {
        .load_imem = nvkm_falcon_v1_load_imem,
        .load_dmem = nvkm_falcon_v1_load_dmem,
        .read_dmem = nvkm_falcon_v1_read_dmem,
        .bind_context = nvkm_falcon_v1_bind_context,
        .start = nvkm_falcon_v1_start,
        .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
        .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
        .enable = nvkm_falcon_v1_enable,
        .disable = nvkm_falcon_v1_disable,
        .set_start_addr = nvkm_falcon_v1_set_start_addr,
};

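/* Allocate a v1 falcon instance and hook up the v1 function table. */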
int
nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
                   struct nvkm_falcon **pfalcon)
{
        struct nvkm_falcon *falcon;
        if (!(falcon = *pfalcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_falcon_ctor(&nvkm_falcon_v1, owner, name, addr, falcon);
        return 0;
}