/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <core/memory.h>
#include <subdev/timer.h>

static void
nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u16 tag, u8 port, bool secure)
{
	u8 rem = size % 4;
	u32 reg;
	int i;

	size -= rem;

	reg = start | BIT(24) | (secure ? BIT(28) : 0);
	nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
	for (i = 0; i < size / 4; i++) {
		/* write new tag every 256B */
		if ((i & 0x3f) == 0)
			nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
		nvkm_falcon_wr32(falcon, 0x184 + (port * 16), ((u32 *)data)[i]);
	}

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		/* write new tag every 256B */
		if ((i & 0x3f) == 0)
			nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
		nvkm_falcon_wr32(falcon, 0x184 + (port * 16),
				 extra & (BIT(rem * 8) - 1));
		++i;
	}

	/* code must be padded to 0x40 words */
	for (; i & 0x3f; i++)
		nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0);
}

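/*
 * A worked example of the partial-word handling above (the same masking is
 * used by the DMEM/EMEM helpers below): for size = 10, rem = 2, so the last
 * word is masked with BIT(2 * 8) - 1 = 0xffff and only its two valid
 * low-order bytes are written.  A tag is emitted every 0x40 words, i.e.
 * every 256 bytes of code, which is also the granularity the final
 * zero-padding loop rounds the image up to.
 */
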
static void
nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u8 port)
{
	u8 rem = size % 4;
	int i;

	size -= rem;

	nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 24));
	for (i = 0; i < size / 4; i++)
		nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), ((u32 *)data)[i]);

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		nvkm_falcon_wr32(falcon, 0xac4 + (port * 8),
				 extra & (BIT(rem * 8) - 1));
	}
}

static const u32 EMEM_START_ADDR = 0x1000000;

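/*
 * DMEM offsets at or above this address are treated as EMEM offsets by the
 * load/read helpers below (when falcon->has_emem is set) after subtracting
 * the base: a DMEM address of 0x1000040, for instance, becomes EMEM offset
 * 0x40.  Which falcons actually expose EMEM is a per-implementation detail
 * outside this file.
 */
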
static void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u8 port)
{
	u8 rem = size % 4;
	int i;

	if (start >= EMEM_START_ADDR && falcon->has_emem)
		return nvkm_falcon_v1_load_emem(falcon, data,
						start - EMEM_START_ADDR, size,
						port);

	size -= rem;

	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24));
	for (i = 0; i < size / 4; i++)
		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), ((u32 *)data)[i]);

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8),
				 extra & (BIT(rem * 8) - 1));
	}
}

static void
nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
			 u8 port, void *data)
{
	u8 rem = size % 4;
	int i;

	size -= rem;

	nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 25));
	for (i = 0; i < size / 4; i++)
		((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get read
	 */
	if (rem) {
		u32 extra = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

		for (i = size; i < size + rem; i++) {
			((u8 *)data)[i] = (u8)(extra & 0xff);
			extra >>= 8;
		}
	}
}

static void
nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
			 u8 port, void *data)
{
	u8 rem = size % 4;
	int i;

	if (start >= EMEM_START_ADDR && falcon->has_emem)
		return nvkm_falcon_v1_read_emem(falcon, start - EMEM_START_ADDR,
						size, port, data);

	size -= rem;

	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 25));
	for (i = 0; i < size / 4; i++)
		((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get read
	 */
	if (rem) {
		u32 extra = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

		for (i = size; i < size + rem; i++) {
			((u8 *)data)[i] = (u8)(extra & 0xff);
			extra >>= 8;
		}
	}
}

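/*
 * Illustrative example of the partial-word read path above: with rem = 3
 * and a final register read of 0xddccbbaa, the bytes 0xaa, 0xbb and 0xcc
 * land in data[size], data[size + 1] and data[size + 2] as "extra" is
 * shifted right by 8 each iteration, and the top byte 0xdd is discarded.
 */
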
static void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
	u32 inst_loc;
	u32 fbif;

	/* disable instance block binding */
	if (ctx == NULL) {
		nvkm_falcon_wr32(falcon, 0x10c, 0x0);
		return;
	}

	switch (falcon->owner->index) {
	case NVKM_ENGINE_NVENC0:
	case NVKM_ENGINE_NVENC1:
	case NVKM_ENGINE_NVENC2:
		fbif = 0x800;
		break;
	case NVKM_SUBDEV_PMU:
		fbif = 0xe00;
		break;
	default:
		fbif = 0x600;
		break;
	}

	nvkm_falcon_wr32(falcon, 0x10c, 0x1);

	/* setup apertures - virtual */
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_UCODE, 0x4);
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_VIRT, 0x0);
	/* setup apertures - physical */
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);

	/* Set context */
	switch (nvkm_memory_target(ctx)) {
	case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
	case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
	case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	/* Enable context */
	nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
	nvkm_falcon_wr32(falcon, 0x054,
			 ((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) |
			 (inst_loc << 28) | (1 << 30));
	nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
	nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
}

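/*
 * Quick sanity check on the 0x054 value written above: bits 0..27 carry the
 * context address shifted down by 12 (a 4KiB-aligned block number), bits
 * 28..29 carry inst_loc, and bit 30 is always set here (presumably a
 * "binding valid" flag).  For example, a VRAM context (inst_loc = 0) at
 * address 0x2000000 yields (0x2000000 >> 12) | (0 << 28) | (1 << 30)
 * = 0x40002000.
 */
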
static void
nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
	nvkm_falcon_wr32(falcon, 0x104, start_addr);
}

static void
nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
{
	u32 reg = nvkm_falcon_rd32(falcon, 0x100);

	if (reg & BIT(6))
		nvkm_falcon_wr32(falcon, 0x130, 0x2);
	else
		nvkm_falcon_wr32(falcon, 0x100, 0x2);
}

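/*
 * The BIT(6) check above selects how the start bit (0x2) is applied: through
 * the alternate register at 0x130 when the bit is set, directly through
 * 0x100 otherwise.  Reading BIT(6) as a CPUCTL "alias enable" flag is an
 * interpretation, not something stated in this file.
 */
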
static int
nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10);
	if (ret < 0)
		return ret;

	return 0;
}

static int
nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	/* clear interrupt(s) */
	nvkm_falcon_mask(falcon, 0x004, mask, mask);
	/* wait until interrupts are cleared */
	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0);
	if (ret < 0)
		return ret;

	return 0;
}

static int
falcon_v1_wait_idle(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0);
	if (ret < 0)
		return ret;

	return 0;
}

static int
nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0);
	if (ret < 0) {
		nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n");
		return ret;
	}

	ret = falcon_v1_wait_idle(falcon);
	if (ret)
		return ret;

	/* enable IRQs */
	nvkm_falcon_wr32(falcon, 0x010, 0xff);

	return 0;
}

static void
nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
{
	/* disable IRQs and wait for any previous code to complete */
	nvkm_falcon_wr32(falcon, 0x014, 0xff);
	falcon_v1_wait_idle(falcon);
}

static const struct nvkm_falcon_func
nvkm_falcon_v1 = {
	.load_imem = nvkm_falcon_v1_load_imem,
	.load_dmem = nvkm_falcon_v1_load_dmem,
	.read_dmem = nvkm_falcon_v1_read_dmem,
	.bind_context = nvkm_falcon_v1_bind_context,
	.start = nvkm_falcon_v1_start,
	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
	.enable = nvkm_falcon_v1_enable,
	.disable = nvkm_falcon_v1_disable,
	.set_start_addr = nvkm_falcon_v1_set_start_addr,
};

int
nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
		   struct nvkm_falcon **pfalcon)
{
	struct nvkm_falcon *falcon;
	if (!(falcon = *pfalcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_falcon_ctor(&nvkm_falcon_v1, owner, name, addr, falcon);
	return 0;
}
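
/*
 * Minimal usage sketch (illustrative only; the owning subdev, name and base
 * address below are example values, not taken from this file):
 *
 *	struct nvkm_falcon *falcon;
 *	int ret = nvkm_falcon_v1_new(&pmu->subdev, "pmu", 0x10a000, &falcon);
 *	if (ret)
 *		return ret;
 *
 * After this, callers drive the engine through the hooks collected in the
 * nvkm_falcon_v1 function table above.
 */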