// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_device_info.h"
#include "pvr_fw.h"
#include "pvr_fw_info.h"
#include "pvr_fw_startstop.h"
#include "pvr_fw_trace.h"
#include "pvr_gem.h"
#include "pvr_power.h"
#include "pvr_rogue_fwif_dev_info.h"
#include "pvr_rogue_heap_config.h"

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/sizes.h>
#define FW_MAX_SUPPORTED_MAJOR_VERSION 1

#define FW_BOOT_TIMEOUT_USEC 5000000

/* Config heap occupies top 192k of the firmware heap. */
#define PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY SZ_64K
#define PVR_ROGUE_FW_CONFIG_HEAP_SIZE (3 * PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)

/* Main firmware allocations should come from the remainder of the heap. */
#define PVR_ROGUE_FW_MAIN_HEAP_BASE ROGUE_FW_HEAP_BASE

/* Offsets from start of configuration area of FW heap. */
#define PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET 0
#define PVR_ROGUE_FWIF_OSINIT_OFFSET \
	(PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)
#define PVR_ROGUE_FWIF_SYSINIT_OFFSET \
	(PVR_ROGUE_FWIF_OSINIT_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)

#define PVR_ROGUE_FAULT_PAGE_SIZE SZ_4K

#define PVR_SYNC_OBJ_SIZE sizeof(u32)
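
/*
 * Worked example (illustrative): with the values above the config heap is
 * 3 * 64K = 192K and occupies the top of the firmware heap. Within it:
 *
 *	CONNECTION_CTL at config_offset + 0
 *	OSINIT         at config_offset + 64K
 *	SYSINIT        at config_offset + 128K
 *
 * where config_offset = raw heap size - PVR_ROGUE_FW_CONFIG_HEAP_SIZE, as
 * calculated by pvr_fw_heap_info_init() later in this file.
 */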
const struct pvr_fw_layout_entry *
pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
	u32 entry;

	for (entry = 0; entry < num_layout_entries; entry++) {
		if (layout_entries[entry].id == id)
			return &layout_entries[entry];
	}

	return NULL;
}
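
/*
 * Example use (a sketch; META_PRIVATE_DATA is one of the &enum
 * pvr_fw_section_id values):
 *
 *	const struct pvr_fw_layout_entry *entry =
 *		pvr_fw_find_layout_entry(pvr_dev, META_PRIVATE_DATA);
 *
 *	if (!entry)
 *		return -EINVAL;
 *
 * A NULL return means the requested section is not present in the loaded
 * firmware image.
 */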
static const struct pvr_fw_layout_entry *
pvr_fw_find_private_data(struct pvr_device *pvr_dev)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
	u32 entry;

	for (entry = 0; entry < num_layout_entries; entry++) {
		if (layout_entries[entry].id == META_PRIVATE_DATA ||
		    layout_entries[entry].id == MIPS_PRIVATE_DATA ||
		    layout_entries[entry].id == RISCV_PRIVATE_DATA)
			return &layout_entries[entry];
	}

	return NULL;
}
#define DEV_INFO_MASK_SIZE(x) DIV_ROUND_UP(x, 64)
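
/*
 * DEV_INFO_MASK_SIZE() returns the number of u64 words needed to hold an
 * x-bit mask, e.g. DEV_INFO_MASK_SIZE(64) == 1 and DEV_INFO_MASK_SIZE(65) == 2.
 */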
/**
 * pvr_fw_validate() - Parse firmware header and check compatibility
 * @pvr_dev: Device pointer.
 *
 * Returns:
 *  * 0 on success, or
 *  * -EINVAL if firmware is incompatible.
 */
static int
pvr_fw_validate(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	const struct firmware *firmware = pvr_dev->fw_dev.firmware;
	const struct pvr_fw_layout_entry *layout_entries;
	const struct pvr_fw_info_header *header;
	const u8 *fw = firmware->data;
	u32 fw_offset = firmware->size - SZ_4K;
	u32 layout_table_size;
	u32 entry;

	if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE))
		return -EINVAL;

	header = (const struct pvr_fw_info_header *)&fw[fw_offset];

	if (header->info_version != PVR_FW_INFO_VERSION) {
		drm_err(drm_dev, "Unsupported fw info version %u\n",
			header->info_version);
		return -EINVAL;
	}

	if (header->header_len != sizeof(struct pvr_fw_info_header) ||
	    header->layout_entry_size != sizeof(struct pvr_fw_layout_entry) ||
	    header->layout_entry_num > PVR_FW_INFO_MAX_NUM_ENTRIES) {
		drm_err(drm_dev, "FW info format mismatch\n");
		return -EINVAL;
	}

	if (!(header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ||
	    header->fw_version_major > FW_MAX_SUPPORTED_MAJOR_VERSION ||
	    header->fw_version_major == 0) {
		drm_err(drm_dev, "Unsupported FW version %u.%u (build: %u%s)\n",
			header->fw_version_major, header->fw_version_minor,
			header->fw_version_build,
			(header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ? " OS" : "");
		return -EINVAL;
	}

	if (pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id) != header->bvnc) {
		struct pvr_gpu_id fw_gpu_id;

		packed_bvnc_to_pvr_gpu_id(header->bvnc, &fw_gpu_id);
		drm_err(drm_dev, "FW built for incorrect GPU ID %i.%i.%i.%i (expected %i.%i.%i.%i)\n",
			fw_gpu_id.b, fw_gpu_id.v, fw_gpu_id.n, fw_gpu_id.c,
			pvr_dev->gpu_id.b, pvr_dev->gpu_id.v, pvr_dev->gpu_id.n,
			pvr_dev->gpu_id.c);
		return -EINVAL;
	}

	fw_offset += header->header_len;
	layout_table_size =
		header->layout_entry_size * header->layout_entry_num;
	if ((fw_offset + layout_table_size) > firmware->size)
		return -EINVAL;

	layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset];
	for (entry = 0; entry < header->layout_entry_num; entry++) {
		u32 start_addr = layout_entries[entry].base_addr;
		u32 end_addr = start_addr + layout_entries[entry].alloc_size;

		if (start_addr >= end_addr)
			return -EINVAL;
	}

	fw_offset = (firmware->size - SZ_4K) - header->device_info_size;

	drm_info(drm_dev, "FW version v%u.%u (build %u OS)\n", header->fw_version_major,
		 header->fw_version_minor, header->fw_version_build);

	pvr_dev->fw_version.major = header->fw_version_major;
	pvr_dev->fw_version.minor = header->fw_version_minor;

	pvr_dev->fw_dev.header = header;
	pvr_dev->fw_dev.layout_entries = layout_entries;

	return 0;
}
static int
pvr_fw_get_device_info(struct pvr_device *pvr_dev)
{
	const struct firmware *firmware = pvr_dev->fw_dev.firmware;
	struct pvr_fw_device_info_header *header;
	const u8 *fw = firmware->data;
	const u64 *dev_info;
	u32 fw_offset;

	fw_offset = (firmware->size - SZ_4K) - pvr_dev->fw_dev.header->device_info_size;

	header = (struct pvr_fw_device_info_header *)&fw[fw_offset];
	dev_info = (u64 *)(header + 1);

	pvr_device_info_set_quirks(pvr_dev, dev_info, header->brn_mask_size);
	dev_info += header->brn_mask_size;

	pvr_device_info_set_enhancements(pvr_dev, dev_info, header->ern_mask_size);
	dev_info += header->ern_mask_size;

	return pvr_device_info_set_features(pvr_dev, dev_info, header->feature_mask_size,
					    header->feature_param_size);
}
static void
layout_get_sizes(struct pvr_device *pvr_dev)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;

	fw_mem->code_alloc_size = 0;
	fw_mem->data_alloc_size = 0;
	fw_mem->core_code_alloc_size = 0;
	fw_mem->core_data_alloc_size = 0;

	/* Extract section sizes from FW layout table. */
	for (u32 entry = 0; entry < num_layout_entries; entry++) {
		switch (layout_entries[entry].type) {
		case FW_CODE:
			fw_mem->code_alloc_size += layout_entries[entry].alloc_size;
			break;
		case FW_DATA:
			fw_mem->data_alloc_size += layout_entries[entry].alloc_size;
			break;
		case FW_COREMEM_CODE:
			fw_mem->core_code_alloc_size +=
				layout_entries[entry].alloc_size;
			break;
		case FW_COREMEM_DATA:
			fw_mem->core_data_alloc_size +=
				layout_entries[entry].alloc_size;
			break;
		}
	}
}
int
pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw_code_ptr,
			void *fw_data_ptr, void *fw_core_code_ptr, void *fw_core_data_ptr,
			void **host_addr_out)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
	u32 end_addr = addr + size;
	u32 entry;

	/* Ensure requested range is not zero, and size is not causing addr to overflow. */
	if (end_addr <= addr)
		return -EINVAL;

	for (entry = 0; entry < num_layout_entries; entry++) {
		u32 entry_start_addr = layout_entries[entry].base_addr;
		u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size;

		if (addr >= entry_start_addr && addr < entry_end_addr &&
		    end_addr > entry_start_addr && end_addr <= entry_end_addr) {
			switch (layout_entries[entry].type) {
			case FW_CODE:
				*host_addr_out = fw_code_ptr;
				break;

			case FW_DATA:
				*host_addr_out = fw_data_ptr;
				break;

			case FW_COREMEM_CODE:
				*host_addr_out = fw_core_code_ptr;
				break;

			case FW_COREMEM_DATA:
				*host_addr_out = fw_core_data_ptr;
				break;

			default:
				return -EINVAL;
			}

			/* Direct Mem write to mapped memory */
			addr -= layout_entries[entry].base_addr;
			addr += layout_entries[entry].alloc_offset;

			/*
			 * Add offset to pointer to FW allocation only if that
			 * allocation is available
			 */
			*(u8 **)host_addr_out += addr;
			return 0;
		}
	}

	return -EINVAL;
}
static int
pvr_fw_create_fwif_connection_ctl(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->fwif_connection_ctl =
		pvr_fw_object_create_and_map_offset(pvr_dev,
						    fw_dev->fw_heap_info.config_offset +
						    PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET,
						    sizeof(*fw_dev->fwif_connection_ctl),
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    NULL, NULL,
						    &fw_dev->mem.fwif_connection_ctl_obj);
	if (IS_ERR(fw_dev->fwif_connection_ctl)) {
		drm_err(drm_dev,
			"Unable to allocate FWIF connection control memory\n");
		return PTR_ERR(fw_dev->fwif_connection_ctl);
	}

	return 0;
}
static void
pvr_fw_fini_fwif_connection_ctl(struct pvr_device *pvr_dev)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	pvr_fw_object_unmap_and_destroy(fw_dev->mem.fwif_connection_ctl_obj);
}
static void
fw_osinit_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_osinit *fwif_osinit = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;

	fwif_osinit->kernel_ccbctl_fw_addr = pvr_dev->kccb.ccb.ctrl_fw_addr;
	fwif_osinit->kernel_ccb_fw_addr = pvr_dev->kccb.ccb.ccb_fw_addr;
	pvr_fw_object_get_fw_addr(pvr_dev->kccb.rtn_obj,
				  &fwif_osinit->kernel_ccb_rtn_slots_fw_addr);

	fwif_osinit->firmware_ccbctl_fw_addr = pvr_dev->fwccb.ctrl_fw_addr;
	fwif_osinit->firmware_ccb_fw_addr = pvr_dev->fwccb.ccb_fw_addr;

	fwif_osinit->work_est_firmware_ccbctl_fw_addr = 0;
	fwif_osinit->work_est_firmware_ccb_fw_addr = 0;

	pvr_fw_object_get_fw_addr(fw_mem->hwrinfobuf_obj,
				  &fwif_osinit->rogue_fwif_hwr_info_buf_ctl_fw_addr);
	pvr_fw_object_get_fw_addr(fw_mem->osdata_obj, &fwif_osinit->fw_os_data_fw_addr);

	fwif_osinit->hwr_debug_dump_limit = 0;

	rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.hw_bvnc);
	rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.fw_bvnc);
}
static void
fw_osdata_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_osdata *fwif_osdata = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;

	pvr_fw_object_get_fw_addr(fw_mem->power_sync_obj, &fwif_osdata->power_sync_fw_addr);
}
static void
fw_fault_page_init(void *cpu_ptr, void *priv)
{
	u32 *fault_page = cpu_ptr;

	for (int i = 0; i < PVR_ROGUE_FAULT_PAGE_SIZE / sizeof(*fault_page); i++)
		fault_page[i] = 0xdeadbee0;
}
static void
fw_sysinit_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_sysinit *fwif_sysinit = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;
	dma_addr_t fault_dma_addr = 0;
	u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk);

	WARN_ON(!clock_speed_hz);

	WARN_ON(pvr_fw_object_get_dma_addr(fw_mem->fault_page_obj, 0, &fault_dma_addr));
	fwif_sysinit->fault_phys_addr = (u64)fault_dma_addr;

	fwif_sysinit->pds_exec_base = ROGUE_PDSCODEDATA_HEAP_BASE;
	fwif_sysinit->usc_exec_base = ROGUE_USCCODE_HEAP_BASE;

	pvr_fw_object_get_fw_addr(fw_mem->runtime_cfg_obj, &fwif_sysinit->runtime_cfg_fw_addr);
	pvr_fw_object_get_fw_addr(fw_dev->fw_trace.tracebuf_ctrl_obj,
				  &fwif_sysinit->trace_buf_ctl_fw_addr);
	pvr_fw_object_get_fw_addr(fw_mem->sysdata_obj, &fwif_sysinit->fw_sys_data_fw_addr);
	pvr_fw_object_get_fw_addr(fw_mem->gpu_util_fwcb_obj,
				  &fwif_sysinit->gpu_util_fw_cb_ctl_fw_addr);
	if (fw_mem->core_data_obj) {
		pvr_fw_object_get_fw_addr(fw_mem->core_data_obj,
					  &fwif_sysinit->coremem_data_store.fw_addr);
	}

	/* Currently unsupported. */
	fwif_sysinit->counter_dump_ctl.buffer_fw_addr = 0;
	fwif_sysinit->counter_dump_ctl.size_in_dwords = 0;

	/* Skip alignment checks. */
	fwif_sysinit->align_checks = 0;

	fwif_sysinit->filter_flags = 0;
	fwif_sysinit->hw_perf_filter = 0;
	fwif_sysinit->firmware_perf = FW_PERF_CONF_NONE;
	fwif_sysinit->initial_core_clock_speed = clock_speed_hz;
	fwif_sysinit->active_pm_latency_ms = 0;
	fwif_sysinit->gpio_validation_mode = ROGUE_FWIF_GPIO_VAL_OFF;
	fwif_sysinit->firmware_started = false;
	fwif_sysinit->marker_val = 1;

	memset(&fwif_sysinit->bvnc_km_feature_flags, 0,
	       sizeof(fwif_sysinit->bvnc_km_feature_flags));
}
#define ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB 4
static void
fw_sysdata_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_sysdata *fwif_sysdata = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	u32 slc_size_in_kilobytes = 0;
	u32 config_flags = 0;

	WARN_ON(PVR_FEATURE_VALUE(pvr_dev, slc_size_in_kilobytes, &slc_size_in_kilobytes));

	if (slc_size_in_kilobytes < ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB)
		config_flags |= ROGUE_FWIF_INICFG_DISABLE_DM_OVERLAP;

	fwif_sysdata->config_flags = config_flags;
}
static void
fw_runtime_cfg_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_runtime_cfg *runtime_cfg = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk);

	WARN_ON(!clock_speed_hz);

	runtime_cfg->core_clock_speed = clock_speed_hz;
	runtime_cfg->active_pm_latency_ms = 0;
	runtime_cfg->active_pm_latency_persistant = true;
	WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters,
				  &runtime_cfg->default_dusts_num_init) != 0);
}
static void
fw_gpu_util_fwcb_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_gpu_util_fwcb *gpu_util_fwcb = cpu_ptr;

	gpu_util_fwcb->last_word = PVR_FWIF_GPU_UTIL_STATE_IDLE;
}
static int
pvr_fw_create_structures(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;
	int err;

	fw_dev->power_sync = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->power_sync),
							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							  NULL, NULL, &fw_mem->power_sync_obj);
	if (IS_ERR(fw_dev->power_sync)) {
		drm_err(drm_dev, "Unable to allocate FW power_sync structure\n");
		return PTR_ERR(fw_dev->power_sync);
	}

	fw_dev->hwrinfobuf = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->hwrinfobuf),
							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							  NULL, NULL, &fw_mem->hwrinfobuf_obj);
	if (IS_ERR(fw_dev->hwrinfobuf)) {
		drm_err(drm_dev,
			"Unable to allocate FW hwrinfobuf structure\n");
		err = PTR_ERR(fw_dev->hwrinfobuf);
		goto err_release_power_sync;
	}

	err = pvr_fw_object_create(pvr_dev, PVR_SYNC_OBJ_SIZE,
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   NULL, NULL, &fw_mem->mmucache_sync_obj);
	if (err) {
		drm_err(drm_dev,
			"Unable to allocate MMU cache sync object\n");
		goto err_release_hwrinfobuf;
	}

	fw_dev->fwif_sysdata = pvr_fw_object_create_and_map(pvr_dev,
							    sizeof(*fw_dev->fwif_sysdata),
							    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							    fw_sysdata_init, pvr_dev,
							    &fw_mem->sysdata_obj);
	if (IS_ERR(fw_dev->fwif_sysdata)) {
		drm_err(drm_dev, "Unable to allocate FW SYSDATA structure\n");
		err = PTR_ERR(fw_dev->fwif_sysdata);
		goto err_release_mmucache_sync_obj;
	}

	err = pvr_fw_object_create(pvr_dev, PVR_ROGUE_FAULT_PAGE_SIZE,
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   fw_fault_page_init, NULL, &fw_mem->fault_page_obj);
	if (err) {
		drm_err(drm_dev, "Unable to allocate FW fault page\n");
		goto err_release_sysdata;
	}

	err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_gpu_util_fwcb),
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   fw_gpu_util_fwcb_init, pvr_dev, &fw_mem->gpu_util_fwcb_obj);
	if (err) {
		drm_err(drm_dev, "Unable to allocate GPU util FWCB\n");
		goto err_release_fault_page;
	}

	err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_runtime_cfg),
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   fw_runtime_cfg_init, pvr_dev, &fw_mem->runtime_cfg_obj);
	if (err) {
		drm_err(drm_dev, "Unable to allocate FW runtime config\n");
		goto err_release_gpu_util_fwcb;
	}

	err = pvr_fw_trace_init(pvr_dev);
	if (err)
		goto err_release_runtime_cfg;

	fw_dev->fwif_osdata = pvr_fw_object_create_and_map(pvr_dev,
							   sizeof(*fw_dev->fwif_osdata),
							   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							   fw_osdata_init, pvr_dev,
							   &fw_mem->osdata_obj);
	if (IS_ERR(fw_dev->fwif_osdata)) {
		drm_err(drm_dev, "Unable to allocate FW OSDATA structure\n");
		err = PTR_ERR(fw_dev->fwif_osdata);
		goto err_fw_trace_fini;
	}

	fw_dev->fwif_osinit =
		pvr_fw_object_create_and_map_offset(pvr_dev,
						    fw_dev->fw_heap_info.config_offset +
						    PVR_ROGUE_FWIF_OSINIT_OFFSET,
						    sizeof(*fw_dev->fwif_osinit),
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    fw_osinit_init, pvr_dev, &fw_mem->osinit_obj);
	if (IS_ERR(fw_dev->fwif_osinit)) {
		drm_err(drm_dev, "Unable to allocate FW OSINIT structure\n");
		err = PTR_ERR(fw_dev->fwif_osinit);
		goto err_release_osdata;
	}

	fw_dev->fwif_sysinit =
		pvr_fw_object_create_and_map_offset(pvr_dev,
						    fw_dev->fw_heap_info.config_offset +
						    PVR_ROGUE_FWIF_SYSINIT_OFFSET,
						    sizeof(*fw_dev->fwif_sysinit),
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    fw_sysinit_init, pvr_dev, &fw_mem->sysinit_obj);
	if (IS_ERR(fw_dev->fwif_sysinit)) {
		drm_err(drm_dev, "Unable to allocate FW SYSINIT structure\n");
		err = PTR_ERR(fw_dev->fwif_sysinit);
		goto err_release_osinit;
	}

	return 0;

err_release_osinit:
	pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj);

err_release_osdata:
	pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj);

err_fw_trace_fini:
	pvr_fw_trace_fini(pvr_dev);

err_release_runtime_cfg:
	pvr_fw_object_destroy(fw_mem->runtime_cfg_obj);

err_release_gpu_util_fwcb:
	pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj);

err_release_fault_page:
	pvr_fw_object_destroy(fw_mem->fault_page_obj);

err_release_sysdata:
	pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj);

err_release_mmucache_sync_obj:
	pvr_fw_object_destroy(fw_mem->mmucache_sync_obj);

err_release_hwrinfobuf:
	pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj);

err_release_power_sync:
	pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj);

	return err;
}
static void
pvr_fw_destroy_structures(struct pvr_device *pvr_dev)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;

	pvr_fw_trace_fini(pvr_dev);
	pvr_fw_object_destroy(fw_mem->runtime_cfg_obj);
	pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj);
	pvr_fw_object_destroy(fw_mem->fault_page_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->sysinit_obj);

	pvr_fw_object_destroy(fw_mem->mmucache_sync_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj);
}
/**
 * pvr_fw_process() - Process firmware image, allocate FW memory and create boot
 *                    arguments
 * @pvr_dev: Device pointer.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_fw_object_create_and_map_offset(), or
 *  * Any error returned by pvr_fw_object_create_and_map().
 */
static int
pvr_fw_process(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
	const u8 *fw = pvr_dev->fw_dev.firmware->data;
	const struct pvr_fw_layout_entry *private_data;
	u8 *fw_code_ptr;
	u8 *fw_data_ptr;
	u8 *fw_core_code_ptr;
	u8 *fw_core_data_ptr;
	int err;

	layout_get_sizes(pvr_dev);

	private_data = pvr_fw_find_private_data(pvr_dev);
	if (!private_data)
		return -EINVAL;

	/* Allocate and map memory for firmware sections. */

	/*
	 * Code allocation must be at the start of the firmware heap, otherwise
	 * firmware processor will be unable to boot.
	 *
	 * This has the useful side-effect that for every other object in the
	 * driver, a firmware address of 0 is invalid.
	 */
	fw_code_ptr = pvr_fw_object_create_and_map_offset(pvr_dev, 0, fw_mem->code_alloc_size,
							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							  NULL, NULL, &fw_mem->code_obj);
	if (IS_ERR(fw_code_ptr)) {
		drm_err(drm_dev, "Unable to allocate FW code memory\n");
		return PTR_ERR(fw_code_ptr);
	}

	if (pvr_dev->fw_dev.defs->has_fixed_data_addr()) {
		u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask;

		fw_data_ptr =
			pvr_fw_object_create_and_map_offset(pvr_dev, base_addr,
							    fw_mem->data_alloc_size,
							    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							    NULL, NULL, &fw_mem->data_obj);
	} else {
		fw_data_ptr = pvr_fw_object_create_and_map(pvr_dev, fw_mem->data_alloc_size,
							   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							   NULL, NULL, &fw_mem->data_obj);
	}
	if (IS_ERR(fw_data_ptr)) {
		drm_err(drm_dev, "Unable to allocate FW data memory\n");
		err = PTR_ERR(fw_data_ptr);
		goto err_free_fw_code_obj;
	}

	/* Core code and data sections are optional. */
	if (fw_mem->core_code_alloc_size) {
		fw_core_code_ptr =
			pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_code_alloc_size,
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						     NULL, NULL, &fw_mem->core_code_obj);
		if (IS_ERR(fw_core_code_ptr)) {
			drm_err(drm_dev,
				"Unable to allocate FW core code memory\n");
			err = PTR_ERR(fw_core_code_ptr);
			goto err_free_fw_data_obj;
		}
	} else {
		fw_core_code_ptr = NULL;
	}

	if (fw_mem->core_data_alloc_size) {
		fw_core_data_ptr =
			pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_data_alloc_size,
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						     NULL, NULL, &fw_mem->core_data_obj);
		if (IS_ERR(fw_core_data_ptr)) {
			drm_err(drm_dev,
				"Unable to allocate FW core data memory\n");
			err = PTR_ERR(fw_core_data_ptr);
			goto err_free_fw_core_code_obj;
		}
	} else {
		fw_core_data_ptr = NULL;
	}

	fw_mem->code = kzalloc(fw_mem->code_alloc_size, GFP_KERNEL);
	fw_mem->data = kzalloc(fw_mem->data_alloc_size, GFP_KERNEL);
	if (fw_mem->core_code_alloc_size)
		fw_mem->core_code = kzalloc(fw_mem->core_code_alloc_size, GFP_KERNEL);
	if (fw_mem->core_data_alloc_size)
		fw_mem->core_data = kzalloc(fw_mem->core_data_alloc_size, GFP_KERNEL);

	if (!fw_mem->code || !fw_mem->data ||
	    (!fw_mem->core_code && fw_mem->core_code_alloc_size) ||
	    (!fw_mem->core_data && fw_mem->core_data_alloc_size)) {
		err = -ENOMEM;
		goto err_free_kdata;
	}

	err = pvr_dev->fw_dev.defs->fw_process(pvr_dev, fw,
					       fw_mem->code, fw_mem->data, fw_mem->core_code,
					       fw_mem->core_data, fw_mem->core_code_alloc_size);
	if (err)
		goto err_free_fw_core_data_obj;

	memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size);
	memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size);
	if (fw_mem->core_code)
		memcpy(fw_core_code_ptr, fw_mem->core_code, fw_mem->core_code_alloc_size);
	if (fw_mem->core_data)
		memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size);

	/* We're finished with the firmware section memory on the CPU, unmap. */
	if (fw_core_data_ptr)
		pvr_fw_object_vunmap(fw_mem->core_data_obj);
	if (fw_core_code_ptr)
		pvr_fw_object_vunmap(fw_mem->core_code_obj);
	pvr_fw_object_vunmap(fw_mem->data_obj);
	fw_data_ptr = NULL;
	pvr_fw_object_vunmap(fw_mem->code_obj);
	fw_code_ptr = NULL;

	err = pvr_fw_create_fwif_connection_ctl(pvr_dev);
	if (err)
		goto err_free_fw_core_data_obj;

	return 0;

err_free_kdata:
	kfree(fw_mem->core_data);
	kfree(fw_mem->core_code);
	kfree(fw_mem->data);
	kfree(fw_mem->code);

err_free_fw_core_data_obj:
	if (fw_core_data_ptr)
		pvr_fw_object_unmap_and_destroy(fw_mem->core_data_obj);

err_free_fw_core_code_obj:
	if (fw_core_code_ptr)
		pvr_fw_object_unmap_and_destroy(fw_mem->core_code_obj);

err_free_fw_data_obj:
	if (fw_data_ptr)
		pvr_fw_object_vunmap(fw_mem->data_obj);
	pvr_fw_object_destroy(fw_mem->data_obj);

err_free_fw_code_obj:
	if (fw_code_ptr)
		pvr_fw_object_vunmap(fw_mem->code_obj);
	pvr_fw_object_destroy(fw_mem->code_obj);

	return err;
}
static int
pvr_copy_to_fw(struct pvr_fw_object *dest_obj, u8 *src_ptr, u32 size)
{
	u8 *dest_ptr = pvr_fw_object_vmap(dest_obj);

	if (IS_ERR(dest_ptr))
		return PTR_ERR(dest_ptr);

	memcpy(dest_ptr, src_ptr, size);

	pvr_fw_object_vunmap(dest_obj);

	return 0;
}
static int
pvr_fw_reinit_code_data(struct pvr_device *pvr_dev)
{
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
	int err;

	err = pvr_copy_to_fw(fw_mem->code_obj, fw_mem->code, fw_mem->code_alloc_size);
	if (err)
		return err;

	err = pvr_copy_to_fw(fw_mem->data_obj, fw_mem->data, fw_mem->data_alloc_size);
	if (err)
		return err;

	if (fw_mem->core_code) {
		err = pvr_copy_to_fw(fw_mem->core_code_obj, fw_mem->core_code,
				     fw_mem->core_code_alloc_size);
		if (err)
			return err;
	}

	if (fw_mem->core_data) {
		err = pvr_copy_to_fw(fw_mem->core_data_obj, fw_mem->core_data,
				     fw_mem->core_data_alloc_size);
		if (err)
			return err;
	}

	return 0;
}
static void
pvr_fw_cleanup(struct pvr_device *pvr_dev)
{
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;

	pvr_fw_fini_fwif_connection_ctl(pvr_dev);
	if (fw_mem->core_code_obj)
		pvr_fw_object_destroy(fw_mem->core_code_obj);
	if (fw_mem->core_data_obj)
		pvr_fw_object_destroy(fw_mem->core_data_obj);
	pvr_fw_object_destroy(fw_mem->code_obj);
	pvr_fw_object_destroy(fw_mem->data_obj);
}
/**
 * pvr_wait_for_fw_boot() - Wait for firmware to finish booting
 * @pvr_dev: Target PowerVR device.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%ETIMEDOUT if firmware fails to boot within timeout.
 */
int
pvr_wait_for_fw_boot(struct pvr_device *pvr_dev)
{
	ktime_t deadline = ktime_add_us(ktime_get(), FW_BOOT_TIMEOUT_USEC);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	while (ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) {
		if (READ_ONCE(fw_dev->fwif_sysinit->firmware_started))
			return 0;
	}

	return -ETIMEDOUT;
}
/**
 * pvr_fw_heap_info_init() - Calculate size and masks for FW heap
 * @pvr_dev: Target PowerVR device.
 * @log2_size: Log2 of raw heap size.
 * @reserved_size: Size of reserved area of heap, in bytes. May be zero.
 */
void
pvr_fw_heap_info_init(struct pvr_device *pvr_dev, u32 log2_size, u32 reserved_size)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->fw_heap_info.gpu_addr = PVR_ROGUE_FW_MAIN_HEAP_BASE;
	fw_dev->fw_heap_info.log2_size = log2_size;
	fw_dev->fw_heap_info.reserved_size = reserved_size;
	fw_dev->fw_heap_info.raw_size = 1 << fw_dev->fw_heap_info.log2_size;
	fw_dev->fw_heap_info.offset_mask = fw_dev->fw_heap_info.raw_size - 1;
	fw_dev->fw_heap_info.config_offset = fw_dev->fw_heap_info.raw_size -
					     PVR_ROGUE_FW_CONFIG_HEAP_SIZE;
	fw_dev->fw_heap_info.size = fw_dev->fw_heap_info.raw_size -
				    (PVR_ROGUE_FW_CONFIG_HEAP_SIZE + reserved_size);
}
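
/*
 * Worked example (illustrative): for log2_size = 25 and reserved_size = 0:
 *
 *	raw_size      = 1 << 25 = 32M
 *	offset_mask   = 0x1ffffff
 *	config_offset = 32M - 192K
 *	size          = 32M - (192K + 0)
 *
 * i.e. the usable main heap is everything below the 192K config area.
 */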
/**
 * pvr_fw_validate_init_device_info() - Validate firmware and initialise device information
 * @pvr_dev: Target PowerVR device.
 *
 * This function must be called before querying device information.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%EINVAL if firmware validation fails.
 */
int
pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev)
{
	int err;

	err = pvr_fw_validate(pvr_dev);
	if (err)
		return err;

	return pvr_fw_get_device_info(pvr_dev);
}
/**
 * pvr_fw_init() - Initialise and boot firmware
 * @pvr_dev: Target PowerVR device
 *
 * On successful completion of the function the PowerVR device will be
 * initialised and ready to use.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EINVAL on invalid firmware image,
 *  * -%ENOMEM on out of memory, or
 *  * -%ETIMEDOUT if firmware processor fails to boot or on register poll timeout.
 */
int
pvr_fw_init(struct pvr_device *pvr_dev)
{
	u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
	u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb.rtn);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	int err;

	if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_META)
		fw_dev->defs = &pvr_fw_defs_meta;
	else if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_MIPS)
		fw_dev->defs = &pvr_fw_defs_mips;
	else
		return -EINVAL;

	err = fw_dev->defs->init(pvr_dev);
	if (err)
		return err;

	drm_mm_init(&fw_dev->fw_mm, ROGUE_FW_HEAP_BASE, fw_dev->fw_heap_info.raw_size);
	fw_dev->fw_mm_base = ROGUE_FW_HEAP_BASE;
	spin_lock_init(&fw_dev->fw_mm_lock);

	INIT_LIST_HEAD(&fw_dev->fw_objs.list);
	err = drmm_mutex_init(from_pvr_device(pvr_dev), &fw_dev->fw_objs.lock);
	if (err)
		goto err_mm_takedown;

	err = pvr_fw_process(pvr_dev);
	if (err)
		goto err_mm_takedown;

	/* Initialise KCCB and FWCCB. */
	err = pvr_kccb_init(pvr_dev);
	if (err)
		goto err_fw_cleanup;

	err = pvr_fwccb_init(pvr_dev);
	if (err)
		goto err_kccb_fini;

	/* Allocate memory for KCCB return slots. */
	pvr_dev->kccb.rtn = pvr_fw_object_create_and_map(pvr_dev, kccb_rtn_size,
							 PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							 NULL, NULL, &pvr_dev->kccb.rtn_obj);
	if (IS_ERR(pvr_dev->kccb.rtn)) {
		err = PTR_ERR(pvr_dev->kccb.rtn);
		goto err_fwccb_fini;
	}

	err = pvr_fw_create_structures(pvr_dev);
	if (err)
		goto err_kccb_rtn_release;

	err = pvr_fw_start(pvr_dev);
	if (err)
		goto err_destroy_structures;

	err = pvr_wait_for_fw_boot(pvr_dev);
	if (err) {
		drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
		goto err_fw_stop;
	}

	fw_dev->booted = true;

	return 0;

err_fw_stop:
	pvr_fw_stop(pvr_dev);

err_destroy_structures:
	pvr_fw_destroy_structures(pvr_dev);

err_kccb_rtn_release:
	pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj);

err_fwccb_fini:
	pvr_ccb_fini(&pvr_dev->fwccb);

err_kccb_fini:
	pvr_kccb_fini(pvr_dev);

err_fw_cleanup:
	pvr_fw_cleanup(pvr_dev);

err_mm_takedown:
	drm_mm_takedown(&fw_dev->fw_mm);

	if (fw_dev->defs->fini)
		fw_dev->defs->fini(pvr_dev);

	return err;
}
/**
 * pvr_fw_fini() - Shutdown firmware processor and free associated memory
 * @pvr_dev: Target PowerVR device
 */
void
pvr_fw_fini(struct pvr_device *pvr_dev)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->booted = false;

	pvr_fw_destroy_structures(pvr_dev);
	pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj);

	/*
	 * Ensure FWCCB worker has finished executing before destroying FWCCB. The IRQ handler has
	 * been unregistered at this point so no new work should be being submitted.
	 */
	pvr_ccb_fini(&pvr_dev->fwccb);
	pvr_kccb_fini(pvr_dev);
	pvr_fw_cleanup(pvr_dev);

	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
	WARN_ON(!list_empty(&pvr_dev->fw_dev.fw_objs.list));
	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	drm_mm_takedown(&fw_dev->fw_mm);

	if (fw_dev->defs->fini)
		fw_dev->defs->fini(pvr_dev);
}
/**
 * pvr_fw_mts_schedule() - Schedule work via an MTS kick
 * @pvr_dev: Target PowerVR device
 * @val: Kick mask. Should be a combination of %ROGUE_CR_MTS_SCHEDULE_*
 * flags.
 */
void
pvr_fw_mts_schedule(struct pvr_device *pvr_dev, u32 val)
{
	/* Ensure memory is flushed before kicking MTS. */
	wmb();

	pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_SCHEDULE, val);

	/* Ensure the MTS kick goes through before continuing. */
	mb();
}
/**
 * pvr_fw_structure_cleanup() - Send FW cleanup request for an object
 * @pvr_dev: Target PowerVR device.
 * @type: Type of object to cleanup. Must be one of &enum rogue_fwif_cleanup_type.
 * @fw_obj: Pointer to FW object containing object to cleanup.
 * @offset: Offset within FW object of object to cleanup.
 *
 * Returns:
 *  * 0 on success,
 *  * -EBUSY if object is busy,
 *  * -ETIMEDOUT on timeout, or
 *  * -EIO if device is lost.
 */
int
pvr_fw_structure_cleanup(struct pvr_device *pvr_dev, u32 type, struct pvr_fw_object *fw_obj,
			 u32 offset)
{
	struct rogue_fwif_kccb_cmd cmd;
	int slot_nr;
	int idx;
	int err;
	u32 rtn;

	struct rogue_fwif_cleanup_request *cleanup_req = &cmd.cmd_data.cleanup_data;

	down_read(&pvr_dev->reset_sem);

	if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) {
		err = -EIO;
		goto err_up_read;
	}

	cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_CLEANUP;
	cmd.kccb_flags = 0;
	cleanup_req->cleanup_type = type;

	switch (type) {
	case ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT:
		pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
						 &cleanup_req->cleanup_data.context_fw_addr);
		break;
	case ROGUE_FWIF_CLEANUP_HWRTDATA:
		pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
						 &cleanup_req->cleanup_data.hwrt_data_fw_addr);
		break;
	case ROGUE_FWIF_CLEANUP_FREELIST:
		pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
						 &cleanup_req->cleanup_data.freelist_fw_addr);
		break;
	default:
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot_nr);
	if (err)
		goto err_drm_dev_exit;

	err = pvr_kccb_wait_for_completion(pvr_dev, slot_nr, HZ, &rtn);
	if (err)
		goto err_drm_dev_exit;

	if (rtn & ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)
		err = -EBUSY;

err_drm_dev_exit:
	drm_dev_exit(idx);

err_up_read:
	up_read(&pvr_dev->reset_sem);

	return err;
}
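
/*
 * Example use (a sketch; "free_list" is a hypothetical wrapper holding a FW
 * freelist at offset 0 of its backing FW object):
 *
 *	err = pvr_fw_structure_cleanup(pvr_dev, ROGUE_FWIF_CLEANUP_FREELIST,
 *				       free_list->fw_obj, 0);
 *
 * -EBUSY here means the FW reported ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY in
 * the KCCB return slot, i.e. the object is still in use and cleanup should be
 * retried later.
 */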
/**
 * pvr_fw_object_fw_map() - Map a FW object in firmware address space
 * @pvr_dev: Device pointer.
 * @fw_obj: FW object to map.
 * @dev_addr: Desired address in device space, if a specific address is
 *            required. 0 otherwise.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%EINVAL if @fw_obj is already mapped but has no references, or
 *  * Any error returned by DRM.
 */
static int
pvr_fw_object_fw_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj, u64 dev_addr)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	int err;

	spin_lock(&fw_dev->fw_mm_lock);

	if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
		err = -EINVAL;
		goto err_unlock;
	}

	if (!dev_addr) {
		/*
		 * Allocate from the main heap only (firmware heap minus
		 * config space).
		 */
		err = drm_mm_insert_node_in_range(&fw_dev->fw_mm, &fw_obj->fw_mm_node,
						  gem_obj->size, 0, 0,
						  fw_dev->fw_heap_info.gpu_addr,
						  fw_dev->fw_heap_info.gpu_addr +
						  fw_dev->fw_heap_info.size, 0);
		if (err)
			goto err_unlock;
	} else {
		fw_obj->fw_mm_node.start = dev_addr;
		fw_obj->fw_mm_node.size = gem_obj->size;
		err = drm_mm_reserve_node(&fw_dev->fw_mm, &fw_obj->fw_mm_node);
		if (err)
			goto err_unlock;
	}

	spin_unlock(&fw_dev->fw_mm_lock);

	/* Map object on GPU. */
	err = fw_dev->defs->vm_map(pvr_dev, fw_obj);
	if (err)
		goto err_remove_node;

	fw_obj->fw_addr_offset = (u32)(fw_obj->fw_mm_node.start - fw_dev->fw_mm_base);

	return 0;

err_remove_node:
	spin_lock(&fw_dev->fw_mm_lock);
	drm_mm_remove_node(&fw_obj->fw_mm_node);

err_unlock:
	spin_unlock(&fw_dev->fw_mm_lock);

	return err;
}
/**
 * pvr_fw_object_fw_unmap() - Unmap a previously mapped FW object
 * @fw_obj: FW object to unmap.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%EINVAL if object is not currently mapped.
 */
static int
pvr_fw_object_fw_unmap(struct pvr_fw_object *fw_obj)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->defs->vm_unmap(pvr_dev, fw_obj);

	spin_lock(&fw_dev->fw_mm_lock);

	if (!drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
		spin_unlock(&fw_dev->fw_mm_lock);
		return -EINVAL;
	}

	drm_mm_remove_node(&fw_obj->fw_mm_node);

	spin_unlock(&fw_dev->fw_mm_lock);

	return 0;
}
static void *
pvr_fw_object_create_and_map_common(struct pvr_device *pvr_dev, size_t size,
				    u64 flags, u64 dev_addr,
				    void (*init)(void *cpu_ptr, void *priv),
				    void *init_priv, struct pvr_fw_object **fw_obj_out)
{
	struct pvr_fw_object *fw_obj;
	void *cpu_ptr;
	int err;

	/* %DRM_PVR_BO_PM_FW_PROTECT is implicit for FW objects. */
	flags |= DRM_PVR_BO_PM_FW_PROTECT;

	fw_obj = kzalloc(sizeof(*fw_obj), GFP_KERNEL);
	if (!fw_obj)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fw_obj->node);
	fw_obj->init = init;
	fw_obj->init_priv = init_priv;

	fw_obj->gem = pvr_gem_object_create(pvr_dev, size, flags);
	if (IS_ERR(fw_obj->gem)) {
		err = PTR_ERR(fw_obj->gem);
		fw_obj->gem = NULL;
		goto err_put_object;
	}

	err = pvr_fw_object_fw_map(pvr_dev, fw_obj, dev_addr);
	if (err)
		goto err_put_object;

	cpu_ptr = pvr_fw_object_vmap(fw_obj);
	if (IS_ERR(cpu_ptr)) {
		err = PTR_ERR(cpu_ptr);
		goto err_put_object;
	}

	*fw_obj_out = fw_obj;

	if (fw_obj->init)
		fw_obj->init(cpu_ptr, fw_obj->init_priv);

	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
	list_add_tail(&fw_obj->node, &pvr_dev->fw_dev.fw_objs.list);
	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	return cpu_ptr;

err_put_object:
	pvr_fw_object_destroy(fw_obj);

	return ERR_PTR(err);
}
/**
 * pvr_fw_object_create() - Create a FW object and map to firmware
 * @pvr_dev: PowerVR device pointer.
 * @size: Size of object, in bytes.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 * @init: Initialisation callback.
 * @init_priv: Private pointer to pass to initialisation callback.
 * @fw_obj_out: Pointer to location to store created object pointer.
 *
 * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently,
 * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
 * set.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_fw_object_create_common().
 */
int
pvr_fw_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags,
		     void (*init)(void *cpu_ptr, void *priv), void *init_priv,
		     struct pvr_fw_object **fw_obj_out)
{
	void *cpu_ptr;

	cpu_ptr = pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv,
						      fw_obj_out);
	if (IS_ERR(cpu_ptr))
		return PTR_ERR(cpu_ptr);

	pvr_fw_object_vunmap(*fw_obj_out);

	return 0;
}
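
/*
 * Example use (taken from pvr_fw_create_structures() earlier in this file):
 * the init callback runs against the CPU mapping before the mapping is
 * released, and runs again on pvr_fw_hard_reset() for objects without
 * PVR_BO_FW_NO_CLEAR_ON_RESET:
 *
 *	err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_gpu_util_fwcb),
 *				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
 *				   fw_gpu_util_fwcb_init, pvr_dev,
 *				   &fw_mem->gpu_util_fwcb_obj);
 */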
/**
 * pvr_fw_object_create_and_map() - Create a FW object and map to firmware and CPU
 * @pvr_dev: PowerVR device pointer.
 * @size: Size of object, in bytes.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 * @init: Initialisation callback.
 * @init_priv: Private pointer to pass to initialisation callback.
 * @fw_obj_out: Pointer to location to store created object pointer.
 *
 * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently,
 * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
 * set.
 *
 * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
 * mapping.
 *
 * Returns:
 *  * Pointer to CPU mapping of newly created object, or
 *  * Any error returned by pvr_fw_object_create(), or
 *  * Any error returned by pvr_fw_object_vmap().
 */
void *
pvr_fw_object_create_and_map(struct pvr_device *pvr_dev, size_t size, u64 flags,
			     void (*init)(void *cpu_ptr, void *priv),
			     void *init_priv, struct pvr_fw_object **fw_obj_out)
{
	return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv,
						   fw_obj_out);
}
/**
 * pvr_fw_object_create_and_map_offset() - Create a FW object and map to
 * firmware at the provided offset and to the CPU.
 * @pvr_dev: PowerVR device pointer.
 * @dev_offset: Base address of desired FW mapping, offset from start of FW heap.
 * @size: Size of object, in bytes.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 * @init: Initialisation callback.
 * @init_priv: Private pointer to pass to initialisation callback.
 * @fw_obj_out: Pointer to location to store created object pointer.
 *
 * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently,
 * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
 * set.
 *
 * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
 * mapping.
 *
 * Returns:
 *  * Pointer to CPU mapping of newly created object, or
 *  * Any error returned by pvr_fw_object_create(), or
 *  * Any error returned by pvr_fw_object_vmap().
 */
void *
pvr_fw_object_create_and_map_offset(struct pvr_device *pvr_dev,
				    u32 dev_offset, size_t size, u64 flags,
				    void (*init)(void *cpu_ptr, void *priv),
				    void *init_priv, struct pvr_fw_object **fw_obj_out)
{
	u64 dev_addr = pvr_dev->fw_dev.fw_mm_base + dev_offset;

	return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, dev_addr, init, init_priv,
						   fw_obj_out);
}
/**
 * pvr_fw_object_destroy() - Destroy a pvr_fw_object
 * @fw_obj: Pointer to object to destroy.
 */
void pvr_fw_object_destroy(struct pvr_fw_object *fw_obj)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev);

	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
	list_del(&fw_obj->node);
	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
		/* If we can't unmap, leak the memory. */
		if (WARN_ON(pvr_fw_object_fw_unmap(fw_obj)))
			return;
	}

	if (fw_obj->gem)
		pvr_gem_object_put(fw_obj->gem);

	kfree(fw_obj);
}
/**
 * pvr_fw_object_get_fw_addr_offset() - Return address of object in firmware address space, with
 * given offset.
 * @fw_obj: Pointer to object.
 * @offset: Desired offset from start of object.
 * @fw_addr_out: Location to store address to.
 */
void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset, u32 *fw_addr_out)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(pvr_obj)->dev);

	*fw_addr_out = pvr_dev->fw_dev.defs->get_fw_addr_with_offset(fw_obj, offset);
}
/**
 * pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW
 * structures
 * @pvr_dev: Device pointer
 *
 * If this function returns an error then the caller must regard the device as lost.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_fw_init_dev_structures() or pvr_fw_reset_all().
 */
int
pvr_fw_hard_reset(struct pvr_device *pvr_dev)
{
	struct list_head *pos;
	int err;

	/* Reset all FW objects */
	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);

	list_for_each(pos, &pvr_dev->fw_dev.fw_objs.list) {
		struct pvr_fw_object *fw_obj = container_of(pos, struct pvr_fw_object, node);
		void *cpu_ptr = pvr_fw_object_vmap(fw_obj);

		WARN_ON(IS_ERR(cpu_ptr));

		if (!(fw_obj->gem->flags & PVR_BO_FW_NO_CLEAR_ON_RESET)) {
			memset(cpu_ptr, 0, pvr_gem_object_size(fw_obj->gem));

			if (fw_obj->init)
				fw_obj->init(cpu_ptr, fw_obj->init_priv);
		}

		pvr_fw_object_vunmap(fw_obj);
	}

	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	err = pvr_fw_reinit_code_data(pvr_dev);
);