// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci_ids.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
#include "icp_qat_hal.h"
#include "icp_qat_fw_loader_handle.h"

#define UWORD_CPYBUF_SIZE 1024
#define INVLD_UWORD 0xffffffffffull
#define PID_MINOR_REV 0xf
#define PID_MAJOR_REV (0xf << 4)
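
/*
 * Microcode loader (uclo) for Intel QAT devices. This file parses the
 * firmware container formats consumed by the loader -- UOF (unsigned
 * object file), SUOF (signed UOF, authenticated by the device) and MOF
 * (a container bundling UOF/SUOF objects plus a symbol table) -- and
 * writes the microcode into the accelerator engines' (AEs') ustore.
 */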
static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
				 unsigned int ae, unsigned int image_num)
{
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_encapme *encap_image;
	struct icp_qat_uclo_page *page = NULL;
	struct icp_qat_uclo_aeslice *ae_slice = NULL;

	ae_data = &obj_handle->ae_data[ae];
	encap_image = &obj_handle->ae_uimage[image_num];
	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
	ae_slice->encap_image = encap_image;

	if (encap_image->img_ptr) {
		ae_slice->ctx_mask_assigned =
					encap_image->img_ptr->ctx_assigned;
		ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
	} else {
		ae_slice->ctx_mask_assigned = 0;
	}
	ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
	if (!ae_slice->region)
		return -ENOMEM;
	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
	if (!ae_slice->page)
		goto out_err;
	page = ae_slice->page;
	page->encap_page = encap_image->page;
	ae_slice->page->region = ae_slice->region;
	ae_data->slice_num++;
	return 0;
out_err:
	kfree(ae_slice->region);
	ae_slice->region = NULL;
	return -ENOMEM;
}

static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
{
	unsigned int i;

	if (!ae_data) {
		pr_err("QAT: bad argument, ae_data is NULL\n");
		return -EINVAL;
	}

	for (i = 0; i < ae_data->slice_num; i++) {
		kfree(ae_data->ae_slices[i].region);
		ae_data->ae_slices[i].region = NULL;
		kfree(ae_data->ae_slices[i].page);
		ae_data->ae_slices[i].page = NULL;
	}
	return 0;
}

static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
				 unsigned int str_offset)
{
	if (!str_table->table_len || str_offset > str_table->table_len)
		return NULL;
	return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
}

static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
{
	int maj = hdr->maj_ver & 0xff;
	int min = hdr->min_ver & 0xff;

	if (hdr->file_id != ICP_QAT_UOF_FID) {
		pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
		return -EINVAL;
	}
	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
		pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
		       maj, min);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
{
	int maj = suof_hdr->maj_ver & 0xff;
	int min = suof_hdr->min_ver & 0xff;

	if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
		pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
		return -EINVAL;
	}
	if (suof_hdr->fw_type != 0) {
		pr_err("QAT: unsupported firmware type\n");
		return -EINVAL;
	}
	if (suof_hdr->num_chunks <= 0x1) {
		pr_err("QAT: SUOF chunk amount is incorrect\n");
		return -EINVAL;
	}
	if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
		pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
		       maj, min);
		return -EINVAL;
	}
	return 0;
}

static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
}

static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}

static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae,
				   struct icp_qat_uof_batch_init
				   *umem_init_header)
{
	struct icp_qat_uof_batch_init *umem_init;

	if (!umem_init_header)
		return;
	umem_init = umem_init_header->next;
	while (umem_init) {
		unsigned int addr, *value, size;

		ae = umem_init->ae;
		addr = umem_init->addr;
		value = umem_init->value;
		size = umem_init->size;
		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
		umem_init = umem_init->next;
	}
}

static void
qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
				 struct icp_qat_uof_batch_init **base)
{
	struct icp_qat_uof_batch_init *umem_init;

	umem_init = *base;
	while (umem_init) {
		struct icp_qat_uof_batch_init *pre;

		pre = umem_init;
		umem_init = umem_init->next;
		kfree(pre);
	}

	*base = NULL;
}

static int qat_uclo_parse_num(char *str, unsigned int *num)
{
	char buf[16] = {0};
	unsigned long ae = 0;
	int i;

	strncpy(buf, str, 15);
	for (i = 0; i < 16; i++) {
		if (!isdigit(buf[i])) {
			buf[i] = '\0';
			break;
		}
	}
	if ((kstrtoul(buf, 10, &ae)))
		return -EFAULT;
	*num = (unsigned int)ae;
	return 0;
}

static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
				     struct icp_qat_uof_initmem *init_mem,
				     unsigned int size_range, unsigned int *ae)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	char *str;

	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
		pr_err("QAT: initmem is out of range\n");
		return -EINVAL;
	}
	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
		pr_err("QAT: Memory scope for init_mem error\n");
		return -EINVAL;
	}
	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
	if (!str) {
		pr_err("QAT: AE name assigned in UOF init table is NULL\n");
		return -EINVAL;
	}
	if (qat_uclo_parse_num(str, ae)) {
		pr_err("QAT: Parse num for AE number failed\n");
		return -EINVAL;
	}
	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
		pr_err("QAT: ae %d out of range\n", *ae);
		return -EINVAL;
	}
	return 0;
}
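
/*
 * Build a linked list of batch-init entries for one init_mem record.
 * The list head accumulates the instruction count in ->size and each
 * node describes one word write (AE, address, value). The head is only
 * allocated on first use, so the error path must not free it unless
 * 'flag' records that it was allocated here.
 */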
static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
					   *handle, struct icp_qat_uof_initmem
					   *init_mem, unsigned int ae,
					   struct icp_qat_uof_batch_init
					   **init_tab_base)
{
	struct icp_qat_uof_batch_init *init_header, *tail;
	struct icp_qat_uof_batch_init *mem_init, *tail_old;
	struct icp_qat_uof_memvar_attr *mem_val_attr;
	unsigned int i, flag = 0;

	mem_val_attr =
		(struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
		sizeof(struct icp_qat_uof_initmem));

	init_header = *init_tab_base;
	if (!init_header) {
		init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
		if (!init_header)
			return -ENOMEM;
		init_header->size = 1;
		*init_tab_base = init_header;
		flag = 1;
	}
	tail_old = init_header;
	while (tail_old->next)
		tail_old = tail_old->next;
	tail = tail_old;
	for (i = 0; i < init_mem->val_attr_num; i++) {
		mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
		if (!mem_init)
			goto out_err;
		mem_init->ae = ae;
		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
		mem_init->value = &mem_val_attr->value;
		mem_init->size = 4;
		mem_init->next = NULL;
		tail->next = mem_init;
		tail = mem_init;
		init_header->size += qat_hal_get_ins_num();
		mem_val_attr++;
	}
	return 0;
out_err:
	/* Do not free the list head unless we allocated it. */
	tail_old = tail_old->next;
	if (flag) {
		kfree(*init_tab_base);
		*init_tab_base = NULL;
	}

	while (tail_old) {
		mem_init = tail_old->next;
		kfree(tail_old);
		tail_old = mem_init;
	}
	return -ENOMEM;
}

static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	if (qat_uclo_fetch_initmem_ae(handle, init_mem,
				      handle->chip_info->lm_size, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->lm_init_tab[ae]))
		return -EINVAL;
	return 0;
}

static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae, ustore_size, uaddr, i;
	struct icp_qat_uclo_aedata *aed;

	ustore_size = obj_handle->ustore_phy_size;
	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->umem_init_tab[ae]))
		return -EINVAL;
	/* set the highest ustore address referenced */
	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
	aed = &obj_handle->ae_data[ae];
	for (i = 0; i < aed->slice_num; i++) {
		if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
			aed->ae_slices[i].encap_image->uwords_num = uaddr;
	}
	return 0;
}

#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_uof_initmem *init_mem)
{
	switch (init_mem->region) {
	case ICP_QAT_UOF_LMEM_REGION:
		if (qat_uclo_init_lmem_seg(handle, init_mem))
			return -EINVAL;
		break;
	case ICP_QAT_UOF_UMEM_REGION:
		if (qat_uclo_init_umem_seg(handle, init_mem))
			return -EINVAL;
		break;
	default:
		pr_err("QAT: initmem region error. region type=0x%x\n",
		       init_mem->region);
		return -EINVAL;
	}
	return 0;
}
static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
				struct icp_qat_uclo_encapme *image)
{
	unsigned int i;
	struct icp_qat_uclo_encap_page *page;
	struct icp_qat_uof_image *uof_image;
	unsigned char ae;
	unsigned int ustore_size;
	unsigned int patt_pos;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long cfg_ae_mask = handle->cfg_ae_mask;
	u64 *fill_data;

	uof_image = image->img_ptr;
	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64),
			    GFP_KERNEL);
	if (!fill_data)
		return -ENOMEM;
	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
		memcpy(&fill_data[i], &uof_image->fill_pattern,
		       sizeof(u64));
	page = image->page;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
			continue;

		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
		patt_pos = page->beg_addr_p + page->micro_words_num;

		qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
				  page->beg_addr_p, &fill_data[0]);
		qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
				  ustore_size - patt_pos + 1,
				  &fill_data[page->beg_addr_p]);
	}
	kfree(fill_data);
	return 0;
}

static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
	int i, ae;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
		if (initmem->num_in_bytes) {
			if (qat_uclo_init_ae_memory(handle, initmem))
				return -EINVAL;
		}
		initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
			(uintptr_t)initmem +
			sizeof(struct icp_qat_uof_initmem)) +
			(sizeof(struct icp_qat_uof_memvar_attr) *
			initmem->val_attr_num));
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		if (qat_hal_batch_wr_lm(handle, ae,
					obj_handle->lm_init_tab[ae])) {
			pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
			return -EINVAL;
		}
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->lm_init_tab[ae]);
		qat_uclo_batch_wr_umem(handle, ae,
				       obj_handle->umem_init_tab[ae]);
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->umem_init_tab[ae]);
	}
	return 0;
}

static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
				 char *chunk_id, void *cur)
{
	int i;
	struct icp_qat_uof_chunkhdr *chunk_hdr =
	    (struct icp_qat_uof_chunkhdr *)
	    ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));

	for (i = 0; i < obj_hdr->num_chunks; i++) {
		if ((cur < (void *)&chunk_hdr[i]) &&
		    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			return &chunk_hdr[i];
		}
	}
	return NULL;
}
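
/*
 * CRC-16/CCITT (polynomial 0x1021) step over one input byte; used for
 * the UOF/SUOF/MOF header and chunk checksums below.
 */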
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	int i;
	unsigned int topbit = 1 << 0xF;
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);

	reg ^= inbyte << 0x8;
	for (i = 0; i < 0x8; i++) {
		if (reg & topbit)
			reg = (reg << 1) ^ 0x1021;
		else
			reg <<= 1;
	}
	return reg & 0xFFFF;
}

static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int chksum = 0;

	if (ptr)
		while (num--)
			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
	return chksum;
}

static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
		   char *chunk_id)
{
	struct icp_qat_uof_filechunkhdr *file_chunk;
	struct icp_qat_uclo_objhdr *obj_hdr;
	char *chunk;
	int i;

	file_chunk = (struct icp_qat_uof_filechunkhdr *)
		(buf + sizeof(struct icp_qat_uof_filehdr));
	for (i = 0; i < file_hdr->num_chunks; i++) {
		if (!strncmp(file_chunk->chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			chunk = buf + file_chunk->offset;
			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
				chunk, file_chunk->size))
				break;
			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
			if (!obj_hdr)
				break;
			obj_hdr->file_buff = chunk;
			obj_hdr->checksum = file_chunk->checksum;
			obj_hdr->size = file_chunk->size;
			return obj_hdr;
		}
		file_chunk++;
	}
	return NULL;
}

static int
qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
			    struct icp_qat_uof_image *image)
{
	struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
	struct icp_qat_uof_objtable *neigh_reg_tab;
	struct icp_qat_uof_code_page *code_page;

	code_page = (struct icp_qat_uof_code_page *)
			((char *)image + sizeof(struct icp_qat_uof_image));
	uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		     code_page->uc_var_tab_offset);
	imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		      code_page->imp_var_tab_offset);
	imp_expr_tab = (struct icp_qat_uof_objtable *)
		       (encap_uof_obj->beg_uof +
		       code_page->imp_expr_tab_offset);
	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
	    imp_expr_tab->entry_num) {
		pr_err("QAT: UOF can't contain imported variable to be parsed\n");
		return -EINVAL;
	}
	neigh_reg_tab = (struct icp_qat_uof_objtable *)
			(encap_uof_obj->beg_uof +
			code_page->neigh_reg_tab_offset);
	if (neigh_reg_tab->entry_num) {
		pr_err("QAT: UOF can't contain neighbor register table\n");
		return -EINVAL;
	}
	if (image->numpages > 1) {
		pr_err("QAT: UOF can't contain multiple pages\n");
		return -EINVAL;
	}
	if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
		pr_err("QAT: UOF can't use shared control store feature\n");
		return -EFAULT;
	}
	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
		pr_err("QAT: UOF can't use reloadable feature\n");
		return -EFAULT;
	}
	return 0;
}

static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
				     *encap_uof_obj,
				     struct icp_qat_uof_image *img,
				     struct icp_qat_uclo_encap_page *page)
{
	struct icp_qat_uof_code_page *code_page;
	struct icp_qat_uof_code_area *code_area;
	struct icp_qat_uof_objtable *uword_block_tab;
	struct icp_qat_uof_uword_block *uwblock;
	int i;

	code_page = (struct icp_qat_uof_code_page *)
			((char *)img + sizeof(struct icp_qat_uof_image));
	page->def_page = code_page->def_page;
	page->page_region = code_page->page_region;
	page->beg_addr_v = code_page->beg_addr_v;
	page->beg_addr_p = code_page->beg_addr_p;
	code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
						code_page->code_area_offset);
	page->micro_words_num = code_area->micro_words_num;
	uword_block_tab = (struct icp_qat_uof_objtable *)
			  (encap_uof_obj->beg_uof +
			  code_area->uword_block_tab);
	page->uwblock_num = uword_block_tab->entry_num;
	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
			sizeof(struct icp_qat_uof_objtable));
	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
	for (i = 0; i < uword_block_tab->entry_num; i++)
		page->uwblock[i].micro_words =
		(uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}

static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
			       struct icp_qat_uclo_encapme *ae_uimage,
			       int max_image)
{
	int i, j;
	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
	struct icp_qat_uof_image *image;
	struct icp_qat_uof_objtable *ae_regtab;
	struct icp_qat_uof_objtable *init_reg_sym_tab;
	struct icp_qat_uof_objtable *sbreak_tab;
	struct icp_qat_uof_encap_obj *encap_uof_obj =
					&obj_handle->encap_uof_obj;

	for (j = 0; j < max_image; j++) {
		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
						ICP_QAT_UOF_IMAG, chunk_hdr);
		if (!chunk_hdr)
			break;
		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
						     chunk_hdr->offset);
		ae_regtab = (struct icp_qat_uof_objtable *)
			   (image->reg_tab_offset +
			   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
		ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
			(((char *)ae_regtab) +
			sizeof(struct icp_qat_uof_objtable));
		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
				   (image->init_reg_sym_tab +
				   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
		ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
			(((char *)init_reg_sym_tab) +
			sizeof(struct icp_qat_uof_objtable));
		sbreak_tab = (struct icp_qat_uof_objtable *)
			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
		ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
				      (((char *)sbreak_tab) +
				      sizeof(struct icp_qat_uof_objtable));
		ae_uimage[j].img_ptr = image;
		if (qat_uclo_check_image_compat(encap_uof_obj, image))
			goto out_err;
		ae_uimage[j].page =
			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
				GFP_KERNEL);
		if (!ae_uimage[j].page)
			goto out_err;
		qat_uclo_map_image_page(encap_uof_obj, image,
					ae_uimage[j].page);
	}
	return j;
out_err:
	for (i = 0; i < j; i++)
		kfree(ae_uimage[i].page);
	return 0;
}

static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
{
	int i, ae;
	int mflag = 0;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long cfg_ae_mask = handle->cfg_ae_mask;

	for_each_set_bit(ae, &ae_mask, max_ae) {
		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		for (i = 0; i < obj_handle->uimage_num; i++) {
			if (!test_bit(ae, (unsigned long *)
			    &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
				continue;
			mflag = 1;
			if (qat_uclo_init_ae_data(obj_handle, ae, i))
				return -EINVAL;
		}
	}
	if (!mflag) {
		pr_err("QAT: uimage uses AE not set\n");
		return -EINVAL;
	}
	return 0;
}

static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
		       char *tab_name, struct icp_qat_uof_strtable *str_table)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
					obj_hdr->file_buff, tab_name, NULL);
	if (chunk_hdr) {
		int hdr_size;

		memcpy(&str_table->table_len, obj_hdr->file_buff +
		       chunk_hdr->offset, sizeof(str_table->table_len));
		hdr_size = (char *)&str_table->strings - (char *)str_table;
		str_table->strings = (uintptr_t)obj_hdr->file_buff +
					chunk_hdr->offset + hdr_size;
		return str_table;
	}
	return NULL;
}

static void
qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
					ICP_QAT_UOF_IMEM, NULL);
	if (chunk_hdr) {
		memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
			chunk_hdr->offset, sizeof(unsigned int));
		init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
		(encap_uof_obj->beg_uof + chunk_hdr->offset +
		sizeof(unsigned int));
	}
}

static unsigned int
qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
{
	switch (handle->pci_dev->device) {
	case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
		return ICP_QAT_AC_895XCC_DEV_TYPE;
	case PCI_DEVICE_ID_INTEL_QAT_C62X:
		return ICP_QAT_AC_C62X_DEV_TYPE;
	case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
		return ICP_QAT_AC_C3XXX_DEV_TYPE;
	case ADF_4XXX_PCI_DEVICE_ID:
		return ICP_QAT_AC_4XXX_A_DEV_TYPE;
	default:
		pr_err("QAT: unsupported device 0x%x\n",
		       handle->pci_dev->device);
		return 0;
	}
}

static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
{
	unsigned int maj_ver, prod_type = obj_handle->prod_type;

	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
		pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
		       obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
		       prod_type);
		return -EINVAL;
	}
	maj_ver = obj_handle->prod_rev & 0xff;
	if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
	    obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
		pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
		return -EINVAL;
	}
	return 0;
}
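
/*
 * Dispatch one register initialization to the HAL. ABS register types
 * are context-independent, so the context mask is cleared before
 * falling through to the matching REL handler.
 */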
static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned char ctx_mask,
			     enum icp_qat_uof_regtype reg_type,
			     unsigned short reg_addr, unsigned int value)
{
	switch (reg_type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		ctx_mask = 0;
		fallthrough;
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
					reg_addr, value);
	case ICP_SR_ABS:
	case ICP_DR_ABS:
	case ICP_SR_RD_ABS:
	case ICP_DR_RD_ABS:
		ctx_mask = 0;
		fallthrough;
	case ICP_SR_REL:
	case ICP_DR_REL:
	case ICP_SR_RD_REL:
	case ICP_DR_RD_REL:
		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_SR_WR_ABS:
	case ICP_DR_WR_ABS:
		ctx_mask = 0;
		fallthrough;
	case ICP_SR_WR_REL:
	case ICP_DR_WR_REL:
		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_NEIGH_REL:
		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
	default:
		pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
		return -EFAULT;
	}
	return 0;
}

static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
				 unsigned char ae,
				 struct icp_qat_uclo_encapme *encap_ae)
{
	unsigned int i;
	unsigned char ctx_mask;
	struct icp_qat_uof_init_regsym *init_regsym;

	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
	    ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;

	for (i = 0; i < encap_ae->init_regsym_num; i++) {
		unsigned int exp_res;

		init_regsym = &encap_ae->init_regsym[i];
		exp_res = init_regsym->value;
		switch (init_regsym->init_type) {
		case ICP_QAT_UOF_INIT_REG:
			qat_uclo_init_reg(handle, ae, ctx_mask,
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_REG_CTX:
			/* check if ctx is appropriate for the ctxMode */
			if (!((1 << init_regsym->ctx) & ctx_mask)) {
				pr_err("QAT: invalid ctx num = 0x%x\n",
				       init_regsym->ctx);
				return -EINVAL;
			}
			qat_uclo_init_reg(handle, ae,
					  (unsigned char)
					  (1 << init_regsym->ctx),
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_EXPR:
			pr_err("QAT: INIT_EXPR feature not supported\n");
			return -EINVAL;
		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
			return -EINVAL;
		default:
			break;
		}
	}
	return 0;
}

static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	struct icp_qat_uclo_aedata *aed;
	unsigned int s, ae;

	if (obj_handle->global_inited)
		return 0;
	if (obj_handle->init_mem_tab.entry_num) {
		if (qat_uclo_init_memory(handle)) {
			pr_err("QAT: initialize memory failed\n");
			return -EINVAL;
		}
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		aed = &obj_handle->ae_data[ae];
		for (s = 0; s < aed->slice_num; s++) {
			if (!aed->ae_slices[s].encap_image)
				continue;
			if (qat_uclo_init_reg_sym(handle, ae,
						  aed->ae_slices[s].encap_image))
				return -EINVAL;
		}
	}
	obj_handle->global_inited = 1;
	return 0;
}

static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
			     struct icp_qat_uclo_objhandle *obj_handle,
			     unsigned char ae,
			     struct icp_qat_uof_image *uof_image)
{
	unsigned char mode;
	int ret;

	mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
	ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
	if (ret) {
		pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
		return ret;
	}
	if (handle->chip_info->nn) {
		mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
		ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
		if (ret) {
			pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
			return ret;
		}
	}
	mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
	ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
	if (ret) {
		pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
		return ret;
	}
	mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
	ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
	if (ret) {
		pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
		return ret;
	}
	if (handle->chip_info->lm2lm3) {
		mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
		ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
		if (ret) {
			pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
			return ret;
		}
		mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
		ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
		if (ret) {
			pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
			return ret;
		}
		mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
		qat_hal_set_ae_tindex_mode(handle, ae, mode);
	}
	return 0;
}

static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uof_image *uof_image;
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long cfg_ae_mask = handle->cfg_ae_mask;
	unsigned char ae, s;
	int error;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		ae_data = &obj_handle->ae_data[ae];
		for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
				      ICP_QAT_UCLO_MAX_CTX); s++) {
			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
				continue;
			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
			error = qat_hal_set_modes(handle, obj_handle, ae,
						  uof_image);
			if (error)
				return error;
		}
	}
	return 0;
}

static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uclo_encapme *image;
	int a;

	for (a = 0; a < obj_handle->uimage_num; a++) {
		image = &obj_handle->ae_uimage[a];
		image->uwords_num = image->page->beg_addr_p +
					image->page->micro_words_num;
	}
}

static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
					     obj_handle->obj_hdr->file_buff;
	obj_handle->uword_in_bytes = 6;
	obj_handle->prod_type = qat_uclo_get_dev_type(handle);
	obj_handle->prod_rev = PID_MAJOR_REV |
			(PID_MINOR_REV & handle->hal_handle->revision_id);
	if (qat_uclo_check_uof_compat(obj_handle)) {
		pr_err("QAT: UOF incompatible\n");
		return -EINVAL;
	}
	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
					GFP_KERNEL);
	if (!obj_handle->uword_buf)
		return -ENOMEM;
	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
	if (!obj_handle->obj_hdr->file_buff ||
	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
				    &obj_handle->str_table)) {
		pr_err("QAT: UOF doesn't have effective images\n");
		goto out_err;
	}
	obj_handle->uimage_num =
		qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
				    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
	if (!obj_handle->uimage_num)
		goto out_err;
	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
		pr_err("QAT: Bad object\n");
		goto out_check_uof_aemask_err;
	}
	qat_uclo_init_uword_num(handle);
	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
				   &obj_handle->init_mem_tab);
	if (qat_uclo_set_ae_mode(handle))
		goto out_check_uof_aemask_err;
	return 0;
out_check_uof_aemask_err:
	for (ae = 0; ae < obj_handle->uimage_num; ae++)
		kfree(obj_handle->ae_uimage[ae].page);
out_err:
	kfree(obj_handle->uword_buf);
	return -EFAULT;
}
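
/*
 * SUOF handling starts here: validate the file header checksum, which
 * is computed over everything from min_ver to the end of the file, and
 * cache the header fields in the loader's SUOF handle.
 */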
static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_suof_filehdr *suof_ptr,
				      int suof_size)
{
	unsigned int check_sum = 0;
	unsigned int min_ver_offset = 0;
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;

	suof_handle->file_id = ICP_QAT_SUOF_FID;
	suof_handle->suof_buf = (char *)suof_ptr;
	suof_handle->suof_size = suof_size;
	min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
					      min_ver);
	check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
					       min_ver_offset);
	if (check_sum != suof_ptr->check_sum) {
		pr_err("QAT: incorrect SUOF checksum\n");
		return -EINVAL;
	}
	suof_handle->check_sum = suof_ptr->check_sum;
	suof_handle->min_ver = suof_ptr->min_ver;
	suof_handle->maj_ver = suof_ptr->maj_ver;
	suof_handle->fw_type = suof_ptr->fw_type;
	return 0;
}

static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
			      struct icp_qat_suof_img_hdr *suof_img_hdr,
			      struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
	struct icp_qat_simg_ae_mode *ae_mode;
	struct icp_qat_suof_objhdr *suof_objhdr;

	suof_img_hdr->simg_buf = (suof_handle->suof_buf +
				  suof_chunk_hdr->offset +
				  sizeof(*suof_objhdr));
	suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
				  (suof_handle->suof_buf +
				   suof_chunk_hdr->offset))->img_length;

	suof_img_hdr->css_header = suof_img_hdr->simg_buf;
	suof_img_hdr->css_key = (suof_img_hdr->css_header +
				 sizeof(struct icp_qat_css_hdr));
	suof_img_hdr->css_signature = suof_img_hdr->css_key +
				      ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
				      ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
	suof_img_hdr->css_simg = suof_img_hdr->css_signature +
				 ICP_QAT_CSS_SIGNATURE_LEN(handle);

	ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
	suof_img_hdr->ae_mask = ae_mode->ae_mask;
	suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
	suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
	suof_img_hdr->fw_type = ae_mode->fw_type;
}

static void
qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
			  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	char **sym_str = (char **)&suof_handle->sym_str;
	unsigned int *sym_size = &suof_handle->sym_size;
	struct icp_qat_suof_strtable *str_table_obj;

	*sym_size = *(unsigned int *)(uintptr_t)
		    (suof_chunk_hdr->offset + suof_handle->suof_buf);
	*sym_str = (char *)(uintptr_t)
		   (suof_handle->suof_buf + suof_chunk_hdr->offset +
		   sizeof(str_table_obj->tab_length));
}

static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_suof_img_hdr *img_hdr)
{
	struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
	unsigned int prod_rev, maj_ver, prod_type;

	prod_type = qat_uclo_get_dev_type(handle);
	img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
	prod_rev = PID_MAJOR_REV |
			(PID_MINOR_REV & handle->hal_handle->revision_id);
	if (img_ae_mode->dev_type != prod_type) {
		pr_err("QAT: incompatible product type %x\n",
		       img_ae_mode->dev_type);
		return -EINVAL;
	}
	maj_ver = prod_rev & 0xff;
	if (maj_ver > img_ae_mode->devmax_ver ||
	    maj_ver < img_ae_mode->devmin_ver) {
		pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
		return -EINVAL;
	}
	return 0;
}

static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;

	kfree(sobj_handle->img_table.simg_hdr);
	sobj_handle->img_table.simg_hdr = NULL;
	kfree(handle->sobj_handle);
	handle->sobj_handle = NULL;
}
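
/*
 * Swap the image with index img_id to the end of the image table so it
 * is processed last; used below to defer the image that targets AE0.
 */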
static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
			      unsigned int img_id, unsigned int num_simgs)
{
	struct icp_qat_suof_img_hdr img_header;

	if (img_id != num_simgs - 1) {
		memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
		       sizeof(*suof_img_hdr));
		memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
		       sizeof(*suof_img_hdr));
		memcpy(&suof_img_hdr[img_id], &img_header,
		       sizeof(*suof_img_hdr));
	}
}

static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
			     struct icp_qat_suof_filehdr *suof_ptr,
			     int suof_size)
{
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
	struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
	struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
	int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
	unsigned int i = 0;
	struct icp_qat_suof_img_hdr img_header;

	if (!suof_ptr || suof_size == 0) {
		pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
		return -EINVAL;
	}
	if (qat_uclo_check_suof_format(suof_ptr))
		return -EINVAL;
	ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
	if (ret)
		return ret;
	suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
			 ((uintptr_t)suof_ptr + sizeof(*suof_ptr));

	qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
	suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;

	if (suof_handle->img_table.num_simgs != 0) {
		suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
				       sizeof(img_header),
				       GFP_KERNEL);
		if (!suof_img_hdr)
			return -ENOMEM;
		suof_handle->img_table.simg_hdr = suof_img_hdr;

		for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
			qat_uclo_map_simg(handle, &suof_img_hdr[i],
					  &suof_chunk_hdr[1 + i]);
			ret = qat_uclo_check_simg_compat(handle,
							 &suof_img_hdr[i]);
			if (ret)
				return ret;
			suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
			if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
				ae0_img = i;
		}

		if (!handle->chip_info->tgroup_share_ustore) {
			qat_uclo_tail_img(suof_img_hdr, ae0_img,
					  suof_handle->img_table.num_simgs);
		}
	}
	return 0;
}

#define ADD_ADDR(high, low) ((((u64)high) << 32) + low)
#define BITS_IN_DWORD 32
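
/*
 * Ask the FCU (firmware control unit) to authenticate a mapped image:
 * program the DMA address of the CSS header into the FCU DRAM address
 * CSRs, issue the AUTH command, then poll the status CSR until
 * verification succeeds, fails, or the retry budget is exhausted.
 */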
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	u32 fcu_sts, retry = 0;
	u32 fcu_ctl_csr, fcu_sts_csr;
	u32 fcu_dram_hi_csr, fcu_dram_lo_csr;
	u64 bus_addr;

	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
			   - sizeof(struct icp_qat_auth_chunk);

	fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
	fcu_sts_csr = handle->chip_info->fcu_sts_csr;
	fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
	fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;

	SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
	SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
	SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);

	do {
		msleep(FW_AUTH_WAIT_PERIOD);
		fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
			goto auth_fail;
		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
				return 0;
	} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
	pr_err("QAT: authentication error (FCU_STATUS = 0x%x), retry = %d\n",
	       fcu_sts & FCU_AUTH_STS_MASK, retry);
	return -EINVAL;
}

static bool qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle,
				  int imgid)
{
	struct icp_qat_suof_handle *sobj_handle;

	if (!handle->chip_info->tgroup_share_ustore)
		return false;

	sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
	if (handle->hal_handle->admin_ae_mask &
	    sobj_handle->img_table.simg_hdr[imgid].ae_mask)
		return false;

	return true;
}

static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_fw_auth_desc *desc)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long desc_ae_mask = desc->ae_mask;
	u32 fcu_sts, ae_broadcast_mask = 0;
	u32 fcu_loaded_csr, ae_loaded;
	u32 fcu_sts_csr, fcu_ctl_csr;
	unsigned int ae, retry = 0;

	if (handle->chip_info->tgroup_share_ustore) {
		fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
		fcu_sts_csr = handle->chip_info->fcu_sts_csr;
		fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
	} else {
		pr_err("Chip 0x%x doesn't support broadcast load\n",
		       handle->pci_dev->device);
		return -EINVAL;
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
			pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
			return -EINVAL;
		}

		if (test_bit(ae, &desc_ae_mask))
			ae_broadcast_mask |= 1 << ae;
	}

	if (ae_broadcast_mask) {
		SET_CAP_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE,
			    ae_broadcast_mask);

		SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_LOAD);

		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
			fcu_sts &= FCU_AUTH_STS_MASK;

			if (fcu_sts == FCU_STS_LOAD_FAIL) {
				pr_err("Broadcast load failed: 0x%x\n", fcu_sts);
				return -EINVAL;
			} else if (fcu_sts == FCU_STS_LOAD_DONE) {
				ae_loaded = GET_CAP_CSR(handle, fcu_loaded_csr);
				ae_loaded >>= handle->chip_info->fcu_loaded_ae_pos;

				if ((ae_loaded & ae_broadcast_mask) == ae_broadcast_mask)
					break;
			}
		} while (retry++ < FW_AUTH_MAX_RETRY);

		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("QAT: broadcast load failed timeout %d\n", retry);
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
			       struct icp_firml_dram_desc *dram_desc,
			       unsigned int size)
{
	void *vptr;
	dma_addr_t ptr;

	vptr = dma_alloc_coherent(&handle->pci_dev->dev,
				  size, &ptr, GFP_KERNEL);
	if (!vptr)
		return -ENOMEM;
	dram_desc->dram_base_addr_v = vptr;
	dram_desc->dram_bus_addr = ptr;
	dram_desc->dram_size = size;
	return 0;
}

static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
			       struct icp_firml_dram_desc *dram_desc)
{
	if (handle && dram_desc && dram_desc->dram_base_addr_v) {
		dma_free_coherent(&handle->pci_dev->dev,
				  (size_t)(dram_desc->dram_size),
				  dram_desc->dram_base_addr_v,
				  dram_desc->dram_bus_addr);
	}

	if (dram_desc)
		memset(dram_desc, 0, sizeof(*dram_desc));
}

static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_fw_auth_desc **desc)
{
	struct icp_firml_dram_desc dram_desc;

	if (*desc) {
		dram_desc.dram_base_addr_v = *desc;
		dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
					   (*desc))->chunk_bus_addr;
		dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
				       (*desc))->chunk_size;
		qat_uclo_simg_free(handle, &dram_desc);
	}
}
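
/*
 * Copy a CSS-signed image into DMA-able memory laid out as the FCU
 * expects: CSS header, FWSK public key (modulus, zero padding,
 * exponent), signature, then the image body. The auth descriptor at
 * the head of the allocation is filled with the bus address of each
 * region.
 */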
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
				char *image, unsigned int size,
				struct icp_qat_fw_auth_desc **desc)
{
	struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
	struct icp_qat_fw_auth_desc *auth_desc;
	struct icp_qat_auth_chunk *auth_chunk;
	u64 virt_addr, bus_addr, virt_base;
	unsigned int length, simg_offset = sizeof(*auth_chunk);
	struct icp_qat_simg_ae_mode *simg_ae_mode;
	struct icp_firml_dram_desc img_desc;

	if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
		pr_err("QAT: error, input image size overflow %d\n", size);
		return -EINVAL;
	}
	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
		 ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset :
		 size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset;
	if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
		pr_err("QAT: error, allocate continuous dram fail\n");
		return -ENOMEM;
	}

	auth_chunk = img_desc.dram_base_addr_v;
	auth_chunk->chunk_size = img_desc.dram_size;
	auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
	virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
	bus_addr = img_desc.dram_bus_addr + simg_offset;
	auth_desc = img_desc.dram_base_addr_v;
	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->css_hdr_low = (unsigned int)bus_addr;
	virt_addr = virt_base;

	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
	/* pub key */
	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
		   sizeof(*css_hdr);
	virt_addr = virt_addr + sizeof(*css_hdr);

	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr)),
	       ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
	/* padding */
	memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
	       0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));

	/* exponent */
	memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
	       ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
	       (void *)(image + sizeof(*css_hdr) +
			ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
	       sizeof(unsigned int));

	/* signature */
	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
			    auth_desc->fwsk_pub_low) +
		   ICP_QAT_CSS_FWSK_PUB_LEN(handle);
	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->signature_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr) +
	       ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
	       ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
	       ICP_QAT_CSS_SIGNATURE_LEN(handle));

	bus_addr = ADD_ADDR(auth_desc->signature_high,
			    auth_desc->signature_low) +
		   ICP_QAT_CSS_SIGNATURE_LEN(handle);
	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);

	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->img_low = (unsigned int)bus_addr;
	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
	       auth_desc->img_len);
	virt_addr = virt_base;
	/* AE firmware */
	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
	    CSS_AE_FIRMWARE) {
		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
				    auth_desc->img_ae_mode_data_low) +
			   sizeof(struct icp_qat_simg_ae_mode);

		auth_desc->img_ae_init_data_high = (unsigned int)
						   (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
		auth_desc->img_ae_insts_high = (unsigned int)
					       (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
		virt_addr += sizeof(struct icp_qat_css_hdr);
		virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
		virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
		simg_ae_mode = (struct icp_qat_simg_ae_mode *)(uintptr_t)virt_addr;
		auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
	} else {
		auth_desc->img_ae_insts_high = auth_desc->img_high;
		auth_desc->img_ae_insts_low = auth_desc->img_low;
	}
	*desc = auth_desc;
	return 0;
}

static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	u32 fcu_sts_csr, fcu_ctl_csr;
	u32 loaded_aes, loaded_csr;
	unsigned int i;
	u32 fcu_sts;

	fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
	fcu_sts_csr = handle->chip_info->fcu_sts_csr;
	loaded_csr = handle->chip_info->fcu_loaded_ae_csr;

	for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) {
		int retry = 0;

		if (!((desc->ae_mask >> i) & 0x1))
			continue;
		if (qat_hal_check_ae_active(handle, i)) {
			pr_err("QAT: AE %d is active\n", i);
			return -EINVAL;
		}
		SET_CAP_CSR(handle, fcu_ctl_csr,
			    (FCU_CTRL_CMD_LOAD |
			    (1 << FCU_CTRL_BROADCAST_POS) |
			    (i << FCU_CTRL_AE_POS)));

		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
			if ((fcu_sts & FCU_AUTH_STS_MASK) ==
			    FCU_STS_LOAD_DONE) {
				loaded_aes = GET_CAP_CSR(handle, loaded_csr);
				loaded_aes >>= handle->chip_info->fcu_loaded_ae_pos;
				if (loaded_aes & (1 << i))
					break;
			}
		} while (retry++ < FW_AUTH_MAX_RETRY);
		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("QAT: firmware load failed timeout %x\n", retry);
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
				 void *addr_ptr, int mem_size)
{
	struct icp_qat_suof_handle *suof_handle;

	suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
	if (!suof_handle)
		return -ENOMEM;
	handle->sobj_handle = suof_handle;
	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
		qat_uclo_del_suof(handle);
		pr_err("QAT: map SUOF failed\n");
		return -EINVAL;
	}
	return 0;
}

int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
		       void *addr_ptr, int mem_size)
{
	struct icp_qat_fw_auth_desc *desc = NULL;
	int status = 0;

	if (handle->chip_info->fw_auth) {
		if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
			status = qat_uclo_auth_fw(handle, desc);
		qat_uclo_ummap_auth_fw(handle, &desc);
	} else {
		if (!handle->chip_info->sram_visible) {
			dev_dbg(&handle->pci_dev->dev,
				"QAT MMP fw not loaded for device 0x%x",
				handle->pci_dev->device);
			return status;
		}
		qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
	}
	return status;
}

static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
				void *addr_ptr, int mem_size)
{
	struct icp_qat_uof_filehdr *filehdr;
	struct icp_qat_uclo_objhandle *objhdl;

	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
	if (!objhdl)
		return -ENOMEM;
	objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
	if (!objhdl->obj_buf)
		goto out_objbuf_err;
	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
	if (qat_uclo_check_uof_format(filehdr))
		goto out_objhdr_err;
	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
					     ICP_QAT_UOF_OBJS);
	if (!objhdl->obj_hdr) {
		pr_err("QAT: object file chunk is null\n");
		goto out_objhdr_err;
	}
	handle->obj_handle = objhdl;
	if (qat_uclo_parse_uof_obj(handle))
		goto out_overlay_obj_err;
	return 0;

out_overlay_obj_err:
	handle->obj_handle = NULL;
	kfree(objhdl->obj_hdr);
out_objhdr_err:
	kfree(objhdl->obj_buf);
out_objbuf_err:
	kfree(objhdl);
	return -ENOMEM;
}
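
/*
 * MOF handling starts here. A MOF file is a container: a file header
 * followed by chunks holding a symbol table and the embedded UOF/SUOF
 * objects. Its header checksum covers everything from min_ver onwards.
 */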
static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
				     struct icp_qat_mof_file_hdr *mof_ptr,
				     u32 mof_size)
{
	struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
	unsigned int min_ver_offset;
	unsigned int checksum;

	mobj_handle->file_id = ICP_QAT_MOF_FID;
	mobj_handle->mof_buf = (char *)mof_ptr;
	mobj_handle->mof_size = mof_size;

	min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr,
					     min_ver);
	checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
					      min_ver_offset);
	if (checksum != mof_ptr->checksum) {
		pr_err("QAT: incorrect MOF checksum\n");
		return -EINVAL;
	}

	mobj_handle->checksum = mof_ptr->checksum;
	mobj_handle->min_ver = mof_ptr->min_ver;
	mobj_handle->maj_ver = mof_ptr->maj_ver;
	return 0;
}

static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;

	kfree(mobj_handle->obj_table.obj_hdr);
	mobj_handle->obj_table.obj_hdr = NULL;
	kfree(handle->mobj_handle);
	handle->mobj_handle = NULL;
}

static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
					char *obj_name, char **obj_ptr,
					unsigned int *obj_size)
{
	struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
	unsigned int i;

	for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
		if (!strncmp(obj_hdr[i].obj_name, obj_name,
			     ICP_QAT_SUOF_OBJ_NAME_LEN)) {
			*obj_ptr = obj_hdr[i].obj_buf;
			*obj_size = obj_hdr[i].obj_size;
			return 0;
		}
	}

	pr_err("QAT: object %s is not found inside MOF\n", obj_name);
	return -EINVAL;
}

static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
				     struct icp_qat_mof_objhdr *mobj_hdr,
				     struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
{
	u8 *obj;

	if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG,
		     ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
		obj = mobj_handle->uobjs_hdr + obj_chunkhdr->offset;
	} else if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_SUOF_IMAG,
			    ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
		obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
	} else {
		pr_err("QAT: unsupported chunk id\n");
		return -EINVAL;
	}
	mobj_hdr->obj_buf = obj;
	mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
	mobj_hdr->obj_name = obj_chunkhdr->name + mobj_handle->sym_str;
	return 0;
}

static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
{
	struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
	struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
	struct icp_qat_mof_obj_hdr *uobj_hdr;
	struct icp_qat_mof_obj_hdr *sobj_hdr;
	struct icp_qat_mof_objhdr *mobj_hdr;
	unsigned int uobj_chunk_num = 0;
	unsigned int sobj_chunk_num = 0;
	unsigned int *valid_chunk;
	int ret, i;

	uobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
	sobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
	if (uobj_hdr)
		uobj_chunk_num = uobj_hdr->num_chunks;
	if (sobj_hdr)
		sobj_chunk_num = sobj_hdr->num_chunks;

	mobj_hdr = kzalloc((uobj_chunk_num + sobj_chunk_num) *
			   sizeof(*mobj_hdr), GFP_KERNEL);
	if (!mobj_hdr)
		return -ENOMEM;

	mobj_handle->obj_table.obj_hdr = mobj_hdr;
	valid_chunk = &mobj_handle->obj_table.num_objs;
	uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
			((uintptr_t)uobj_hdr + sizeof(*uobj_hdr));
	sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
			((uintptr_t)sobj_hdr + sizeof(*sobj_hdr));

	/* map uof objects */
	for (i = 0; i < uobj_chunk_num; i++) {
		ret = qat_uclo_map_obj_from_mof(mobj_handle,
						&mobj_hdr[*valid_chunk],
						&uobj_chunkhdr[i]);
		if (ret)
			return ret;
		(*valid_chunk)++;
	}

	/* map suof objects */
	for (i = 0; i < sobj_chunk_num; i++) {
		ret = qat_uclo_map_obj_from_mof(mobj_handle,
						&mobj_hdr[*valid_chunk],
						&sobj_chunkhdr[i]);
		if (ret)
			return ret;
		(*valid_chunk)++;
	}

	if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
		pr_err("QAT: inconsistent UOF/SUOF chunk amount\n");
		return -EINVAL;
	}
	return 0;
}

static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
				     struct icp_qat_mof_chunkhdr *mof_chunkhdr)
{
	char **sym_str = (char **)&mobj_handle->sym_str;
	unsigned int *sym_size = &mobj_handle->sym_size;
	struct icp_qat_mof_str_table *str_table_obj;

	*sym_size = *(unsigned int *)(uintptr_t)
		    (mof_chunkhdr->offset + mobj_handle->mof_buf);
	*sym_str = (char *)(uintptr_t)
		   (mobj_handle->mof_buf + mof_chunkhdr->offset +
		   sizeof(str_table_obj->tab_len));
}

static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
				   struct icp_qat_mof_chunkhdr *mof_chunkhdr)
{
	char *chunk_id = mof_chunkhdr->chunk_id;

	if (!strncmp(chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
		qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
	else if (!strncmp(chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
		mobj_handle->uobjs_hdr = mobj_handle->mof_buf +
					 mof_chunkhdr->offset;
	else if (!strncmp(chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
		mobj_handle->sobjs_hdr = mobj_handle->mof_buf +
					 mof_chunkhdr->offset;
}

static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
{
	int maj = mof_hdr->maj_ver & 0xff;
	int min = mof_hdr->min_ver & 0xff;

	if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
		pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
		return -EINVAL;
	}

	if (mof_hdr->num_chunks <= 0x1) {
		pr_err("QAT: MOF chunk amount is incorrect\n");
		return -EINVAL;
	}
	if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
		pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
		       maj, min);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
				struct icp_qat_mof_file_hdr *mof_ptr,
				u32 mof_size, char *obj_name, char **obj_ptr,
				unsigned int *obj_size)
{
	struct icp_qat_mof_chunkhdr *mof_chunkhdr;
	unsigned int file_id = mof_ptr->file_id;
	struct icp_qat_mof_handle *mobj_handle;
	unsigned short chunks_num;
	unsigned int i;
	int ret;

	if (file_id == ICP_QAT_UOF_FID || file_id == ICP_QAT_SUOF_FID) {
		if (obj_ptr)
			*obj_ptr = (char *)mof_ptr;
		if (obj_size)
			*obj_size = mof_size;
		return 0;
	}
	if (qat_uclo_check_mof_format(mof_ptr))
		return -EINVAL;

	mobj_handle = kzalloc(sizeof(*mobj_handle), GFP_KERNEL);
	if (!mobj_handle)
		return -ENOMEM;

	handle->mobj_handle = mobj_handle;
	ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
	if (ret)
		return ret;

	mof_chunkhdr = (void *)mof_ptr + sizeof(*mof_ptr);
	chunks_num = mof_ptr->num_chunks;

	/* Parse MOF file chunks */
	for (i = 0; i < chunks_num; i++)
		qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);

	/* All sym_objs uobjs and sobjs should be available */
	if (!mobj_handle->sym_str ||
	    (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
		return -EINVAL;

	ret = qat_uclo_map_objs_from_mof(mobj_handle);
	if (ret)
		return ret;

	/* Seek specified uof object in MOF */
	return qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name,
					    obj_ptr, obj_size);
}

int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
		     void *addr_ptr, u32 mem_size, char *obj_name)
{
	char *obj_addr;
	u32 obj_size;
	int ret;

	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
		     (sizeof(handle->hal_handle->ae_mask) * 8));

	if (!handle || !addr_ptr || mem_size < 24)
		return -EINVAL;

	if (obj_name) {
		ret = qat_uclo_map_mof_obj(handle, addr_ptr, mem_size, obj_name,
					   &obj_addr, &obj_size);
		if (ret)
			return ret;
	} else {
		obj_addr = addr_ptr;
		obj_size = mem_size;
	}

	return (handle->chip_info->fw_auth) ?
			qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
			qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
}

void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int a;

	if (handle->mobj_handle)
		qat_uclo_del_mof(handle);
	if (handle->sobj_handle)
		qat_uclo_del_suof(handle);
	if (!obj_handle)
		return;

	kfree(obj_handle->uword_buf);
	for (a = 0; a < obj_handle->uimage_num; a++)
		kfree(obj_handle->ae_uimage[a].page);

	for (a = 0; a < handle->hal_handle->ae_max_num; a++)
		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);

	kfree(obj_handle->obj_hdr);
	kfree(obj_handle->obj_buf);
	kfree(obj_handle);
	handle->obj_handle = NULL;
}
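
/*
 * Resolve one microword for a ustore address: search the page's uword
 * blocks for the address and extract the 44-bit instruction; addresses
 * outside every block, or holding INVLD_UWORD, get the fill pattern.
 */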
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
				 struct icp_qat_uclo_encap_page *encap_page,
				 u64 *uword, unsigned int addr_p,
				 unsigned int raddr, u64 fill)
{
	unsigned int i, addr;
	u64 uwrd = 0;

	if (!encap_page) {
		*uword = fill;
		return;
	}
	addr = (encap_page->page_region) ? raddr : addr_p;
	for (i = 0; i < encap_page->uwblock_num; i++) {
		if (addr >= encap_page->uwblock[i].start_addr &&
		    addr <= encap_page->uwblock[i].start_addr +
		    encap_page->uwblock[i].words_num - 1) {
			addr -= encap_page->uwblock[i].start_addr;
			addr *= obj_handle->uword_in_bytes;
			memcpy(&uwrd, (void *)(((uintptr_t)
			       encap_page->uwblock[i].micro_words) + addr),
			       obj_handle->uword_in_bytes);
			uwrd = uwrd & GENMASK_ULL(43, 0);
		}
	}
	*uword = uwrd;
	if (*uword == INVLD_UWORD)
		*uword = fill;
}

static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
					struct icp_qat_uclo_encap_page
					*encap_page, unsigned int ae)
{
	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	u64 fill_pat;

	/* load the page starting at appropriate ustore address */
	/* get fill-pattern from an image -- they are all the same */
	memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
	       sizeof(u64));
	uw_physical_addr = encap_page->beg_addr_p;
	uw_relative_addr = 0;
	words_num = encap_page->micro_words_num;
	while (words_num) {
		if (words_num < UWORD_CPYBUF_SIZE)
			cpylen = words_num;
		else
			cpylen = UWORD_CPYBUF_SIZE;

		/* load the buffer */
		for (i = 0; i < cpylen; i++)
			qat_uclo_fill_uwords(obj_handle, encap_page,
					     &obj_handle->uword_buf[i],
					     uw_physical_addr + i,
					     uw_relative_addr + i, fill_pat);

		/* copy the buffer to ustore */
		qat_hal_wr_uwords(handle, (unsigned char)ae,
				  uw_physical_addr, cpylen,
				  obj_handle->uword_buf);

		uw_physical_addr += cpylen;
		uw_relative_addr += cpylen;
		words_num -= cpylen;
	}
}

static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
				    struct icp_qat_uof_image *image)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long cfg_ae_mask = handle->cfg_ae_mask;
	unsigned long ae_assigned = image->ae_assigned;
	struct icp_qat_uclo_aedata *aed;
	unsigned int ctx_mask, s;
	struct icp_qat_uclo_page *page;
	unsigned char ae;
	int ctx;

	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;
	/* load the default page and set assigned CTX PC
	 * to the entrypoint address */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		if (!test_bit(ae, &ae_assigned))
			continue;

		aed = &obj_handle->ae_data[ae];
		/* find the slice to which this image is assigned */
		for (s = 0; s < aed->slice_num; s++) {
			if (image->ctx_assigned &
			    aed->ae_slices[s].ctx_mask_assigned)
				break;
		}
		if (s >= aed->slice_num)
			continue;
		page = aed->ae_slices[s].page;
		if (!page->encap_page->def_page)
			continue;
		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);

		page = aed->ae_slices[s].page;
		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
			aed->ae_slices[s].cur_page[ctx] =
					(ctx_mask & (1 << ctx)) ? page : NULL;
		qat_hal_set_live_ctx(handle, (unsigned char)ae,
				     image->ctx_assigned);
		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
			       image->entry_address);
	}
}

static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int i;
	struct icp_qat_fw_auth_desc *desc = NULL;
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;

	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
		if (qat_uclo_map_auth_fw(handle,
					 (char *)simg_hdr[i].simg_buf,
					 (unsigned int)
					 simg_hdr[i].simg_len,
					 &desc))
			goto wr_err;
		if (qat_uclo_auth_fw(handle, desc))
			goto wr_err;
		if (qat_uclo_is_broadcast(handle, i)) {
			if (qat_uclo_broadcast_load_fw(handle, desc))
				goto wr_err;
		} else {
			if (qat_uclo_load_fw(handle, desc))
				goto wr_err;
		}
		qat_uclo_ummap_auth_fw(handle, &desc);
	}
	return 0;
wr_err:
	qat_uclo_ummap_auth_fw(handle, &desc);
	return -EINVAL;
}

static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int i;

	if (qat_uclo_init_globals(handle))
		return -EINVAL;
	for (i = 0; i < obj_handle->uimage_num; i++) {
		if (!obj_handle->ae_uimage[i].img_ptr)
			return -EINVAL;
		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
			return -EINVAL;
		qat_uclo_wr_uimage_page(handle,
					obj_handle->ae_uimage[i].img_ptr);
	}
	return 0;
}

int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
{
	return (handle->chip_info->fw_auth) ? qat_uclo_wr_suof_img(handle) :
				qat_uclo_wr_uof_img(handle);
}

int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
			     unsigned int cfg_ae_mask)
{
	if (!cfg_ae_mask)
		return -EINVAL;

	handle->cfg_ae_mask = cfg_ae_mask;
	return 0;
}