2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/slab.h>
48 #include <linux/ctype.h>
49 #include <linux/kernel.h>
50 #include <linux/delay.h>
51 #include "adf_accel_devices.h"
52 #include "adf_common_drv.h"
53 #include "icp_qat_uclo.h"
54 #include "icp_qat_hal.h"
55 #include "icp_qat_fw_loader_handle.h"
#define UWORD_CPYBUF_SIZE 1024		/* microwords copied per ustore write chunk */
#define INVLD_UWORD 0xffffffffffull	/* all-ones 40-bit marker for an invalid uword */
#define PID_MINOR_REV 0xf		/* low nibble of PCI revision id = minor rev */
#define PID_MAJOR_REV (0xf << 4)	/* high nibble of PCI revision id = major rev */
62 static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle
*obj_handle
,
63 unsigned int ae
, unsigned int image_num
)
65 struct icp_qat_uclo_aedata
*ae_data
;
66 struct icp_qat_uclo_encapme
*encap_image
;
67 struct icp_qat_uclo_page
*page
= NULL
;
68 struct icp_qat_uclo_aeslice
*ae_slice
= NULL
;
70 ae_data
= &obj_handle
->ae_data
[ae
];
71 encap_image
= &obj_handle
->ae_uimage
[image_num
];
72 ae_slice
= &ae_data
->ae_slices
[ae_data
->slice_num
];
73 ae_slice
->encap_image
= encap_image
;
75 if (encap_image
->img_ptr
) {
76 ae_slice
->ctx_mask_assigned
=
77 encap_image
->img_ptr
->ctx_assigned
;
78 ae_data
->eff_ustore_size
= obj_handle
->ustore_phy_size
;
80 ae_slice
->ctx_mask_assigned
= 0;
82 ae_slice
->region
= kzalloc(sizeof(*ae_slice
->region
), GFP_KERNEL
);
83 if (!ae_slice
->region
)
85 ae_slice
->page
= kzalloc(sizeof(*ae_slice
->page
), GFP_KERNEL
);
88 page
= ae_slice
->page
;
89 page
->encap_page
= encap_image
->page
;
90 ae_slice
->page
->region
= ae_slice
->region
;
94 kfree(ae_slice
->region
);
95 ae_slice
->region
= NULL
;
99 static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata
*ae_data
)
104 pr_err("QAT: bad argument, ae_data is NULL\n ");
108 for (i
= 0; i
< ae_data
->slice_num
; i
++) {
109 kfree(ae_data
->ae_slices
[i
].region
);
110 ae_data
->ae_slices
[i
].region
= NULL
;
111 kfree(ae_data
->ae_slices
[i
].page
);
112 ae_data
->ae_slices
[i
].page
= NULL
;
117 static char *qat_uclo_get_string(struct icp_qat_uof_strtable
*str_table
,
118 unsigned int str_offset
)
120 if ((!str_table
->table_len
) || (str_offset
> str_table
->table_len
))
122 return (char *)(((uintptr_t)(str_table
->strings
)) + str_offset
);
125 static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr
*hdr
)
127 int maj
= hdr
->maj_ver
& 0xff;
128 int min
= hdr
->min_ver
& 0xff;
130 if (hdr
->file_id
!= ICP_QAT_UOF_FID
) {
131 pr_err("QAT: Invalid header 0x%x\n", hdr
->file_id
);
134 if (min
!= ICP_QAT_UOF_MINVER
|| maj
!= ICP_QAT_UOF_MAJVER
) {
135 pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
142 static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr
*suof_hdr
)
144 int maj
= suof_hdr
->maj_ver
& 0xff;
145 int min
= suof_hdr
->min_ver
& 0xff;
147 if (suof_hdr
->file_id
!= ICP_QAT_SUOF_FID
) {
148 pr_err("QAT: invalid header 0x%x\n", suof_hdr
->file_id
);
151 if (suof_hdr
->fw_type
!= 0) {
152 pr_err("QAT: unsupported firmware type\n");
155 if (suof_hdr
->num_chunks
<= 0x1) {
156 pr_err("QAT: SUOF chunk amount is incorrect\n");
159 if (maj
!= ICP_QAT_SUOF_MAJVER
|| min
!= ICP_QAT_SUOF_MINVER
) {
160 pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
/*
 * Copy @num_in_bytes from @val into device SRAM at @addr, one 32-bit
 * word at a time. memcpy is used so unaligned source buffers are safe.
 * NOTE(review): assumes num_in_bytes is a multiple of 4 — confirm callers.
 */
static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
}
/*
 * Write @num_in_bytes from @val into AE @ae micro-store memory starting
 * at byte address @addr, one 32-bit word per uword slot.
 * NOTE(review): assumes num_in_bytes is a multiple of 4 — confirm callers.
 */
static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}
201 static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle
*handle
,
203 struct icp_qat_uof_batch_init
206 struct icp_qat_uof_batch_init
*umem_init
;
208 if (!umem_init_header
)
210 umem_init
= umem_init_header
->next
;
212 unsigned int addr
, *value
, size
;
215 addr
= umem_init
->addr
;
216 value
= umem_init
->value
;
217 size
= umem_init
->size
;
218 qat_uclo_wr_umem_by_words(handle
, ae
, addr
, value
, size
);
219 umem_init
= umem_init
->next
;
224 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle
*handle
,
225 struct icp_qat_uof_batch_init
**base
)
227 struct icp_qat_uof_batch_init
*umem_init
;
231 struct icp_qat_uof_batch_init
*pre
;
234 umem_init
= umem_init
->next
;
240 static int qat_uclo_parse_num(char *str
, unsigned int *num
)
243 unsigned long ae
= 0;
246 strncpy(buf
, str
, 15);
247 for (i
= 0; i
< 16; i
++) {
248 if (!isdigit(buf
[i
])) {
253 if ((kstrtoul(buf
, 10, &ae
)))
256 *num
= (unsigned int)ae
;
260 static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle
*handle
,
261 struct icp_qat_uof_initmem
*init_mem
,
262 unsigned int size_range
, unsigned int *ae
)
264 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
267 if ((init_mem
->addr
+ init_mem
->num_in_bytes
) > (size_range
<< 0x2)) {
268 pr_err("QAT: initmem is out of range");
271 if (init_mem
->scope
!= ICP_QAT_UOF_LOCAL_SCOPE
) {
272 pr_err("QAT: Memory scope for init_mem error\n");
275 str
= qat_uclo_get_string(&obj_handle
->str_table
, init_mem
->sym_name
);
277 pr_err("QAT: AE name assigned in UOF init table is NULL\n");
280 if (qat_uclo_parse_num(str
, ae
)) {
281 pr_err("QAT: Parse num for AE number failed\n");
284 if (*ae
>= ICP_QAT_UCLO_MAX_AE
) {
285 pr_err("QAT: ae %d out of range\n", *ae
);
291 static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
292 *handle
, struct icp_qat_uof_initmem
293 *init_mem
, unsigned int ae
,
294 struct icp_qat_uof_batch_init
297 struct icp_qat_uof_batch_init
*init_header
, *tail
;
298 struct icp_qat_uof_batch_init
*mem_init
, *tail_old
;
299 struct icp_qat_uof_memvar_attr
*mem_val_attr
;
300 unsigned int i
, flag
= 0;
303 (struct icp_qat_uof_memvar_attr
*)((uintptr_t)init_mem
+
304 sizeof(struct icp_qat_uof_initmem
));
306 init_header
= *init_tab_base
;
308 init_header
= kzalloc(sizeof(*init_header
), GFP_KERNEL
);
311 init_header
->size
= 1;
312 *init_tab_base
= init_header
;
315 tail_old
= init_header
;
316 while (tail_old
->next
)
317 tail_old
= tail_old
->next
;
319 for (i
= 0; i
< init_mem
->val_attr_num
; i
++) {
320 mem_init
= kzalloc(sizeof(*mem_init
), GFP_KERNEL
);
324 mem_init
->addr
= init_mem
->addr
+ mem_val_attr
->offset_in_byte
;
325 mem_init
->value
= &mem_val_attr
->value
;
327 mem_init
->next
= NULL
;
328 tail
->next
= mem_init
;
330 init_header
->size
+= qat_hal_get_ins_num();
336 mem_init
= tail_old
->next
;
341 kfree(*init_tab_base
);
345 static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle
*handle
,
346 struct icp_qat_uof_initmem
*init_mem
)
348 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
351 if (qat_uclo_fetch_initmem_ae(handle
, init_mem
,
352 ICP_QAT_UCLO_MAX_LMEM_REG
, &ae
))
354 if (qat_uclo_create_batch_init_list(handle
, init_mem
, ae
,
355 &obj_handle
->lm_init_tab
[ae
]))
360 static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle
*handle
,
361 struct icp_qat_uof_initmem
*init_mem
)
363 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
364 unsigned int ae
, ustore_size
, uaddr
, i
;
366 ustore_size
= obj_handle
->ustore_phy_size
;
367 if (qat_uclo_fetch_initmem_ae(handle
, init_mem
, ustore_size
, &ae
))
369 if (qat_uclo_create_batch_init_list(handle
, init_mem
, ae
,
370 &obj_handle
->umem_init_tab
[ae
]))
372 /* set the highest ustore address referenced */
373 uaddr
= (init_mem
->addr
+ init_mem
->num_in_bytes
) >> 0x2;
374 for (i
= 0; i
< obj_handle
->ae_data
[ae
].slice_num
; i
++) {
375 if (obj_handle
->ae_data
[ae
].ae_slices
[i
].
376 encap_image
->uwords_num
< uaddr
)
377 obj_handle
->ae_data
[ae
].ae_slices
[i
].
378 encap_image
->uwords_num
= uaddr
;
383 #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
384 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle
*handle
,
385 struct icp_qat_uof_initmem
*init_mem
)
387 switch (init_mem
->region
) {
388 case ICP_QAT_UOF_LMEM_REGION
:
389 if (qat_uclo_init_lmem_seg(handle
, init_mem
))
392 case ICP_QAT_UOF_UMEM_REGION
:
393 if (qat_uclo_init_umem_seg(handle
, init_mem
))
397 pr_err("QAT: initmem region error. region type=0x%x\n",
404 static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle
*handle
,
405 struct icp_qat_uclo_encapme
*image
)
408 struct icp_qat_uclo_encap_page
*page
;
409 struct icp_qat_uof_image
*uof_image
;
411 unsigned int ustore_size
;
412 unsigned int patt_pos
;
413 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
416 uof_image
= image
->img_ptr
;
417 fill_data
= kcalloc(ICP_QAT_UCLO_MAX_USTORE
, sizeof(uint64_t),
421 for (i
= 0; i
< ICP_QAT_UCLO_MAX_USTORE
; i
++)
422 memcpy(&fill_data
[i
], &uof_image
->fill_pattern
,
426 for (ae
= 0; ae
< handle
->hal_handle
->ae_max_num
; ae
++) {
427 if (!test_bit(ae
, (unsigned long *)&uof_image
->ae_assigned
))
429 ustore_size
= obj_handle
->ae_data
[ae
].eff_ustore_size
;
430 patt_pos
= page
->beg_addr_p
+ page
->micro_words_num
;
432 qat_hal_wr_uwords(handle
, (unsigned char)ae
, 0,
433 page
->beg_addr_p
, &fill_data
[0]);
434 qat_hal_wr_uwords(handle
, (unsigned char)ae
, patt_pos
,
435 ustore_size
- patt_pos
+ 1,
436 &fill_data
[page
->beg_addr_p
]);
442 static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle
*handle
)
445 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
446 struct icp_qat_uof_initmem
*initmem
= obj_handle
->init_mem_tab
.init_mem
;
448 for (i
= 0; i
< obj_handle
->init_mem_tab
.entry_num
; i
++) {
449 if (initmem
->num_in_bytes
) {
450 if (qat_uclo_init_ae_memory(handle
, initmem
))
453 initmem
= (struct icp_qat_uof_initmem
*)((uintptr_t)(
455 sizeof(struct icp_qat_uof_initmem
)) +
456 (sizeof(struct icp_qat_uof_memvar_attr
) *
457 initmem
->val_attr_num
));
459 for (ae
= 0; ae
< handle
->hal_handle
->ae_max_num
; ae
++) {
460 if (qat_hal_batch_wr_lm(handle
, ae
,
461 obj_handle
->lm_init_tab
[ae
])) {
462 pr_err("QAT: fail to batch init lmem for AE %d\n", ae
);
465 qat_uclo_cleanup_batch_init_list(handle
,
466 &obj_handle
->lm_init_tab
[ae
]);
467 qat_uclo_batch_wr_umem(handle
, ae
,
468 obj_handle
->umem_init_tab
[ae
]);
469 qat_uclo_cleanup_batch_init_list(handle
,
476 static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr
*obj_hdr
,
477 char *chunk_id
, void *cur
)
480 struct icp_qat_uof_chunkhdr
*chunk_hdr
=
481 (struct icp_qat_uof_chunkhdr
*)
482 ((uintptr_t)obj_hdr
+ sizeof(struct icp_qat_uof_objhdr
));
484 for (i
= 0; i
< obj_hdr
->num_chunks
; i
++) {
485 if ((cur
< (void *)&chunk_hdr
[i
]) &&
486 !strncmp(chunk_hdr
[i
].chunk_id
, chunk_id
,
487 ICP_QAT_UOF_OBJID_LEN
)) {
488 return &chunk_hdr
[i
];
/*
 * Fold one byte @ch into the running CRC @reg using the CRC-16/XModem
 * polynomial 0x1021 (MSB-first). Returns the updated 16-bit CRC.
 */
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	int i;
	unsigned int topbit = 1 << 0xF;
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);

	reg ^= inbyte << 0x8;
	for (i = 0; i < 0x8; i++) {
		if (reg & topbit)
			reg = (reg << 1) ^ 0x1021;
		else
			reg <<= 1;
	}
	return reg & 0xffff;
}
/*
 * CRC-16 over @num bytes at @ptr; a NULL @ptr yields checksum 0.
 */
static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int chksum = 0;

	if (ptr)
		while (num--)
			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
	return chksum;
}
520 static struct icp_qat_uclo_objhdr
*
521 qat_uclo_map_chunk(char *buf
, struct icp_qat_uof_filehdr
*file_hdr
,
524 struct icp_qat_uof_filechunkhdr
*file_chunk
;
525 struct icp_qat_uclo_objhdr
*obj_hdr
;
529 file_chunk
= (struct icp_qat_uof_filechunkhdr
*)
530 (buf
+ sizeof(struct icp_qat_uof_filehdr
));
531 for (i
= 0; i
< file_hdr
->num_chunks
; i
++) {
532 if (!strncmp(file_chunk
->chunk_id
, chunk_id
,
533 ICP_QAT_UOF_OBJID_LEN
)) {
534 chunk
= buf
+ file_chunk
->offset
;
535 if (file_chunk
->checksum
!= qat_uclo_calc_str_checksum(
536 chunk
, file_chunk
->size
))
538 obj_hdr
= kzalloc(sizeof(*obj_hdr
), GFP_KERNEL
);
541 obj_hdr
->file_buff
= chunk
;
542 obj_hdr
->checksum
= file_chunk
->checksum
;
543 obj_hdr
->size
= file_chunk
->size
;
552 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj
*encap_uof_obj
,
553 struct icp_qat_uof_image
*image
)
555 struct icp_qat_uof_objtable
*uc_var_tab
, *imp_var_tab
, *imp_expr_tab
;
556 struct icp_qat_uof_objtable
*neigh_reg_tab
;
557 struct icp_qat_uof_code_page
*code_page
;
559 code_page
= (struct icp_qat_uof_code_page
*)
560 ((char *)image
+ sizeof(struct icp_qat_uof_image
));
561 uc_var_tab
= (struct icp_qat_uof_objtable
*)(encap_uof_obj
->beg_uof
+
562 code_page
->uc_var_tab_offset
);
563 imp_var_tab
= (struct icp_qat_uof_objtable
*)(encap_uof_obj
->beg_uof
+
564 code_page
->imp_var_tab_offset
);
565 imp_expr_tab
= (struct icp_qat_uof_objtable
*)
566 (encap_uof_obj
->beg_uof
+
567 code_page
->imp_expr_tab_offset
);
568 if (uc_var_tab
->entry_num
|| imp_var_tab
->entry_num
||
569 imp_expr_tab
->entry_num
) {
570 pr_err("QAT: UOF can't contain imported variable to be parsed\n");
573 neigh_reg_tab
= (struct icp_qat_uof_objtable
*)
574 (encap_uof_obj
->beg_uof
+
575 code_page
->neigh_reg_tab_offset
);
576 if (neigh_reg_tab
->entry_num
) {
577 pr_err("QAT: UOF can't contain shared control store feature\n");
580 if (image
->numpages
> 1) {
581 pr_err("QAT: UOF can't contain multiple pages\n");
584 if (ICP_QAT_SHARED_USTORE_MODE(image
->ae_mode
)) {
585 pr_err("QAT: UOF can't use shared control store feature\n");
588 if (RELOADABLE_CTX_SHARED_MODE(image
->ae_mode
)) {
589 pr_err("QAT: UOF can't use reloadable feature\n");
595 static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
597 struct icp_qat_uof_image
*img
,
598 struct icp_qat_uclo_encap_page
*page
)
600 struct icp_qat_uof_code_page
*code_page
;
601 struct icp_qat_uof_code_area
*code_area
;
602 struct icp_qat_uof_objtable
*uword_block_tab
;
603 struct icp_qat_uof_uword_block
*uwblock
;
606 code_page
= (struct icp_qat_uof_code_page
*)
607 ((char *)img
+ sizeof(struct icp_qat_uof_image
));
608 page
->def_page
= code_page
->def_page
;
609 page
->page_region
= code_page
->page_region
;
610 page
->beg_addr_v
= code_page
->beg_addr_v
;
611 page
->beg_addr_p
= code_page
->beg_addr_p
;
612 code_area
= (struct icp_qat_uof_code_area
*)(encap_uof_obj
->beg_uof
+
613 code_page
->code_area_offset
);
614 page
->micro_words_num
= code_area
->micro_words_num
;
615 uword_block_tab
= (struct icp_qat_uof_objtable
*)
616 (encap_uof_obj
->beg_uof
+
617 code_area
->uword_block_tab
);
618 page
->uwblock_num
= uword_block_tab
->entry_num
;
619 uwblock
= (struct icp_qat_uof_uword_block
*)((char *)uword_block_tab
+
620 sizeof(struct icp_qat_uof_objtable
));
621 page
->uwblock
= (struct icp_qat_uclo_encap_uwblock
*)uwblock
;
622 for (i
= 0; i
< uword_block_tab
->entry_num
; i
++)
623 page
->uwblock
[i
].micro_words
=
624 (uintptr_t)encap_uof_obj
->beg_uof
+ uwblock
[i
].uword_offset
;
627 static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle
*obj_handle
,
628 struct icp_qat_uclo_encapme
*ae_uimage
,
632 struct icp_qat_uof_chunkhdr
*chunk_hdr
= NULL
;
633 struct icp_qat_uof_image
*image
;
634 struct icp_qat_uof_objtable
*ae_regtab
;
635 struct icp_qat_uof_objtable
*init_reg_sym_tab
;
636 struct icp_qat_uof_objtable
*sbreak_tab
;
637 struct icp_qat_uof_encap_obj
*encap_uof_obj
=
638 &obj_handle
->encap_uof_obj
;
640 for (j
= 0; j
< max_image
; j
++) {
641 chunk_hdr
= qat_uclo_find_chunk(encap_uof_obj
->obj_hdr
,
642 ICP_QAT_UOF_IMAG
, chunk_hdr
);
645 image
= (struct icp_qat_uof_image
*)(encap_uof_obj
->beg_uof
+
647 ae_regtab
= (struct icp_qat_uof_objtable
*)
648 (image
->reg_tab_offset
+
649 obj_handle
->obj_hdr
->file_buff
);
650 ae_uimage
[j
].ae_reg_num
= ae_regtab
->entry_num
;
651 ae_uimage
[j
].ae_reg
= (struct icp_qat_uof_ae_reg
*)
652 (((char *)ae_regtab
) +
653 sizeof(struct icp_qat_uof_objtable
));
654 init_reg_sym_tab
= (struct icp_qat_uof_objtable
*)
655 (image
->init_reg_sym_tab
+
656 obj_handle
->obj_hdr
->file_buff
);
657 ae_uimage
[j
].init_regsym_num
= init_reg_sym_tab
->entry_num
;
658 ae_uimage
[j
].init_regsym
= (struct icp_qat_uof_init_regsym
*)
659 (((char *)init_reg_sym_tab
) +
660 sizeof(struct icp_qat_uof_objtable
));
661 sbreak_tab
= (struct icp_qat_uof_objtable
*)
662 (image
->sbreak_tab
+ obj_handle
->obj_hdr
->file_buff
);
663 ae_uimage
[j
].sbreak_num
= sbreak_tab
->entry_num
;
664 ae_uimage
[j
].sbreak
= (struct icp_qat_uof_sbreak
*)
665 (((char *)sbreak_tab
) +
666 sizeof(struct icp_qat_uof_objtable
));
667 ae_uimage
[j
].img_ptr
= image
;
668 if (qat_uclo_check_image_compat(encap_uof_obj
, image
))
671 kzalloc(sizeof(struct icp_qat_uclo_encap_page
),
673 if (!ae_uimage
[j
].page
)
675 qat_uclo_map_image_page(encap_uof_obj
, image
,
680 for (i
= 0; i
< j
; i
++)
681 kfree(ae_uimage
[i
].page
);
685 static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle
*handle
, int max_ae
)
689 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
691 for (ae
= 0; ae
< max_ae
; ae
++) {
693 (unsigned long *)&handle
->hal_handle
->ae_mask
))
695 for (i
= 0; i
< obj_handle
->uimage_num
; i
++) {
696 if (!test_bit(ae
, (unsigned long *)
697 &obj_handle
->ae_uimage
[i
].img_ptr
->ae_assigned
))
700 if (qat_uclo_init_ae_data(obj_handle
, ae
, i
))
705 pr_err("QAT: uimage uses AE not set\n");
711 static struct icp_qat_uof_strtable
*
712 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr
*obj_hdr
,
713 char *tab_name
, struct icp_qat_uof_strtable
*str_table
)
715 struct icp_qat_uof_chunkhdr
*chunk_hdr
;
717 chunk_hdr
= qat_uclo_find_chunk((struct icp_qat_uof_objhdr
*)
718 obj_hdr
->file_buff
, tab_name
, NULL
);
722 memcpy(&str_table
->table_len
, obj_hdr
->file_buff
+
723 chunk_hdr
->offset
, sizeof(str_table
->table_len
));
724 hdr_size
= (char *)&str_table
->strings
- (char *)str_table
;
725 str_table
->strings
= (uintptr_t)obj_hdr
->file_buff
+
726 chunk_hdr
->offset
+ hdr_size
;
733 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj
*encap_uof_obj
,
734 struct icp_qat_uclo_init_mem_table
*init_mem_tab
)
736 struct icp_qat_uof_chunkhdr
*chunk_hdr
;
738 chunk_hdr
= qat_uclo_find_chunk(encap_uof_obj
->obj_hdr
,
739 ICP_QAT_UOF_IMEM
, NULL
);
741 memmove(&init_mem_tab
->entry_num
, encap_uof_obj
->beg_uof
+
742 chunk_hdr
->offset
, sizeof(unsigned int));
743 init_mem_tab
->init_mem
= (struct icp_qat_uof_initmem
*)
744 (encap_uof_obj
->beg_uof
+ chunk_hdr
->offset
+
745 sizeof(unsigned int));
750 qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle
*handle
)
752 switch (handle
->pci_dev
->device
) {
753 case ADF_DH895XCC_PCI_DEVICE_ID
:
754 return ICP_QAT_AC_895XCC_DEV_TYPE
;
755 case ADF_C62X_PCI_DEVICE_ID
:
756 return ICP_QAT_AC_C62X_DEV_TYPE
;
757 case ADF_C3XXX_PCI_DEVICE_ID
:
758 return ICP_QAT_AC_C3XXX_DEV_TYPE
;
760 pr_err("QAT: unsupported device 0x%x\n",
761 handle
->pci_dev
->device
);
766 static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle
*obj_handle
)
768 unsigned int maj_ver
, prod_type
= obj_handle
->prod_type
;
770 if (!(prod_type
& obj_handle
->encap_uof_obj
.obj_hdr
->ac_dev_type
)) {
771 pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
772 obj_handle
->encap_uof_obj
.obj_hdr
->ac_dev_type
,
776 maj_ver
= obj_handle
->prod_rev
& 0xff;
777 if ((obj_handle
->encap_uof_obj
.obj_hdr
->max_cpu_ver
< maj_ver
) ||
778 (obj_handle
->encap_uof_obj
.obj_hdr
->min_cpu_ver
> maj_ver
)) {
779 pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver
);
785 static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle
*handle
,
786 unsigned char ae
, unsigned char ctx_mask
,
787 enum icp_qat_uof_regtype reg_type
,
788 unsigned short reg_addr
, unsigned int value
)
797 return qat_hal_init_gpr(handle
, ae
, ctx_mask
, reg_type
,
809 return qat_hal_init_rd_xfer(handle
, ae
, ctx_mask
, reg_type
,
817 return qat_hal_init_wr_xfer(handle
, ae
, ctx_mask
, reg_type
,
820 return qat_hal_init_nn(handle
, ae
, ctx_mask
, reg_addr
, value
);
822 pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type
);
828 static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle
*handle
,
830 struct icp_qat_uclo_encapme
*encap_ae
)
833 unsigned char ctx_mask
;
834 struct icp_qat_uof_init_regsym
*init_regsym
;
836 if (ICP_QAT_CTX_MODE(encap_ae
->img_ptr
->ae_mode
) ==
837 ICP_QAT_UCLO_MAX_CTX
)
842 for (i
= 0; i
< encap_ae
->init_regsym_num
; i
++) {
843 unsigned int exp_res
;
845 init_regsym
= &encap_ae
->init_regsym
[i
];
846 exp_res
= init_regsym
->value
;
847 switch (init_regsym
->init_type
) {
848 case ICP_QAT_UOF_INIT_REG
:
849 qat_uclo_init_reg(handle
, ae
, ctx_mask
,
850 (enum icp_qat_uof_regtype
)
851 init_regsym
->reg_type
,
852 (unsigned short)init_regsym
->reg_addr
,
855 case ICP_QAT_UOF_INIT_REG_CTX
:
856 /* check if ctx is appropriate for the ctxMode */
857 if (!((1 << init_regsym
->ctx
) & ctx_mask
)) {
858 pr_err("QAT: invalid ctx num = 0x%x\n",
862 qat_uclo_init_reg(handle
, ae
,
864 (1 << init_regsym
->ctx
),
865 (enum icp_qat_uof_regtype
)
866 init_regsym
->reg_type
,
867 (unsigned short)init_regsym
->reg_addr
,
870 case ICP_QAT_UOF_INIT_EXPR
:
871 pr_err("QAT: INIT_EXPR feature not supported\n");
873 case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP
:
874 pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
883 static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle
*handle
)
885 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
888 if (obj_handle
->global_inited
)
890 if (obj_handle
->init_mem_tab
.entry_num
) {
891 if (qat_uclo_init_memory(handle
)) {
892 pr_err("QAT: initialize memory failed\n");
896 for (ae
= 0; ae
< handle
->hal_handle
->ae_max_num
; ae
++) {
897 for (s
= 0; s
< obj_handle
->ae_data
[ae
].slice_num
; s
++) {
898 if (!obj_handle
->ae_data
[ae
].ae_slices
[s
].encap_image
)
900 if (qat_uclo_init_reg_sym(handle
, ae
,
901 obj_handle
->ae_data
[ae
].
902 ae_slices
[s
].encap_image
))
906 obj_handle
->global_inited
= 1;
910 static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle
*handle
)
912 unsigned char ae
, nn_mode
, s
;
913 struct icp_qat_uof_image
*uof_image
;
914 struct icp_qat_uclo_aedata
*ae_data
;
915 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
917 for (ae
= 0; ae
< handle
->hal_handle
->ae_max_num
; ae
++) {
919 (unsigned long *)&handle
->hal_handle
->ae_mask
))
921 ae_data
= &obj_handle
->ae_data
[ae
];
922 for (s
= 0; s
< min_t(unsigned int, ae_data
->slice_num
,
923 ICP_QAT_UCLO_MAX_CTX
); s
++) {
924 if (!obj_handle
->ae_data
[ae
].ae_slices
[s
].encap_image
)
926 uof_image
= ae_data
->ae_slices
[s
].encap_image
->img_ptr
;
927 if (qat_hal_set_ae_ctx_mode(handle
, ae
,
928 (char)ICP_QAT_CTX_MODE
929 (uof_image
->ae_mode
))) {
930 pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
933 nn_mode
= ICP_QAT_NN_MODE(uof_image
->ae_mode
);
934 if (qat_hal_set_ae_nn_mode(handle
, ae
, nn_mode
)) {
935 pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
938 if (qat_hal_set_ae_lm_mode(handle
, ae
, ICP_LMEM0
,
939 (char)ICP_QAT_LOC_MEM0_MODE
940 (uof_image
->ae_mode
))) {
941 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
944 if (qat_hal_set_ae_lm_mode(handle
, ae
, ICP_LMEM1
,
945 (char)ICP_QAT_LOC_MEM1_MODE
946 (uof_image
->ae_mode
))) {
947 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
955 static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle
*handle
)
957 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
958 struct icp_qat_uclo_encapme
*image
;
961 for (a
= 0; a
< obj_handle
->uimage_num
; a
++) {
962 image
= &obj_handle
->ae_uimage
[a
];
963 image
->uwords_num
= image
->page
->beg_addr_p
+
964 image
->page
->micro_words_num
;
968 static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle
*handle
)
970 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
973 obj_handle
->encap_uof_obj
.beg_uof
= obj_handle
->obj_hdr
->file_buff
;
974 obj_handle
->encap_uof_obj
.obj_hdr
= (struct icp_qat_uof_objhdr
*)
975 obj_handle
->obj_hdr
->file_buff
;
976 obj_handle
->uword_in_bytes
= 6;
977 obj_handle
->prod_type
= qat_uclo_get_dev_type(handle
);
978 obj_handle
->prod_rev
= PID_MAJOR_REV
|
979 (PID_MINOR_REV
& handle
->hal_handle
->revision_id
);
980 if (qat_uclo_check_uof_compat(obj_handle
)) {
981 pr_err("QAT: UOF incompatible\n");
984 obj_handle
->uword_buf
= kcalloc(UWORD_CPYBUF_SIZE
, sizeof(uint64_t),
986 if (!obj_handle
->uword_buf
)
988 obj_handle
->ustore_phy_size
= ICP_QAT_UCLO_MAX_USTORE
;
989 if (!obj_handle
->obj_hdr
->file_buff
||
990 !qat_uclo_map_str_table(obj_handle
->obj_hdr
, ICP_QAT_UOF_STRT
,
991 &obj_handle
->str_table
)) {
992 pr_err("QAT: UOF doesn't have effective images\n");
995 obj_handle
->uimage_num
=
996 qat_uclo_map_uimage(obj_handle
, obj_handle
->ae_uimage
,
997 ICP_QAT_UCLO_MAX_AE
* ICP_QAT_UCLO_MAX_CTX
);
998 if (!obj_handle
->uimage_num
)
1000 if (qat_uclo_map_ae(handle
, handle
->hal_handle
->ae_max_num
)) {
1001 pr_err("QAT: Bad object\n");
1002 goto out_check_uof_aemask_err
;
1004 qat_uclo_init_uword_num(handle
);
1005 qat_uclo_map_initmem_table(&obj_handle
->encap_uof_obj
,
1006 &obj_handle
->init_mem_tab
);
1007 if (qat_uclo_set_ae_mode(handle
))
1008 goto out_check_uof_aemask_err
;
1010 out_check_uof_aemask_err
:
1011 for (ae
= 0; ae
< obj_handle
->uimage_num
; ae
++)
1012 kfree(obj_handle
->ae_uimage
[ae
].page
);
1014 kfree(obj_handle
->uword_buf
);
1018 static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle
*handle
,
1019 struct icp_qat_suof_filehdr
*suof_ptr
,
1022 unsigned int check_sum
= 0;
1023 unsigned int min_ver_offset
= 0;
1024 struct icp_qat_suof_handle
*suof_handle
= handle
->sobj_handle
;
1026 suof_handle
->file_id
= ICP_QAT_SUOF_FID
;
1027 suof_handle
->suof_buf
= (char *)suof_ptr
;
1028 suof_handle
->suof_size
= suof_size
;
1029 min_ver_offset
= suof_size
- offsetof(struct icp_qat_suof_filehdr
,
1031 check_sum
= qat_uclo_calc_str_checksum((char *)&suof_ptr
->min_ver
,
1033 if (check_sum
!= suof_ptr
->check_sum
) {
1034 pr_err("QAT: incorrect SUOF checksum\n");
1037 suof_handle
->check_sum
= suof_ptr
->check_sum
;
1038 suof_handle
->min_ver
= suof_ptr
->min_ver
;
1039 suof_handle
->maj_ver
= suof_ptr
->maj_ver
;
1040 suof_handle
->fw_type
= suof_ptr
->fw_type
;
1044 static void qat_uclo_map_simg(struct icp_qat_suof_handle
*suof_handle
,
1045 struct icp_qat_suof_img_hdr
*suof_img_hdr
,
1046 struct icp_qat_suof_chunk_hdr
*suof_chunk_hdr
)
1048 struct icp_qat_simg_ae_mode
*ae_mode
;
1049 struct icp_qat_suof_objhdr
*suof_objhdr
;
1051 suof_img_hdr
->simg_buf
= (suof_handle
->suof_buf
+
1052 suof_chunk_hdr
->offset
+
1053 sizeof(*suof_objhdr
));
1054 suof_img_hdr
->simg_len
= ((struct icp_qat_suof_objhdr
*)(uintptr_t)
1055 (suof_handle
->suof_buf
+
1056 suof_chunk_hdr
->offset
))->img_length
;
1058 suof_img_hdr
->css_header
= suof_img_hdr
->simg_buf
;
1059 suof_img_hdr
->css_key
= (suof_img_hdr
->css_header
+
1060 sizeof(struct icp_qat_css_hdr
));
1061 suof_img_hdr
->css_signature
= suof_img_hdr
->css_key
+
1062 ICP_QAT_CSS_FWSK_MODULUS_LEN
+
1063 ICP_QAT_CSS_FWSK_EXPONENT_LEN
;
1064 suof_img_hdr
->css_simg
= suof_img_hdr
->css_signature
+
1065 ICP_QAT_CSS_SIGNATURE_LEN
;
1067 ae_mode
= (struct icp_qat_simg_ae_mode
*)(suof_img_hdr
->css_simg
);
1068 suof_img_hdr
->ae_mask
= ae_mode
->ae_mask
;
1069 suof_img_hdr
->simg_name
= (unsigned long)&ae_mode
->simg_name
;
1070 suof_img_hdr
->appmeta_data
= (unsigned long)&ae_mode
->appmeta_data
;
1071 suof_img_hdr
->fw_type
= ae_mode
->fw_type
;
1075 qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle
*suof_handle
,
1076 struct icp_qat_suof_chunk_hdr
*suof_chunk_hdr
)
1078 char **sym_str
= (char **)&suof_handle
->sym_str
;
1079 unsigned int *sym_size
= &suof_handle
->sym_size
;
1080 struct icp_qat_suof_strtable
*str_table_obj
;
1082 *sym_size
= *(unsigned int *)(uintptr_t)
1083 (suof_chunk_hdr
->offset
+ suof_handle
->suof_buf
);
1084 *sym_str
= (char *)(uintptr_t)
1085 (suof_handle
->suof_buf
+ suof_chunk_hdr
->offset
+
1086 sizeof(str_table_obj
->tab_length
));
1089 static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle
*handle
,
1090 struct icp_qat_suof_img_hdr
*img_hdr
)
1092 struct icp_qat_simg_ae_mode
*img_ae_mode
= NULL
;
1093 unsigned int prod_rev
, maj_ver
, prod_type
;
1095 prod_type
= qat_uclo_get_dev_type(handle
);
1096 img_ae_mode
= (struct icp_qat_simg_ae_mode
*)img_hdr
->css_simg
;
1097 prod_rev
= PID_MAJOR_REV
|
1098 (PID_MINOR_REV
& handle
->hal_handle
->revision_id
);
1099 if (img_ae_mode
->dev_type
!= prod_type
) {
1100 pr_err("QAT: incompatible product type %x\n",
1101 img_ae_mode
->dev_type
);
1104 maj_ver
= prod_rev
& 0xff;
1105 if ((maj_ver
> img_ae_mode
->devmax_ver
) ||
1106 (maj_ver
< img_ae_mode
->devmin_ver
)) {
1107 pr_err("QAT: incompatible device majver 0x%x\n", maj_ver
);
1113 static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle
*handle
)
1115 struct icp_qat_suof_handle
*sobj_handle
= handle
->sobj_handle
;
1117 kfree(sobj_handle
->img_table
.simg_hdr
);
1118 sobj_handle
->img_table
.simg_hdr
= NULL
;
1119 kfree(handle
->sobj_handle
);
1120 handle
->sobj_handle
= NULL
;
1123 static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr
*suof_img_hdr
,
1124 unsigned int img_id
, unsigned int num_simgs
)
1126 struct icp_qat_suof_img_hdr img_header
;
1128 if (img_id
!= num_simgs
- 1) {
1129 memcpy(&img_header
, &suof_img_hdr
[num_simgs
- 1],
1130 sizeof(*suof_img_hdr
));
1131 memcpy(&suof_img_hdr
[num_simgs
- 1], &suof_img_hdr
[img_id
],
1132 sizeof(*suof_img_hdr
));
1133 memcpy(&suof_img_hdr
[img_id
], &img_header
,
1134 sizeof(*suof_img_hdr
));
1138 static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle
*handle
,
1139 struct icp_qat_suof_filehdr
*suof_ptr
,
1142 struct icp_qat_suof_handle
*suof_handle
= handle
->sobj_handle
;
1143 struct icp_qat_suof_chunk_hdr
*suof_chunk_hdr
= NULL
;
1144 struct icp_qat_suof_img_hdr
*suof_img_hdr
= NULL
;
1145 int ret
= 0, ae0_img
= ICP_QAT_UCLO_MAX_AE
;
1147 struct icp_qat_suof_img_hdr img_header
;
1149 if (!suof_ptr
|| (suof_size
== 0)) {
1150 pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
1153 if (qat_uclo_check_suof_format(suof_ptr
))
1155 ret
= qat_uclo_map_suof_file_hdr(handle
, suof_ptr
, suof_size
);
1158 suof_chunk_hdr
= (struct icp_qat_suof_chunk_hdr
*)
1159 ((uintptr_t)suof_ptr
+ sizeof(*suof_ptr
));
1161 qat_uclo_map_suof_symobjs(suof_handle
, suof_chunk_hdr
);
1162 suof_handle
->img_table
.num_simgs
= suof_ptr
->num_chunks
- 1;
1164 if (suof_handle
->img_table
.num_simgs
!= 0) {
1165 suof_img_hdr
= kzalloc(suof_handle
->img_table
.num_simgs
*
1166 sizeof(img_header
), GFP_KERNEL
);
1169 suof_handle
->img_table
.simg_hdr
= suof_img_hdr
;
1172 for (i
= 0; i
< suof_handle
->img_table
.num_simgs
; i
++) {
1173 qat_uclo_map_simg(handle
->sobj_handle
, &suof_img_hdr
[i
],
1174 &suof_chunk_hdr
[1 + i
]);
1175 ret
= qat_uclo_check_simg_compat(handle
,
1179 if ((suof_img_hdr
[i
].ae_mask
& 0x1) != 0)
1182 qat_uclo_tail_img(suof_img_hdr
, ae0_img
,
1183 suof_handle
->img_table
.num_simgs
);
1187 #define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + low)
1188 #define BITS_IN_DWORD 32
/*
 * Ask the FCU (firmware control unit) to authenticate the image
 * described by @desc.  The descriptor's bus address (minus the
 * leading auth chunk) is programmed into the FCU DRAM address CSRs,
 * the AUTH command is issued, and FCU_STATUS is polled until either
 * verification completes, verification fails, or the retry budget
 * is exhausted.
 *
 * Returns 0 on successful verification, -EINVAL on failure/timeout.
 */
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int fcu_sts, retry = 0;
	u64 bus_addr;

	/* The CSS header sits right after the auth chunk in DRAM. */
	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
			   - sizeof(struct icp_qat_auth_chunk);
	SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD));
	SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr);
	SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH);

	do {
		msleep(FW_AUTH_WAIT_PERIOD);
		fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
			goto auth_fail;
		/* Done bit only meaningful once AUTHFWLD is set. */
		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
				return 0;
	} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
	pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
	       fcu_sts & FCU_AUTH_STS_MASK, retry);
	return -EINVAL;
}
1217 static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle
*handle
,
1218 struct icp_firml_dram_desc
*dram_desc
,
1224 vptr
= dma_alloc_coherent(&handle
->pci_dev
->dev
,
1225 size
, &ptr
, GFP_KERNEL
);
1228 dram_desc
->dram_base_addr_v
= vptr
;
1229 dram_desc
->dram_bus_addr
= ptr
;
1230 dram_desc
->dram_size
= size
;
1234 static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle
*handle
,
1235 struct icp_firml_dram_desc
*dram_desc
)
1237 dma_free_coherent(&handle
->pci_dev
->dev
,
1238 (size_t)(dram_desc
->dram_size
),
1239 (dram_desc
->dram_base_addr_v
),
1240 dram_desc
->dram_bus_addr
);
1241 memset(dram_desc
, 0, sizeof(*dram_desc
));
1244 static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle
*handle
,
1245 struct icp_qat_fw_auth_desc
**desc
)
1247 struct icp_firml_dram_desc dram_desc
;
1249 dram_desc
.dram_base_addr_v
= *desc
;
1250 dram_desc
.dram_bus_addr
= ((struct icp_qat_auth_chunk
*)
1251 (*desc
))->chunk_bus_addr
;
1252 dram_desc
.dram_size
= ((struct icp_qat_auth_chunk
*)
1253 (*desc
))->chunk_size
;
1254 qat_uclo_simg_free(handle
, &dram_desc
);
/*
 * Copy a firmware @image into a freshly allocated DMA-coherent region
 * laid out the way the authentication hardware expects, and build the
 * auth descriptor that points at each section:
 *
 *   [auth chunk][CSS header][FWSK modulus|pad|exponent][signature][image]
 *
 * Each section's bus address is split into high/low 32-bit halves in
 * the descriptor.  On success *@desc points at the descriptor (which
 * is also the base of the allocation) and 0 is returned; -EINVAL on
 * oversized input, -ENOMEM on allocation failure.
 */
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
				char *image, unsigned int size,
				struct icp_qat_fw_auth_desc **desc)
{
	struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
	struct icp_qat_fw_auth_desc *auth_desc;
	struct icp_qat_auth_chunk *auth_chunk;
	u64 virt_addr,  bus_addr, virt_base;
	unsigned int length, simg_offset = sizeof(*auth_chunk);
	struct icp_firml_dram_desc img_desc;

	if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
		pr_err("QAT: error, input image size overflow %d\n", size);
		return -EINVAL;
	}
	/* AE firmware has a fixed layout length; MMP images are padded. */
	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
		 ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
		 size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
	if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
		pr_err("QAT: error, allocate continuous dram fail\n");
		return -ENOMEM;
	}

	/* The auth chunk at the base records the allocation so that
	 * qat_uclo_ummap_auth_fw() can free it later.
	 */
	auth_chunk = img_desc.dram_base_addr_v;
	auth_chunk->chunk_size = img_desc.dram_size;
	auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
	virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
	bus_addr  = img_desc.dram_bus_addr + simg_offset;
	auth_desc = img_desc.dram_base_addr_v;
	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->css_hdr_low = (unsigned int)bus_addr;
	virt_addr = virt_base;

	/* CSS header */
	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
	/* firmware signing key (public part) */
	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
			   sizeof(*css_hdr);
	virt_addr = virt_addr + sizeof(*css_hdr);

	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr)),
	       ICP_QAT_CSS_FWSK_MODULUS_LEN);
	/* zero-pad between modulus and exponent */
	memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       0, ICP_QAT_CSS_FWSK_PAD_LEN);

	/* exponent (a single dword in the source image) */
	memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
	       ICP_QAT_CSS_FWSK_PAD_LEN),
	       (void *)(image + sizeof(*css_hdr) +
			ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       sizeof(unsigned int));

	/* signature */
	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
			    auth_desc->fwsk_pub_low) +
		   ICP_QAT_CSS_FWSK_PUB_LEN;
	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->signature_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr) +
	       ICP_QAT_CSS_FWSK_MODULUS_LEN +
	       ICP_QAT_CSS_FWSK_EXPONENT_LEN),
	       ICP_QAT_CSS_SIGNATURE_LEN);

	/* image payload follows the signature */
	bus_addr = ADD_ADDR(auth_desc->signature_high,
			    auth_desc->signature_low) +
		   ICP_QAT_CSS_SIGNATURE_LEN;
	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;

	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->img_low = (unsigned int)bus_addr;
	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + ICP_QAT_AE_IMG_OFFSET),
	       auth_desc->img_len);
	virt_addr = virt_base;
	/* For AE firmware the image itself starts with an ae_mode block
	 * followed by the init sequence and then the instructions; the
	 * descriptor carries separate pointers to each.
	 */
	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
	    CSS_AE_FIRMWARE) {
		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
				    auth_desc->img_ae_mode_data_low) +
			   sizeof(struct icp_qat_simg_ae_mode);

		auth_desc->img_ae_init_data_high = (unsigned int)
						 (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
		auth_desc->img_ae_insts_high = (unsigned int)
					     (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
	} else {
		/* Non-AE (MMP) images: instructions are the whole image. */
		auth_desc->img_ae_insts_high = auth_desc->img_high;
		auth_desc->img_ae_insts_low = auth_desc->img_low;
	}
	*desc = auth_desc;
	return 0;
}
/*
 * Instruct the FCU to load the previously authenticated firmware into
 * every accelerator engine named in the image's ae_mask.  For each
 * such AE, a LOAD command is issued and FCU_STATUS is polled until
 * the load-done state is reported for that AE or the retry budget
 * runs out.
 *
 * Returns 0 on success, -EINVAL if an AE is still active or a load
 * times out.
 */
static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int i;
	unsigned int fcu_sts;
	struct icp_qat_simg_ae_mode *virt_addr;
	unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS;

	/* The ae_mode block (which carries ae_mask) sits after the auth
	 * chunk, CSS header, public key and signature in the mapped image.
	 */
	virt_addr = (void *)((uintptr_t)desc +
			     sizeof(struct icp_qat_auth_chunk) +
			     sizeof(struct icp_qat_css_hdr) +
			     ICP_QAT_CSS_FWSK_PUB_LEN +
			     ICP_QAT_CSS_SIGNATURE_LEN);
	for (i = 0; i < handle->hal_handle->ae_max_num; i++) {
		int retry = 0;

		/* Skip AEs this image does not target. */
		if (!((virt_addr->ae_mask >> i) & 0x1))
			continue;
		/* Refuse to load over a running AE. */
		if (qat_hal_check_ae_active(handle, i)) {
			pr_err("QAT: AE %d is active\n", i);
			return -EINVAL;
		}
		SET_CAP_CSR(handle, FCU_CONTROL,
			    (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));

		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
			/* Done requires both the LOAD_DONE state and the
			 * per-AE loaded bit for this engine.
			 */
			if (((fcu_sts & FCU_AUTH_STS_MASK) ==
			    FCU_STS_LOAD_DONE) &&
			    ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i)))
				break;
		} while (retry++ < FW_AUTH_MAX_RETRY);
		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("QAT: firmware load failed timeout %x\n", retry);
			return -EINVAL;
		}
	}
	return 0;
}
1404 static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle
*handle
,
1405 void *addr_ptr
, int mem_size
)
1407 struct icp_qat_suof_handle
*suof_handle
;
1409 suof_handle
= kzalloc(sizeof(*suof_handle
), GFP_KERNEL
);
1412 handle
->sobj_handle
= suof_handle
;
1413 if (qat_uclo_map_suof(handle
, addr_ptr
, mem_size
)) {
1414 qat_uclo_del_suof(handle
);
1415 pr_err("QAT: map SUOF failed\n");
/*
 * Write an MMP image to the device.  On parts with firmware
 * authentication the image is mapped, authenticated via the FCU and
 * the mapping released; otherwise it is written directly to SRAM
 * (which C3XXX does not support for unsigned images).
 *
 * Returns 0 on success or a negative error code.
 */
int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
		       void *addr_ptr, int mem_size)
{
	struct icp_qat_fw_auth_desc *desc = NULL;
	int status = 0;

	if (handle->fw_auth) {
		if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
			status = qat_uclo_auth_fw(handle, desc);
		/* Unmap regardless of the authentication outcome. */
		qat_uclo_ummap_auth_fw(handle, &desc);
	} else {
		if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
			pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
			return -EINVAL;
		}
		qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
	}
	return status;
}
/*
 * Parse an unsigned UOF object: duplicate the caller's buffer, verify
 * the file header, locate the object chunk and parse it into
 * handle->obj_handle.  Uses a goto-based cleanup chain so each
 * acquired resource is released exactly once on the failure paths.
 *
 * Returns 0 on success, -ENOMEM on any failure (allocation, bad
 * format, or parse error - all paths share the same return code).
 */
static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
				void *addr_ptr, int mem_size)
{
	struct icp_qat_uof_filehdr *filehdr;
	struct icp_qat_uclo_objhandle *objhdl;

	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
	if (!objhdl)
		return -ENOMEM;
	/* Private copy of the object so the caller may free theirs. */
	objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
	if (!objhdl->obj_buf)
		goto out_objbuf_err;
	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
	if (qat_uclo_check_uof_format(filehdr))
		goto out_objhdr_err;
	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
					     ICP_QAT_UOF_OBJS);
	if (!objhdl->obj_hdr) {
		pr_err("QAT: object file chunk is null\n");
		goto out_objhdr_err;
	}
	handle->obj_handle = objhdl;
	if (qat_uclo_parse_uof_obj(handle))
		goto out_overlay_obj_err;
	return 0;

out_overlay_obj_err:
	handle->obj_handle = NULL;
	kfree(objhdl->obj_hdr);
out_objhdr_err:
	kfree(objhdl->obj_buf);
out_objbuf_err:
	kfree(objhdl);
	return -ENOMEM;
}
1477 int qat_uclo_map_obj(struct icp_qat_fw_loader_handle
*handle
,
1478 void *addr_ptr
, int mem_size
)
1480 BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE
>=
1481 (sizeof(handle
->hal_handle
->ae_mask
) * 8));
1483 if (!handle
|| !addr_ptr
|| mem_size
< 24)
1486 return (handle
->fw_auth
) ?
1487 qat_uclo_map_suof_obj(handle
, addr_ptr
, mem_size
) :
1488 qat_uclo_map_uof_obj(handle
, addr_ptr
, mem_size
);
1491 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle
*handle
)
1493 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
1496 if (handle
->sobj_handle
)
1497 qat_uclo_del_suof(handle
);
1501 kfree(obj_handle
->uword_buf
);
1502 for (a
= 0; a
< obj_handle
->uimage_num
; a
++)
1503 kfree(obj_handle
->ae_uimage
[a
].page
);
1505 for (a
= 0; a
< handle
->hal_handle
->ae_max_num
; a
++)
1506 qat_uclo_free_ae_data(&obj_handle
->ae_data
[a
]);
1508 kfree(obj_handle
->obj_hdr
);
1509 kfree(obj_handle
->obj_buf
);
1511 handle
->obj_handle
= NULL
;
/*
 * Produce the micro-word for relative ustore address @raddr: if some
 * uwblock of @encap_page covers @raddr, copy that word out (masked);
 * otherwise, or when the copied word equals INVLD_UWORD, emit @fill
 * (the image's fill pattern).  The result is written to *@uword.
 * @addr_p (the physical address) is accepted but not used here.
 */
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
				 struct icp_qat_uclo_encap_page *encap_page,
				 uint64_t *uword, unsigned int addr_p,
				 unsigned int raddr, uint64_t fill)
{
	uint64_t uwrd = 0;
	unsigned int i;

	if (!encap_page) {
		*uword = fill;
		return;
	}
	for (i = 0; i < encap_page->uwblock_num; i++) {
		/* Does this uwblock's [start, start + words_num) range
		 * cover raddr?
		 */
		if (raddr >= encap_page->uwblock[i].start_addr &&
		    raddr <= encap_page->uwblock[i].start_addr +
		    encap_page->uwblock[i].words_num - 1) {
			raddr -= encap_page->uwblock[i].start_addr;
			raddr *= obj_handle->uword_in_bytes;
			memcpy(&uwrd, (void *)(((uintptr_t)
			       encap_page->uwblock[i].micro_words) + raddr),
			       obj_handle->uword_in_bytes);
			/* NOTE(review): mask appears to clear one high bit
			 * of the micro-word - hardware-defined; confirm
			 * against the uword format documentation.
			 */
			uwrd = uwrd & 0xbffffffffffull;
		}
	}
	*uword = uwrd;
	if (*uword == INVLD_UWORD)
		*uword = fill;
}
/*
 * Write one encapsulated page into AE @ae's ustore.  Micro-words are
 * materialized into obj_handle->uword_buf in chunks of at most
 * UWORD_CPYBUF_SIZE and flushed with qat_hal_wr_uwords() until the
 * whole page (encap_page->micro_words_num words) is written.
 */
static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
					struct icp_qat_uclo_encap_page
					*encap_page, unsigned int ae)
{
	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t fill_pat;

	/* load the page starting at appropriate ustore address */
	/* get fill-pattern from an image -- they are all the same */
	memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
	       sizeof(uint64_t));
	uw_physical_addr = encap_page->beg_addr_p;
	uw_relative_addr = 0;
	words_num = encap_page->micro_words_num;
	while (words_num) {
		/* Copy at most one buffer's worth per iteration. */
		if (words_num < UWORD_CPYBUF_SIZE)
			cpylen = words_num;
		else
			cpylen = UWORD_CPYBUF_SIZE;

		/* load the buffer */
		for (i = 0; i < cpylen; i++)
			qat_uclo_fill_uwords(obj_handle, encap_page,
					     &obj_handle->uword_buf[i],
					     uw_physical_addr + i,
					     uw_relative_addr + i, fill_pat);

		/* copy the buffer to ustore */
		qat_hal_wr_uwords(handle, (unsigned char)ae,
				  uw_physical_addr, cpylen,
				  obj_handle->uword_buf);

		uw_physical_addr += cpylen;
		uw_relative_addr += cpylen;
		words_num -= cpylen;
	}
}
/*
 * Load @image's default page into every AE it is assigned to and
 * point the assigned contexts at the image entry address.  For each
 * assigned AE: find the slice owning the image's contexts, write the
 * default page into ustore, record it as the current page for each
 * selected context, then set the live contexts and their PCs.
 */
static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
				    struct icp_qat_uof_image *image)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ctx_mask, s;
	struct icp_qat_uclo_page *page;
	unsigned char ae;
	int ctx;

	/* All 8 contexts in 8-ctx mode, every other one otherwise. */
	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;
	/* load the default page and set assigned CTX PC
	 * to the entrypoint address */
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
			continue;
		/* find the slice to which this image is assigned */
		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
			if (image->ctx_assigned & obj_handle->ae_data[ae].
			    ae_slices[s].ctx_mask_assigned)
				break;
		}
		if (s >= obj_handle->ae_data[ae].slice_num)
			continue;
		page = obj_handle->ae_data[ae].ae_slices[s].page;
		if (!page->encap_page->def_page)
			continue;
		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);

		page = obj_handle->ae_data[ae].ae_slices[s].page;
		/* Mark the just-written page current for each selected ctx. */
		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
			obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
					(ctx_mask & (1 << ctx)) ? page : NULL;
		qat_hal_set_live_ctx(handle, (unsigned char)ae,
				     image->ctx_assigned);
		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
			       image->entry_address);
	}
}
/*
 * Write every signed image in the SUOF image table to the device:
 * each image is mapped into DMA memory, authenticated, loaded into
 * its AEs and unmapped.  On any failure the current mapping is
 * released and -EINVAL returned.
 */
static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int i;
	struct icp_qat_fw_auth_desc *desc = NULL;
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;

	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
		if (qat_uclo_map_auth_fw(handle,
					 (char *)simg_hdr[i].simg_buf,
					 (unsigned int)
					 (simg_hdr[i].simg_len),
					 &desc))
			goto wr_err;
		if (qat_uclo_auth_fw(handle, desc))
			goto wr_err;
		if (qat_uclo_load_fw(handle, desc))
			goto wr_err;
		qat_uclo_ummap_auth_fw(handle, &desc);
	}
	return 0;
wr_err:
	/* Release whatever mapping is outstanding before failing. */
	qat_uclo_ummap_auth_fw(handle, &desc);
	return -EINVAL;
}
1650 static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle
*handle
)
1652 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
1655 if (qat_uclo_init_globals(handle
))
1657 for (i
= 0; i
< obj_handle
->uimage_num
; i
++) {
1658 if (!obj_handle
->ae_uimage
[i
].img_ptr
)
1660 if (qat_uclo_init_ustore(handle
, &obj_handle
->ae_uimage
[i
]))
1662 qat_uclo_wr_uimage_page(handle
,
1663 obj_handle
->ae_uimage
[i
].img_ptr
);
1668 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle
*handle
)
1670 return (handle
->fw_auth
) ? qat_uclo_wr_suof_img(handle
) :
1671 qat_uclo_wr_uof_img(handle
);