2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/slab.h>
48 #include <linux/ctype.h>
49 #include <linux/kernel.h>
51 #include "adf_accel_devices.h"
52 #include "adf_common_drv.h"
53 #include "icp_qat_uclo.h"
54 #include "icp_qat_hal.h"
55 #include "icp_qat_fw_loader_handle.h"
57 #define UWORD_CPYBUF_SIZE 1024
58 #define INVLD_UWORD 0xffffffffffull
59 #define PID_MINOR_REV 0xf
60 #define PID_MAJOR_REV (0xf << 4)
62 static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle
*obj_handle
,
63 unsigned int ae
, unsigned int image_num
)
65 struct icp_qat_uclo_aedata
*ae_data
;
66 struct icp_qat_uclo_encapme
*encap_image
;
67 struct icp_qat_uclo_page
*page
= NULL
;
68 struct icp_qat_uclo_aeslice
*ae_slice
= NULL
;
70 ae_data
= &obj_handle
->ae_data
[ae
];
71 encap_image
= &obj_handle
->ae_uimage
[image_num
];
72 ae_slice
= &ae_data
->ae_slices
[ae_data
->slice_num
];
73 ae_slice
->encap_image
= encap_image
;
75 if (encap_image
->img_ptr
) {
76 ae_slice
->ctx_mask_assigned
=
77 encap_image
->img_ptr
->ctx_assigned
;
78 ae_data
->eff_ustore_size
= obj_handle
->ustore_phy_size
;
80 ae_slice
->ctx_mask_assigned
= 0;
82 ae_slice
->region
= kzalloc(sizeof(*ae_slice
->region
), GFP_KERNEL
);
83 if (!ae_slice
->region
)
85 ae_slice
->page
= kzalloc(sizeof(*ae_slice
->page
), GFP_KERNEL
);
88 page
= ae_slice
->page
;
89 page
->encap_page
= encap_image
->page
;
90 ae_slice
->page
->region
= ae_slice
->region
;
94 kfree(ae_slice
->region
);
95 ae_slice
->region
= NULL
;
99 static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata
*ae_data
)
104 pr_err("QAT: bad argument, ae_data is NULL\n ");
108 for (i
= 0; i
< ae_data
->slice_num
; i
++) {
109 kfree(ae_data
->ae_slices
[i
].region
);
110 ae_data
->ae_slices
[i
].region
= NULL
;
111 kfree(ae_data
->ae_slices
[i
].page
);
112 ae_data
->ae_slices
[i
].page
= NULL
;
117 static char *qat_uclo_get_string(struct icp_qat_uof_strtable
*str_table
,
118 unsigned int str_offset
)
120 if ((!str_table
->table_len
) || (str_offset
> str_table
->table_len
))
122 return (char *)(((unsigned long)(str_table
->strings
)) + str_offset
);
125 static int qat_uclo_check_format(struct icp_qat_uof_filehdr
*hdr
)
127 int maj
= hdr
->maj_ver
& 0xff;
128 int min
= hdr
->min_ver
& 0xff;
130 if (hdr
->file_id
!= ICP_QAT_UOF_FID
) {
131 pr_err("QAT: Invalid header 0x%x\n", hdr
->file_id
);
134 if (min
!= ICP_QAT_UOF_MINVER
|| maj
!= ICP_QAT_UOF_MAJVER
) {
135 pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
/*
 * Copy @num_in_bytes from @val to device SRAM starting at @addr,
 * one 32-bit word per SRAM_WRITE.
 * NOTE(review): assumes num_in_bytes is a multiple of 4 — as callers
 * in this file guarantee.
 */
static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
}
/*
 * Write @num_in_bytes from @val into AE @ae micro-store memory at byte
 * address @addr (converted to a uword address), 32 bits at a time.
 */
static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}
176 static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle
*handle
,
178 struct icp_qat_uof_batch_init
181 struct icp_qat_uof_batch_init
*umem_init
;
183 if (!umem_init_header
)
185 umem_init
= umem_init_header
->next
;
187 unsigned int addr
, *value
, size
;
190 addr
= umem_init
->addr
;
191 value
= umem_init
->value
;
192 size
= umem_init
->size
;
193 qat_uclo_wr_umem_by_words(handle
, ae
, addr
, value
, size
);
194 umem_init
= umem_init
->next
;
199 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle
*handle
,
200 struct icp_qat_uof_batch_init
**base
)
202 struct icp_qat_uof_batch_init
*umem_init
;
206 struct icp_qat_uof_batch_init
*pre
;
209 umem_init
= umem_init
->next
;
215 static int qat_uclo_parse_num(char *str
, unsigned int *num
)
218 unsigned long ae
= 0;
221 strncpy(buf
, str
, 15);
222 for (i
= 0; i
< 16; i
++) {
223 if (!isdigit(buf
[i
])) {
228 if ((kstrtoul(buf
, 10, &ae
)))
231 *num
= (unsigned int)ae
;
235 static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle
*handle
,
236 struct icp_qat_uof_initmem
*init_mem
,
237 unsigned int size_range
, unsigned int *ae
)
239 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
242 if ((init_mem
->addr
+ init_mem
->num_in_bytes
) > (size_range
<< 0x2)) {
243 pr_err("QAT: initmem is out of range");
246 if (init_mem
->scope
!= ICP_QAT_UOF_LOCAL_SCOPE
) {
247 pr_err("QAT: Memory scope for init_mem error\n");
250 str
= qat_uclo_get_string(&obj_handle
->str_table
, init_mem
->sym_name
);
252 pr_err("QAT: AE name assigned in UOF init table is NULL\n");
255 if (qat_uclo_parse_num(str
, ae
)) {
256 pr_err("QAT: Parse num for AE number failed\n");
259 if (*ae
>= ICP_QAT_UCLO_MAX_AE
) {
260 pr_err("QAT: ae %d out of range\n", *ae
);
266 static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
267 *handle
, struct icp_qat_uof_initmem
268 *init_mem
, unsigned int ae
,
269 struct icp_qat_uof_batch_init
272 struct icp_qat_uof_batch_init
*init_header
, *tail
;
273 struct icp_qat_uof_batch_init
*mem_init
, *tail_old
;
274 struct icp_qat_uof_memvar_attr
*mem_val_attr
;
275 unsigned int i
, flag
= 0;
278 (struct icp_qat_uof_memvar_attr
*)((unsigned long)init_mem
+
279 sizeof(struct icp_qat_uof_initmem
));
281 init_header
= *init_tab_base
;
283 init_header
= kzalloc(sizeof(*init_header
), GFP_KERNEL
);
286 init_header
->size
= 1;
287 *init_tab_base
= init_header
;
290 tail_old
= init_header
;
291 while (tail_old
->next
)
292 tail_old
= tail_old
->next
;
294 for (i
= 0; i
< init_mem
->val_attr_num
; i
++) {
295 mem_init
= kzalloc(sizeof(*mem_init
), GFP_KERNEL
);
299 mem_init
->addr
= init_mem
->addr
+ mem_val_attr
->offset_in_byte
;
300 mem_init
->value
= &mem_val_attr
->value
;
302 mem_init
->next
= NULL
;
303 tail
->next
= mem_init
;
305 init_header
->size
+= qat_hal_get_ins_num();
311 mem_init
= tail_old
->next
;
316 kfree(*init_tab_base
);
320 static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle
*handle
,
321 struct icp_qat_uof_initmem
*init_mem
)
323 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
326 if (qat_uclo_fetch_initmem_ae(handle
, init_mem
,
327 ICP_QAT_UCLO_MAX_LMEM_REG
, &ae
))
329 if (qat_uclo_create_batch_init_list(handle
, init_mem
, ae
,
330 &obj_handle
->lm_init_tab
[ae
]))
335 static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle
*handle
,
336 struct icp_qat_uof_initmem
*init_mem
)
338 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
339 unsigned int ae
, ustore_size
, uaddr
, i
;
341 ustore_size
= obj_handle
->ustore_phy_size
;
342 if (qat_uclo_fetch_initmem_ae(handle
, init_mem
, ustore_size
, &ae
))
344 if (qat_uclo_create_batch_init_list(handle
, init_mem
, ae
,
345 &obj_handle
->umem_init_tab
[ae
]))
347 /* set the highest ustore address referenced */
348 uaddr
= (init_mem
->addr
+ init_mem
->num_in_bytes
) >> 0x2;
349 for (i
= 0; i
< obj_handle
->ae_data
[ae
].slice_num
; i
++) {
350 if (obj_handle
->ae_data
[ae
].ae_slices
[i
].
351 encap_image
->uwords_num
< uaddr
)
352 obj_handle
->ae_data
[ae
].ae_slices
[i
].
353 encap_image
->uwords_num
= uaddr
;
358 #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
359 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle
*handle
,
360 struct icp_qat_uof_initmem
*init_mem
)
363 struct icp_qat_uof_memvar_attr
*mem_val_attr
;
366 (struct icp_qat_uof_memvar_attr
*)((unsigned long)init_mem
+
367 sizeof(struct icp_qat_uof_initmem
));
369 switch (init_mem
->region
) {
370 case ICP_QAT_UOF_SRAM_REGION
:
371 if ((init_mem
->addr
+ init_mem
->num_in_bytes
) >
372 ICP_DH895XCC_PESRAM_BAR_SIZE
) {
373 pr_err("QAT: initmem on SRAM is out of range");
376 for (i
= 0; i
< init_mem
->val_attr_num
; i
++) {
377 qat_uclo_wr_sram_by_words(handle
,
379 mem_val_attr
->offset_in_byte
,
380 &mem_val_attr
->value
, 4);
384 case ICP_QAT_UOF_LMEM_REGION
:
385 if (qat_uclo_init_lmem_seg(handle
, init_mem
))
388 case ICP_QAT_UOF_UMEM_REGION
:
389 if (qat_uclo_init_umem_seg(handle
, init_mem
))
393 pr_err("QAT: initmem region error. region type=0x%x\n",
400 static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle
*handle
,
401 struct icp_qat_uclo_encapme
*image
)
404 struct icp_qat_uclo_encap_page
*page
;
405 struct icp_qat_uof_image
*uof_image
;
407 unsigned int ustore_size
;
408 unsigned int patt_pos
;
409 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
412 uof_image
= image
->img_ptr
;
413 fill_data
= kcalloc(ICP_QAT_UCLO_MAX_USTORE
, sizeof(uint64_t),
417 for (i
= 0; i
< ICP_QAT_UCLO_MAX_USTORE
; i
++)
418 memcpy(&fill_data
[i
], &uof_image
->fill_pattern
,
422 for (ae
= 0; ae
< handle
->hal_handle
->ae_max_num
; ae
++) {
423 if (!test_bit(ae
, (unsigned long *)&uof_image
->ae_assigned
))
425 ustore_size
= obj_handle
->ae_data
[ae
].eff_ustore_size
;
426 patt_pos
= page
->beg_addr_p
+ page
->micro_words_num
;
428 qat_hal_wr_uwords(handle
, (unsigned char)ae
, 0,
429 page
->beg_addr_p
, &fill_data
[0]);
430 qat_hal_wr_uwords(handle
, (unsigned char)ae
, patt_pos
,
431 ustore_size
- patt_pos
+ 1,
432 &fill_data
[page
->beg_addr_p
]);
438 static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle
*handle
)
441 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
442 struct icp_qat_uof_initmem
*initmem
= obj_handle
->init_mem_tab
.init_mem
;
444 for (i
= 0; i
< obj_handle
->init_mem_tab
.entry_num
; i
++) {
445 if (initmem
->num_in_bytes
) {
446 if (qat_uclo_init_ae_memory(handle
, initmem
))
449 initmem
= (struct icp_qat_uof_initmem
*)((unsigned long)(
450 (unsigned long)initmem
+
451 sizeof(struct icp_qat_uof_initmem
)) +
452 (sizeof(struct icp_qat_uof_memvar_attr
) *
453 initmem
->val_attr_num
));
455 for (ae
= 0; ae
< handle
->hal_handle
->ae_max_num
; ae
++) {
456 if (qat_hal_batch_wr_lm(handle
, ae
,
457 obj_handle
->lm_init_tab
[ae
])) {
458 pr_err("QAT: fail to batch init lmem for AE %d\n", ae
);
461 qat_uclo_cleanup_batch_init_list(handle
,
462 &obj_handle
->lm_init_tab
[ae
]);
463 qat_uclo_batch_wr_umem(handle
, ae
,
464 obj_handle
->umem_init_tab
[ae
]);
465 qat_uclo_cleanup_batch_init_list(handle
,
472 static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr
*obj_hdr
,
473 char *chunk_id
, void *cur
)
476 struct icp_qat_uof_chunkhdr
*chunk_hdr
=
477 (struct icp_qat_uof_chunkhdr
*)
478 ((unsigned long)obj_hdr
+ sizeof(struct icp_qat_uof_objhdr
));
480 for (i
= 0; i
< obj_hdr
->num_chunks
; i
++) {
481 if ((cur
< (void *)&chunk_hdr
[i
]) &&
482 !strncmp(chunk_hdr
[i
].chunk_id
, chunk_id
,
483 ICP_QAT_UOF_OBJID_LEN
)) {
484 return &chunk_hdr
[i
];
/*
 * Fold one byte @ch into the running CRC state @reg using the CCITT
 * polynomial 0x1021 (bit-at-a-time, MSB first) and return the new
 * 16-bit checksum.
 */
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	int i;
	unsigned int topbit = 1 << 0xF;
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);

	reg ^= inbyte << 0x8;
	for (i = 0; i < 0x8; i++) {
		if (reg & topbit)
			reg = (reg << 1) ^ 0x1021;
		else
			reg <<= 1;
	}
	return reg & 0xffff;
}
/*
 * CRC a byte buffer of length @num via qat_uclo_calc_checksum().
 * A NULL @ptr yields checksum 0.
 */
static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	int i;
	unsigned int chksum = 0;

	if (ptr)
		for (i = 0; i < num; i++)
			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
	return chksum;
}
516 static struct icp_qat_uclo_objhdr
*
517 qat_uclo_map_chunk(char *buf
, struct icp_qat_uof_filehdr
*file_hdr
,
520 struct icp_qat_uof_filechunkhdr
*file_chunk
;
521 struct icp_qat_uclo_objhdr
*obj_hdr
;
525 file_chunk
= (struct icp_qat_uof_filechunkhdr
*)
526 (buf
+ sizeof(struct icp_qat_uof_filehdr
));
527 for (i
= 0; i
< file_hdr
->num_chunks
; i
++) {
528 if (!strncmp(file_chunk
->chunk_id
, chunk_id
,
529 ICP_QAT_UOF_OBJID_LEN
)) {
530 chunk
= buf
+ file_chunk
->offset
;
531 if (file_chunk
->checksum
!= qat_uclo_calc_str_checksum(
532 chunk
, file_chunk
->size
))
534 obj_hdr
= kzalloc(sizeof(*obj_hdr
), GFP_KERNEL
);
537 obj_hdr
->file_buff
= chunk
;
538 obj_hdr
->checksum
= file_chunk
->checksum
;
539 obj_hdr
->size
= file_chunk
->size
;
548 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj
*encap_uof_obj
,
549 struct icp_qat_uof_image
*image
)
551 struct icp_qat_uof_objtable
*uc_var_tab
, *imp_var_tab
, *imp_expr_tab
;
552 struct icp_qat_uof_objtable
*neigh_reg_tab
;
553 struct icp_qat_uof_code_page
*code_page
;
555 code_page
= (struct icp_qat_uof_code_page
*)
556 ((char *)image
+ sizeof(struct icp_qat_uof_image
));
557 uc_var_tab
= (struct icp_qat_uof_objtable
*)(encap_uof_obj
->beg_uof
+
558 code_page
->uc_var_tab_offset
);
559 imp_var_tab
= (struct icp_qat_uof_objtable
*)(encap_uof_obj
->beg_uof
+
560 code_page
->imp_var_tab_offset
);
561 imp_expr_tab
= (struct icp_qat_uof_objtable
*)
562 (encap_uof_obj
->beg_uof
+
563 code_page
->imp_expr_tab_offset
);
564 if (uc_var_tab
->entry_num
|| imp_var_tab
->entry_num
||
565 imp_expr_tab
->entry_num
) {
566 pr_err("QAT: UOF can't contain imported variable to be parsed");
569 neigh_reg_tab
= (struct icp_qat_uof_objtable
*)
570 (encap_uof_obj
->beg_uof
+
571 code_page
->neigh_reg_tab_offset
);
572 if (neigh_reg_tab
->entry_num
) {
573 pr_err("QAT: UOF can't contain shared control store feature");
576 if (image
->numpages
> 1) {
577 pr_err("QAT: UOF can't contain multiple pages");
580 if (ICP_QAT_SHARED_USTORE_MODE(image
->ae_mode
)) {
581 pr_err("QAT: UOF can't use shared control store feature");
584 if (RELOADABLE_CTX_SHARED_MODE(image
->ae_mode
)) {
585 pr_err("QAT: UOF can't use reloadable feature");
591 static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
593 struct icp_qat_uof_image
*img
,
594 struct icp_qat_uclo_encap_page
*page
)
596 struct icp_qat_uof_code_page
*code_page
;
597 struct icp_qat_uof_code_area
*code_area
;
598 struct icp_qat_uof_objtable
*uword_block_tab
;
599 struct icp_qat_uof_uword_block
*uwblock
;
602 code_page
= (struct icp_qat_uof_code_page
*)
603 ((char *)img
+ sizeof(struct icp_qat_uof_image
));
604 page
->def_page
= code_page
->def_page
;
605 page
->page_region
= code_page
->page_region
;
606 page
->beg_addr_v
= code_page
->beg_addr_v
;
607 page
->beg_addr_p
= code_page
->beg_addr_p
;
608 code_area
= (struct icp_qat_uof_code_area
*)(encap_uof_obj
->beg_uof
+
609 code_page
->code_area_offset
);
610 page
->micro_words_num
= code_area
->micro_words_num
;
611 uword_block_tab
= (struct icp_qat_uof_objtable
*)
612 (encap_uof_obj
->beg_uof
+
613 code_area
->uword_block_tab
);
614 page
->uwblock_num
= uword_block_tab
->entry_num
;
615 uwblock
= (struct icp_qat_uof_uword_block
*)((char *)uword_block_tab
+
616 sizeof(struct icp_qat_uof_objtable
));
617 page
->uwblock
= (struct icp_qat_uclo_encap_uwblock
*)uwblock
;
618 for (i
= 0; i
< uword_block_tab
->entry_num
; i
++)
619 page
->uwblock
[i
].micro_words
=
620 (unsigned long)encap_uof_obj
->beg_uof
+ uwblock
[i
].uword_offset
;
623 static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle
*obj_handle
,
624 struct icp_qat_uclo_encapme
*ae_uimage
,
628 struct icp_qat_uof_chunkhdr
*chunk_hdr
= NULL
;
629 struct icp_qat_uof_image
*image
;
630 struct icp_qat_uof_objtable
*ae_regtab
;
631 struct icp_qat_uof_objtable
*init_reg_sym_tab
;
632 struct icp_qat_uof_objtable
*sbreak_tab
;
633 struct icp_qat_uof_encap_obj
*encap_uof_obj
=
634 &obj_handle
->encap_uof_obj
;
636 for (j
= 0; j
< max_image
; j
++) {
637 chunk_hdr
= qat_uclo_find_chunk(encap_uof_obj
->obj_hdr
,
638 ICP_QAT_UOF_IMAG
, chunk_hdr
);
641 image
= (struct icp_qat_uof_image
*)(encap_uof_obj
->beg_uof
+
643 ae_regtab
= (struct icp_qat_uof_objtable
*)
644 (image
->reg_tab_offset
+
645 obj_handle
->obj_hdr
->file_buff
);
646 ae_uimage
[j
].ae_reg_num
= ae_regtab
->entry_num
;
647 ae_uimage
[j
].ae_reg
= (struct icp_qat_uof_ae_reg
*)
648 (((char *)ae_regtab
) +
649 sizeof(struct icp_qat_uof_objtable
));
650 init_reg_sym_tab
= (struct icp_qat_uof_objtable
*)
651 (image
->init_reg_sym_tab
+
652 obj_handle
->obj_hdr
->file_buff
);
653 ae_uimage
[j
].init_regsym_num
= init_reg_sym_tab
->entry_num
;
654 ae_uimage
[j
].init_regsym
= (struct icp_qat_uof_init_regsym
*)
655 (((char *)init_reg_sym_tab
) +
656 sizeof(struct icp_qat_uof_objtable
));
657 sbreak_tab
= (struct icp_qat_uof_objtable
*)
658 (image
->sbreak_tab
+ obj_handle
->obj_hdr
->file_buff
);
659 ae_uimage
[j
].sbreak_num
= sbreak_tab
->entry_num
;
660 ae_uimage
[j
].sbreak
= (struct icp_qat_uof_sbreak
*)
661 (((char *)sbreak_tab
) +
662 sizeof(struct icp_qat_uof_objtable
));
663 ae_uimage
[j
].img_ptr
= image
;
664 if (qat_uclo_check_image_compat(encap_uof_obj
, image
))
667 kzalloc(sizeof(struct icp_qat_uclo_encap_page
),
669 if (!ae_uimage
[j
].page
)
671 qat_uclo_map_image_page(encap_uof_obj
, image
,
676 for (i
= 0; i
< j
; i
++)
677 kfree(ae_uimage
[i
].page
);
681 static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle
*handle
, int max_ae
)
685 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
687 for (ae
= 0; ae
<= max_ae
; ae
++) {
689 (unsigned long *)&handle
->hal_handle
->ae_mask
))
691 for (i
= 0; i
< obj_handle
->uimage_num
; i
++) {
692 if (!test_bit(ae
, (unsigned long *)
693 &obj_handle
->ae_uimage
[i
].img_ptr
->ae_assigned
))
696 if (qat_uclo_init_ae_data(obj_handle
, ae
, i
))
701 pr_err("QAT: uimage uses AE not set");
707 static struct icp_qat_uof_strtable
*
708 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr
*obj_hdr
,
709 char *tab_name
, struct icp_qat_uof_strtable
*str_table
)
711 struct icp_qat_uof_chunkhdr
*chunk_hdr
;
713 chunk_hdr
= qat_uclo_find_chunk((struct icp_qat_uof_objhdr
*)
714 obj_hdr
->file_buff
, tab_name
, NULL
);
718 memcpy(&str_table
->table_len
, obj_hdr
->file_buff
+
719 chunk_hdr
->offset
, sizeof(str_table
->table_len
));
720 hdr_size
= (char *)&str_table
->strings
- (char *)str_table
;
721 str_table
->strings
= (unsigned long)obj_hdr
->file_buff
+
722 chunk_hdr
->offset
+ hdr_size
;
729 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj
*encap_uof_obj
,
730 struct icp_qat_uclo_init_mem_table
*init_mem_tab
)
732 struct icp_qat_uof_chunkhdr
*chunk_hdr
;
734 chunk_hdr
= qat_uclo_find_chunk(encap_uof_obj
->obj_hdr
,
735 ICP_QAT_UOF_IMEM
, NULL
);
737 memmove(&init_mem_tab
->entry_num
, encap_uof_obj
->beg_uof
+
738 chunk_hdr
->offset
, sizeof(unsigned int));
739 init_mem_tab
->init_mem
= (struct icp_qat_uof_initmem
*)
740 (encap_uof_obj
->beg_uof
+ chunk_hdr
->offset
+
741 sizeof(unsigned int));
745 static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle
*obj_handle
)
747 unsigned int maj_ver
, prod_type
= obj_handle
->prod_type
;
749 if (!(prod_type
& obj_handle
->encap_uof_obj
.obj_hdr
->cpu_type
)) {
750 pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n",
751 obj_handle
->encap_uof_obj
.obj_hdr
->cpu_type
, prod_type
);
754 maj_ver
= obj_handle
->prod_rev
& 0xff;
755 if ((obj_handle
->encap_uof_obj
.obj_hdr
->max_cpu_ver
< maj_ver
) ||
756 (obj_handle
->encap_uof_obj
.obj_hdr
->min_cpu_ver
> maj_ver
)) {
757 pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver
);
763 static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle
*handle
,
764 unsigned char ae
, unsigned char ctx_mask
,
765 enum icp_qat_uof_regtype reg_type
,
766 unsigned short reg_addr
, unsigned int value
)
774 return qat_hal_init_gpr(handle
, ae
, ctx_mask
, reg_type
,
785 return qat_hal_init_rd_xfer(handle
, ae
, ctx_mask
, reg_type
,
792 return qat_hal_init_wr_xfer(handle
, ae
, ctx_mask
, reg_type
,
795 return qat_hal_init_nn(handle
, ae
, ctx_mask
, reg_addr
, value
);
797 pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type
);
803 static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle
*handle
,
805 struct icp_qat_uclo_encapme
*encap_ae
)
808 unsigned char ctx_mask
;
809 struct icp_qat_uof_init_regsym
*init_regsym
;
811 if (ICP_QAT_CTX_MODE(encap_ae
->img_ptr
->ae_mode
) ==
812 ICP_QAT_UCLO_MAX_CTX
)
817 for (i
= 0; i
< encap_ae
->init_regsym_num
; i
++) {
818 unsigned int exp_res
;
820 init_regsym
= &encap_ae
->init_regsym
[i
];
821 exp_res
= init_regsym
->value
;
822 switch (init_regsym
->init_type
) {
823 case ICP_QAT_UOF_INIT_REG
:
824 qat_uclo_init_reg(handle
, ae
, ctx_mask
,
825 (enum icp_qat_uof_regtype
)
826 init_regsym
->reg_type
,
827 (unsigned short)init_regsym
->reg_addr
,
830 case ICP_QAT_UOF_INIT_REG_CTX
:
831 /* check if ctx is appropriate for the ctxMode */
832 if (!((1 << init_regsym
->ctx
) & ctx_mask
)) {
833 pr_err("QAT: invalid ctx num = 0x%x\n",
837 qat_uclo_init_reg(handle
, ae
,
839 (1 << init_regsym
->ctx
),
840 (enum icp_qat_uof_regtype
)
841 init_regsym
->reg_type
,
842 (unsigned short)init_regsym
->reg_addr
,
845 case ICP_QAT_UOF_INIT_EXPR
:
846 pr_err("QAT: INIT_EXPR feature not supported\n");
848 case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP
:
849 pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
858 static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle
*handle
)
860 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
863 if (obj_handle
->global_inited
)
865 if (obj_handle
->init_mem_tab
.entry_num
) {
866 if (qat_uclo_init_memory(handle
)) {
867 pr_err("QAT: initialize memory failed\n");
871 for (ae
= 0; ae
< handle
->hal_handle
->ae_max_num
; ae
++) {
872 for (s
= 0; s
< obj_handle
->ae_data
[ae
].slice_num
; s
++) {
873 if (!obj_handle
->ae_data
[ae
].ae_slices
[s
].encap_image
)
875 if (qat_uclo_init_reg_sym(handle
, ae
,
876 obj_handle
->ae_data
[ae
].
877 ae_slices
[s
].encap_image
))
881 obj_handle
->global_inited
= 1;
885 static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle
*handle
)
887 unsigned char ae
, nn_mode
, s
;
888 struct icp_qat_uof_image
*uof_image
;
889 struct icp_qat_uclo_aedata
*ae_data
;
890 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
892 for (ae
= 0; ae
< handle
->hal_handle
->ae_max_num
; ae
++) {
894 (unsigned long *)&handle
->hal_handle
->ae_mask
))
896 ae_data
= &obj_handle
->ae_data
[ae
];
897 for (s
= 0; s
< min_t(unsigned int, ae_data
->slice_num
,
898 ICP_QAT_UCLO_MAX_CTX
); s
++) {
899 if (!obj_handle
->ae_data
[ae
].ae_slices
[s
].encap_image
)
901 uof_image
= ae_data
->ae_slices
[s
].encap_image
->img_ptr
;
902 if (qat_hal_set_ae_ctx_mode(handle
, ae
,
903 (char)ICP_QAT_CTX_MODE
904 (uof_image
->ae_mode
))) {
905 pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
908 nn_mode
= ICP_QAT_NN_MODE(uof_image
->ae_mode
);
909 if (qat_hal_set_ae_nn_mode(handle
, ae
, nn_mode
)) {
910 pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
913 if (qat_hal_set_ae_lm_mode(handle
, ae
, ICP_LMEM0
,
914 (char)ICP_QAT_LOC_MEM0_MODE
915 (uof_image
->ae_mode
))) {
916 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
919 if (qat_hal_set_ae_lm_mode(handle
, ae
, ICP_LMEM1
,
920 (char)ICP_QAT_LOC_MEM1_MODE
921 (uof_image
->ae_mode
))) {
922 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
930 static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle
*handle
)
932 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
933 struct icp_qat_uclo_encapme
*image
;
936 for (a
= 0; a
< obj_handle
->uimage_num
; a
++) {
937 image
= &obj_handle
->ae_uimage
[a
];
938 image
->uwords_num
= image
->page
->beg_addr_p
+
939 image
->page
->micro_words_num
;
943 static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle
*handle
)
945 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
948 obj_handle
->uword_buf
= kcalloc(UWORD_CPYBUF_SIZE
, sizeof(uint64_t),
950 if (!obj_handle
->uword_buf
)
952 obj_handle
->encap_uof_obj
.beg_uof
= obj_handle
->obj_hdr
->file_buff
;
953 obj_handle
->encap_uof_obj
.obj_hdr
= (struct icp_qat_uof_objhdr
*)
954 obj_handle
->obj_hdr
->file_buff
;
955 obj_handle
->uword_in_bytes
= 6;
956 obj_handle
->prod_type
= ICP_QAT_AC_C_CPU_TYPE
;
957 obj_handle
->prod_rev
= PID_MAJOR_REV
|
958 (PID_MINOR_REV
& handle
->hal_handle
->revision_id
);
959 if (qat_uclo_check_uof_compat(obj_handle
)) {
960 pr_err("QAT: UOF incompatible\n");
963 obj_handle
->ustore_phy_size
= ICP_QAT_UCLO_MAX_USTORE
;
964 if (!obj_handle
->obj_hdr
->file_buff
||
965 !qat_uclo_map_str_table(obj_handle
->obj_hdr
, ICP_QAT_UOF_STRT
,
966 &obj_handle
->str_table
)) {
967 pr_err("QAT: UOF doesn't have effective images\n");
970 obj_handle
->uimage_num
=
971 qat_uclo_map_uimage(obj_handle
, obj_handle
->ae_uimage
,
972 ICP_QAT_UCLO_MAX_AE
* ICP_QAT_UCLO_MAX_CTX
);
973 if (!obj_handle
->uimage_num
)
975 if (qat_uclo_map_ae(handle
, handle
->hal_handle
->ae_max_num
)) {
976 pr_err("QAT: Bad object\n");
977 goto out_check_uof_aemask_err
;
979 qat_uclo_init_uword_num(handle
);
980 qat_uclo_map_initmem_table(&obj_handle
->encap_uof_obj
,
981 &obj_handle
->init_mem_tab
);
982 if (qat_uclo_set_ae_mode(handle
))
983 goto out_check_uof_aemask_err
;
985 out_check_uof_aemask_err
:
986 for (ae
= 0; ae
< obj_handle
->uimage_num
; ae
++)
987 kfree(obj_handle
->ae_uimage
[ae
].page
);
989 kfree(obj_handle
->uword_buf
);
993 int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle
*handle
,
994 void *addr_ptr
, int mem_size
)
996 struct icp_qat_uof_filehdr
*filehdr
;
997 struct icp_qat_uclo_objhandle
*objhdl
;
999 BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE
>=
1000 (sizeof(handle
->hal_handle
->ae_mask
) * 8));
1002 if (!handle
|| !addr_ptr
|| mem_size
< 24)
1004 objhdl
= kzalloc(sizeof(*objhdl
), GFP_KERNEL
);
1007 objhdl
->obj_buf
= kmemdup(addr_ptr
, mem_size
, GFP_KERNEL
);
1008 if (!objhdl
->obj_buf
)
1009 goto out_objbuf_err
;
1010 filehdr
= (struct icp_qat_uof_filehdr
*)objhdl
->obj_buf
;
1011 if (qat_uclo_check_format(filehdr
))
1012 goto out_objhdr_err
;
1013 objhdl
->obj_hdr
= qat_uclo_map_chunk((char *)objhdl
->obj_buf
, filehdr
,
1015 if (!objhdl
->obj_hdr
) {
1016 pr_err("QAT: object file chunk is null\n");
1017 goto out_objhdr_err
;
1019 handle
->obj_handle
= objhdl
;
1020 if (qat_uclo_parse_uof_obj(handle
))
1021 goto out_overlay_obj_err
;
1024 out_overlay_obj_err
:
1025 handle
->obj_handle
= NULL
;
1026 kfree(objhdl
->obj_hdr
);
1028 kfree(objhdl
->obj_buf
);
1034 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle
*handle
)
1036 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
1042 kfree(obj_handle
->uword_buf
);
1043 for (a
= 0; a
< obj_handle
->uimage_num
; a
++)
1044 kfree(obj_handle
->ae_uimage
[a
].page
);
1046 for (a
= 0; a
< handle
->hal_handle
->ae_max_num
; a
++)
1047 qat_uclo_free_ae_data(&obj_handle
->ae_data
[a
]);
1049 kfree(obj_handle
->obj_hdr
);
1050 kfree(obj_handle
->obj_buf
);
1052 handle
->obj_handle
= NULL
;
1055 static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle
*obj_handle
,
1056 struct icp_qat_uclo_encap_page
*encap_page
,
1057 uint64_t *uword
, unsigned int addr_p
,
1058 unsigned int raddr
, uint64_t fill
)
1067 for (i
= 0; i
< encap_page
->uwblock_num
; i
++) {
1068 if (raddr
>= encap_page
->uwblock
[i
].start_addr
&&
1069 raddr
<= encap_page
->uwblock
[i
].start_addr
+
1070 encap_page
->uwblock
[i
].words_num
- 1) {
1071 raddr
-= encap_page
->uwblock
[i
].start_addr
;
1072 raddr
*= obj_handle
->uword_in_bytes
;
1073 memcpy(&uwrd
, (void *)(((unsigned long)
1074 encap_page
->uwblock
[i
].micro_words
) + raddr
),
1075 obj_handle
->uword_in_bytes
);
1076 uwrd
= uwrd
& 0xbffffffffffull
;
1080 if (*uword
== INVLD_UWORD
)
1084 static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle
*handle
,
1085 struct icp_qat_uclo_encap_page
1086 *encap_page
, unsigned int ae
)
1088 unsigned int uw_physical_addr
, uw_relative_addr
, i
, words_num
, cpylen
;
1089 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
1092 /* load the page starting at appropriate ustore address */
1093 /* get fill-pattern from an image -- they are all the same */
1094 memcpy(&fill_pat
, obj_handle
->ae_uimage
[0].img_ptr
->fill_pattern
,
1096 uw_physical_addr
= encap_page
->beg_addr_p
;
1097 uw_relative_addr
= 0;
1098 words_num
= encap_page
->micro_words_num
;
1100 if (words_num
< UWORD_CPYBUF_SIZE
)
1103 cpylen
= UWORD_CPYBUF_SIZE
;
1105 /* load the buffer */
1106 for (i
= 0; i
< cpylen
; i
++)
1107 qat_uclo_fill_uwords(obj_handle
, encap_page
,
1108 &obj_handle
->uword_buf
[i
],
1109 uw_physical_addr
+ i
,
1110 uw_relative_addr
+ i
, fill_pat
);
1112 /* copy the buffer to ustore */
1113 qat_hal_wr_uwords(handle
, (unsigned char)ae
,
1114 uw_physical_addr
, cpylen
,
1115 obj_handle
->uword_buf
);
1117 uw_physical_addr
+= cpylen
;
1118 uw_relative_addr
+= cpylen
;
1119 words_num
-= cpylen
;
1123 static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle
*handle
,
1124 struct icp_qat_uof_image
*image
)
1126 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
1127 unsigned int ctx_mask
, s
;
1128 struct icp_qat_uclo_page
*page
;
1132 if (ICP_QAT_CTX_MODE(image
->ae_mode
) == ICP_QAT_UCLO_MAX_CTX
)
1136 /* load the default page and set assigned CTX PC
1137 * to the entrypoint address */
1138 for (ae
= 0; ae
< handle
->hal_handle
->ae_max_num
; ae
++) {
1139 if (!test_bit(ae
, (unsigned long *)&image
->ae_assigned
))
1141 /* find the slice to which this image is assigned */
1142 for (s
= 0; s
< obj_handle
->ae_data
[ae
].slice_num
; s
++) {
1143 if (image
->ctx_assigned
& obj_handle
->ae_data
[ae
].
1144 ae_slices
[s
].ctx_mask_assigned
)
1147 if (s
>= obj_handle
->ae_data
[ae
].slice_num
)
1149 page
= obj_handle
->ae_data
[ae
].ae_slices
[s
].page
;
1150 if (!page
->encap_page
->def_page
)
1152 qat_uclo_wr_uimage_raw_page(handle
, page
->encap_page
, ae
);
1154 page
= obj_handle
->ae_data
[ae
].ae_slices
[s
].page
;
1155 for (ctx
= 0; ctx
< ICP_QAT_UCLO_MAX_CTX
; ctx
++)
1156 obj_handle
->ae_data
[ae
].ae_slices
[s
].cur_page
[ctx
] =
1157 (ctx_mask
& (1 << ctx
)) ? page
: NULL
;
1158 qat_hal_set_live_ctx(handle
, (unsigned char)ae
,
1159 image
->ctx_assigned
);
1160 qat_hal_set_pc(handle
, (unsigned char)ae
, image
->ctx_assigned
,
1161 image
->entry_address
);
1165 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle
*handle
)
1167 struct icp_qat_uclo_objhandle
*obj_handle
= handle
->obj_handle
;
1170 if (qat_uclo_init_globals(handle
))
1172 for (i
= 0; i
< obj_handle
->uimage_num
; i
++) {
1173 if (!obj_handle
->ae_uimage
[i
].img_ptr
)
1175 if (qat_uclo_init_ustore(handle
, &obj_handle
->ae_uimage
[i
]))
1177 qat_uclo_wr_uimage_page(handle
,
1178 obj_handle
->ae_uimage
[i
].img_ptr
);