// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
#include "cc_aead.h"
enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
	DMA_BUFF_TYPE = 2,
};

struct buff_mgr_handle {
	struct dma_pool *mlli_buffs_pool;
};
union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};
static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
	switch (type) {
	case CC_DMA_BUF_NULL:
		return "BUF_NULL";
	case CC_DMA_BUF_DLLI:
		return "BUF_DLLI";
	case CC_DMA_BUF_MLLI:
		return "BUF_MLLI";
	default:
		return "BUF_INVALID";
	}
}
/**
 * cc_copy_mac() - Copy MAC to temporary location
 *
 * @dev: device object
 * @req: aead request object
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 skip = req->assoclen + req->cryptlen;

	if (areq_ctx->is_gcm4543)
		skip += crypto_aead_ivsize(tfm);

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}
/**
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 */
static unsigned int cc_get_sgl_nents(struct device *dev,
				     struct scatterlist *sg_list,
				     unsigned int nbytes, u32 *lbytes,
				     bool *is_chained)
{
	unsigned int nents = 0;

	while (nbytes && sg_list) {
		if (sg_list->length) {
			nents++;
			/* get the number of bytes in the last entry */
			*lbytes = nbytes;
			nbytes -= (sg_list->length > nbytes) ?
					nbytes : sg_list->length;
			sg_list = sg_next(sg_list);
		} else {
			sg_list = (struct scatterlist *)sg_page(sg_list);
			if (is_chained)
				*is_chained = true;
		}
	}
	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}
/**
 * cc_zero_sgl() - Zero scatter scatter list data.
 *
 * @sgl: scatterlist to zero
 * @data_len: number of bytes to zero
 */
void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
{
	struct scatterlist *current_sg = sgl;
	int sg_index = 0;

	while (sg_index <= data_len) {
		if (!current_sg) {
			/* reached the end of the sgl --> just return back */
			return;
		}
		memset(sg_virt(current_sg), 0, current_sg->length);
		sg_index += current_sg->length;
		current_sg = sg_next(current_sg);
	}
}
/**
 * cc_copy_sg_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa
 *
 * @dev: device object
 * @dest: buffer to copy to/from
 * @sg: scatterlist to copy from/to
 * @to_skip: offset into the scatterlist where copying starts
 * @end: offset into the scatterlist where copying ends
 * @direct: copy direction (CC_SG_TO_BUF or CC_SG_FROM_BUF)
 */
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents, lbytes;

	nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}
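
/* Build MLLI entries for a single contiguous DMA buffer, splitting it into
 * chunks of at most CC_MAX_MLLI_ENTRY_SIZE bytes and advancing the caller's
 * entry pointer and entry counter.
 */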
static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
				  u32 buff_size, u32 *curr_nents,
				  u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow*/
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
		return -ENOMEM;

	/*handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/*Last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}
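
/* Walk a DMA-mapped scatterlist, starting at sgl_offset and covering
 * sgl_data_len bytes in total, and render each entry into MLLI entries via
 * cc_render_buff_to_mlli().
 */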
static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
				u32 sgl_data_len, u32 sgl_offset,
				u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	int rc = 0;

	for ( ; (curr_sgl && sgl_data_len);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
		if (rc)
			return rc;

		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}
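
/* Allocate an MLLI table from the DMA pool and link all buffers collected in
 * sg_data into it, updating each buffer's mlli_nents counter with the number
 * of entries it contributed.
 */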
static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
			    struct mlli_params *mlli_params, gfp_t flags)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, flags,
			       &mlli_params->mlli_dma_addr);
	if (!mlli_params->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
	/* go over all SG's and link it to one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		union buffer_array_entry *entry = &sg_data->entry[i];
		u32 tot_len = sg_data->total_data_len[i];
		u32 offset = sg_data->offset[i];

		if (sg_data->type[i] == DMA_SGL_TYPE)
			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
						  offset, &total_nents,
						  &mlli_p);
		else /*DMA_BUFF_TYPE*/
			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
						    tot_len, &total_nents,
						    &mlli_p);
		if (rc)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/*Calculate the current MLLI table length for the
			 *length field in the descriptor
			 */
			*sg_data->mlli_nents[i] +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
		mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}
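
/* Queue a single contiguous DMA buffer in the buffer_array for later
 * rendering into the MLLI table.
 */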
static void cc_add_buffer_entry(struct device *dev,
				struct buffer_array *sgl_data,
				dma_addr_t buffer_dma, unsigned int buffer_len,
				bool is_last_entry, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
		index, &buffer_dma, buffer_len, is_last_entry);
	sgl_data->nents[index] = 1;
	sgl_data->entry[index].buffer_dma = buffer_dma;
	sgl_data->offset[index] = 0;
	sgl_data->total_data_len[index] = buffer_len;
	sgl_data->type[index] = DMA_BUFF_TYPE;
	sgl_data->is_last[index] = is_last_entry;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
			    unsigned int nents, struct scatterlist *sgl,
			    unsigned int data_len, unsigned int data_offset,
			    bool is_last_table, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->type[index] = DMA_SGL_TYPE;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
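
/* Map a scatterlist entry by entry; on failure unmap the entries mapped so
 * far and return 0, otherwise return the number of entries mapped.
 */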
static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
			 enum dma_data_direction direction)
{
	u32 i, j;
	struct scatterlist *l_sg = sg;

	for (i = 0; i < nents; i++) {
		if (!l_sg)
			break;
		if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
			dev_err(dev, "dma_map_page() sg buffer failed\n");
			goto err;
		}
		l_sg = sg_next(l_sg);
	}
	return nents;

err:
	/* Restore mapped parts */
	for (j = 0; j < i; j++) {
		if (!sg)
			break;
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}
	return 0;
}
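
/* DMA-map a source/destination scatterlist: single-entry lists are mapped
 * directly (DLLI case), longer lists are counted with cc_get_sgl_nents() and
 * mapped either with one dma_map_sg() call or entry by entry when chained.
 */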
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	bool is_chained = false;

	if (sg_is_last(sg)) {
		/* One entry only case -set to DLLI */
		if (dma_map_sg(dev, sg, 1, direction) != 1) {
			dev_err(dev, "dma_map_sg() single buffer failed\n");
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
			sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
		*mapped_nents = 1;
	} else {  /*sg_is_last*/
		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
					  &is_chained);
		if (*nents > max_sg_nents) {
			*nents = 0;
			dev_err(dev, "Too many fragments. current %d max %d\n",
				*nents, max_sg_nents);
			return -ENOMEM;
		}
		if (!is_chained) {
			/* In case of mmu the number of mapped nents might
			 * be changed from the original sgl nents
			 */
			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
			if (*mapped_nents == 0) {
				*nents = 0;
				dev_err(dev, "dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		} else {
			/*In this case the driver maps entry by entry so it
			 * must have the same nents before and after map
			 */
			*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
						      direction);
			if (*mapped_nents != *nents) {
				*nents = *mapped_nents;
				dev_err(dev, "dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		}
	}

	return 0;
}
static int
cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
		     u8 *config_data, struct buffer_array *sg_data,
		     unsigned int assoclen)
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(&areq_ctx->ccm_adata_sg),
		sg_page(&areq_ctx->ccm_adata_sg),
		sg_virt(&areq_ctx->ccm_adata_sg),
		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
	}
	return 0;
}
static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
			   u8 *curr_buff, u32 curr_buff_cnt,
			   struct buffer_array *sg_data)
{
	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
		areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
			false, NULL);
	return 0;
}
void cc_unmap_cipher_request(struct device *dev, void *ctx,
			     unsigned int ivsize, struct scatterlist *src,
			     struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_TO_DEVICE);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}
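
/* Map the IV, source and destination buffers of a cipher request for DMA and
 * build an MLLI table whenever more than one scatterlist entry is involved.
 */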
int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
			  unsigned int ivsize, unsigned int nbytes,
			  void *info, struct scatterlist *src,
			  struct scatterlist *dst, gfp_t flags)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;

	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (ivsize) {
		dump_byte_array("iv", (u8 *)info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)info,
				       ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc) {
		rc = -ENOMEM;
		goto cipher_exit;
	}
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

	if (src == dst) {
		/* Handle inplace operation */
		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			req_ctx->out_nents = 0;
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			      &dummy, &mapped_nents)) {
			rc = -ENOMEM;
			goto cipher_exit;
		}
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto cipher_exit;
	}

	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
		cc_dma_buf_type(req_ctx->dma_buf_type));

	return 0;

cipher_exit:
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}
void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
	u32 dummy;
	bool chained;
	u32 size_to_unmap = 0;

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
	}

	/*In case a pool was set, a table was
	 *allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		req->assoclen, req->cryptlen);
	size_to_unmap = req->assoclen + req->cryptlen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_unmap += areq_ctx->req_authsize;
	if (areq_ctx->is_gcm4543)
		size_to_unmap += crypto_aead_ivsize(tfm);

	dma_unmap_sg(dev, req->src,
		     cc_get_sgl_nents(dev, req->src, size_to_unmap,
				      &dummy, &chained),
		     DMA_BIDIRECTIONAL);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst,
			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
					      &dummy, &chained),
			     DMA_BIDIRECTIONAL);
	}
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst) {
		/* copy back mac from temporary location to deal with possible
		 * data memory overriding that caused by cache coherence
		 * problem.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
	}
}
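
/* Determine how many scatterlist entries hold the ICV and whether it is
 * fragmented across entries; returns -1 for unsupported layouts.
 */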
static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
				 unsigned int sgl_nents, unsigned int authsize,
				 u32 last_entry_data_size,
				 bool *is_icv_fragmented)
{
	unsigned int icv_max_size = 0;
	unsigned int icv_required_size = authsize > last_entry_data_size ?
					(authsize - last_entry_data_size) :
					authsize;
	unsigned int nents;
	unsigned int i;

	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
		*is_icv_fragmented = false;
		return 0;
	}

	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
		if (!sgl)
			break;
		sgl = sg_next(sgl);
	}

	if (sgl)
		icv_max_size = sgl->length;

	if (last_entry_data_size > authsize) {
		/* ICV attached to data in last entry (not fragmented!) */
		nents = 0;
		*is_icv_fragmented = false;
	} else if (last_entry_data_size == authsize) {
		/* ICV placed in whole last entry (not fragmented!) */
		nents = 1;
		*is_icv_fragmented = false;
	} else if (icv_max_size > icv_required_size) {
		nents = 1;
		*is_icv_fragmented = true;
	} else if (icv_max_size == icv_required_size) {
		nents = 2;
		*is_icv_fragmented = true;
	} else {
		dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
			MAX_ICV_NENTS_SUPPORTED);
		nents = -1; /*unsupported*/
	}
	dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
		(*is_icv_fragmented ? "true" : "false"), nents);

	return nents;
}
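
/* DMA-map the request IV and, for plaintext-authenticate-only flows, chain
 * it into the assoc MLLI list right after the associated data.
 */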
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
						       hw_iv_size,
						       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
	// TODO: what about CTR?? ask Ron
	if (do_chain && areq_ctx->plaintext_authenticate_only) {
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
		/* Chain to given list */
		cc_add_buffer_entry(dev, sg_data,
				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
				    iv_size_to_authenc, is_last,
				    &areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_iv_exit:
	return rc;
}
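
/* Count the scatterlist entries that carry the associated data and either
 * leave them as DLLI (single entry, non-CCM) or chain them into the assoc
 * MLLI list.
 */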
static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	u32 mapped_nents = 0;
	struct scatterlist *current_sg = req->src;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int sg_index = 0;
	u32 size_of_assoc = req->assoclen;
	struct device *dev = drvdata_to_dev(drvdata);

	if (areq_ctx->is_gcm4543)
		size_of_assoc += crypto_aead_ivsize(tfm);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (req->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	//iterate over the sgl to see how many entries are for associated data
	//it is assumed that if we reach here , the sgl is already mapped
	sg_index = current_sg->length;
	//the first entry in the scatter list contains all the associated data
	if (sg_index > size_of_assoc) {
		mapped_nents++;
	} else {
		while (sg_index <= size_of_assoc) {
			current_sg = sg_next(current_sg);
			/* if have reached the end of the sgl, then this is
			 * unexpected
			 */
			if (!current_sg) {
				dev_err(dev, "reached end of sg list. unexpected\n");
				return -EINVAL;
			}
			sg_index += current_sg->length;
			mapped_nents++;
		}
	}
	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
			dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
				(areq_ctx->assoc.nents + 1),
				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;

	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				req->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}
static void cc_prepare_aead_data_dlli(struct aead_request *req,
				      u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;

	areq_ctx->is_icv_fragmented = false;
	if (req->src == req->dst) {
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/*NON-INPLACE and DECRYPT*/
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
	} else {
		/*NON-INPLACE and ENCRYPT*/
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
			(*dst_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
			(*dst_last_bytes - authsize);
	}
}
static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				     struct aead_request *req,
				     struct buffer_array *sg_data,
				     u32 *src_last_bytes, u32 *dst_last_bytes,
				     bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0, icv_nents;
	struct device *dev = drvdata_to_dev(drvdata);
	struct scatterlist *sg;

	if (req->src == req->dst) {
		/*INPLACE*/
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
						  areq_ctx->src.nents,
						  authsize, *src_last_bytes,
						  &areq_ctx->is_icv_fragmented);
		if (icv_nents < 0) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (areq_ctx->is_icv_fragmented) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* In coherent platforms (e.g. ACP)
				 * already copying ICV for any
				 * INPLACE-DECRYPT operation, hence
				 * we must neglect this code.
				 */
				if (!drvdata->coherent)
					cc_copy_mac(dev, req, CC_SG_TO_BUF);

				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/*Should hanlde if the sg is not contig.*/
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/*NON-INPLACE and DECRYPT*/
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
						  areq_ctx->src.nents,
						  authsize, *src_last_bytes,
						  &areq_ctx->is_icv_fragmented);
		if (icv_nents < 0) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		/* Backup happens only when ICV is fragmented, ICV
		 * verification is made by CPU compare in order to simplify
		 * MAC verification upon request completion
		 */
		if (areq_ctx->is_icv_fragmented) {
			cc_copy_mac(dev, req, CC_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;

		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/*Should hanlde if the sg is not contig.*/
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else {
		/*NON-INPLACE and ENCRYPT*/
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
						  areq_ctx->dst.nents,
						  authsize, *dst_last_bytes,
						  &areq_ctx->is_icv_fragmented);
		if (icv_nents < 0) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (!areq_ctx->is_icv_fragmented) {
			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
			/* Contig. ICV */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}

prepare_data_mlli_exit:
	return rc;
}
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(drvdata);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	/* non-inplace mode */
	unsigned int size_for_map = req->assoclen + req->cryptlen;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 sg_index = 0;
	bool chained = false;
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	u32 size_to_skip = req->assoclen;

	if (is_gcm4543)
		size_to_skip += crypto_aead_ivsize(tfm);

	offset = size_to_skip;

	if (!sg_data)
		return -EINVAL;

	areq_ctx->src_sgl = req->src;
	areq_ctx->dst_sgl = req->dst;

	if (is_gcm4543)
		size_for_map += crypto_aead_ivsize(tfm);

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes, &chained);
	sg_index = areq_ctx->src_sgl->length;
	//check where the data starts
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->src_sgl->length;
		areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
		//if have reached the end of the sgl, then this is unexpected
		if (!areq_ctx->src_sgl) {
			dev_err(dev, "reached end of sg list. unexpected\n");
			return -EINVAL;
		}
		sg_index += areq_ctx->src_sgl->length;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;
		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				authsize : 0;
		if (is_gcm4543)
			size_for_map += crypto_aead_ivsize(tfm);

		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
			       &areq_ctx->dst.nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
		if (rc) {
			rc = -ENOMEM;
			goto chain_data_exit;
		}
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes, &chained);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	//check where the data starts
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->dst_sgl->length;
		areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
		//if have reached the end of the sgl, then this is unexpected
		if (!areq_ctx->dst_sgl) {
			dev_err(dev, "reached end of sg list. unexpected\n");
			return -EINVAL;
		}
		sg_index += areq_ctx->dst_sgl->length;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dst_offset = offset;
	if (src_mapped_nents > 1 ||
	    dst_mapped_nents > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					       &src_last_bytes,
					       &dst_last_bytes, is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}
static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
				      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		/*Inplace case dst nents equal to src nents*/
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
							curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
					DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
						drvdata->mlli_sram_addr +
							curr_mlli_size;
				areq_ctx->dst.sram_addr =
						areq_ctx->src.sram_addr +
						areq_ctx->src.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
						drvdata->mlli_sram_addr +
							curr_mlli_size;
				areq_ctx->src.sram_addr =
						areq_ctx->dst.sram_addr +
						areq_ctx->dst.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}
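
/* Map all DMA buffers of an AEAD request (MAC buffer, CCM/GCM config blocks,
 * IV, associated data and src/dst data) and build the MLLI tables needed for
 * single-pass or double-pass processing.
 */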
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	int rc = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	dma_addr_t dma_addr;
	u32 mapped_nents = 0;
	u32 dummy = 0; /*used for the assoc data fragments */
	u32 size_to_map = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* copy mac to a temporary location to deal with possible
	 * data memory overriding that caused by cache coherence problem.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* cacluate the size for cipher remove ICV in decrypt*/
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
				DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
			MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}
	areq_ctx->mac_buf_dma_addr = dma_addr;

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;

		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, addr);
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					 &sg_data, req->assoclen)) {
			rc = -ENOMEM;
			goto aead_map_failure;
		}
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->hkey_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_block_len_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + req->assoclen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_map += authsize;

	if (is_gcm4543)
		size_to_map += crypto_aead_ivsize(tfm);
	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
		       &areq_ctx->src.nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
	if (rc) {
		rc = -ENOMEM;
		goto aead_map_failure;
	}

	if (areq_ctx->is_single_pass) {
		/*
		 * Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst SGLs
		 *   Note: IV is contg. buffer (not an SGL)
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (rc)
			goto aead_map_failure;
	}

	/* Mlli support -start building the MLLI according to the above
	 * results
	 */
	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto aead_map_failure;

		cc_update_aead_mlli_nents(drvdata, req);
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}
	return 0;

aead_map_failure:
	cc_unmap_aead_request(dev, req);
	return rc;
}
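
/* Map the buffers for a final/finup hash operation: the previously cached
 * partial block plus the request scatterlist, generating an MLLI table when
 * more than one entry must be fed to the engine.
 */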
int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (nbytes == 0 && *curr_buff_cnt == 0) {
		/* nothing to do */
		return 0;
	}

	/*TODO: copy data in case that buffer is enough for operation */
	/* map the previous buffer */
	if (*curr_buff_cnt) {
		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				    &sg_data)) {
			return -ENOMEM;
		}
	}

	if (src && nbytes > 0 && do_update) {
		if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
			      &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			      &dummy, &mapped_nents)) {
			goto unmap_curr_buff;
		}
		if (src && mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	/*build mlli */
	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
				0, true, &areq_ctx->mlli_nents);
		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
			goto fail_unmap_din;
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}
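
/* Map the buffers for a hash update: cache any residue smaller than a block
 * for the next call, map the block-aligned portion of the data and generate
 * an MLLI table when needed.
 */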
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	u8 *next_buff = cc_next_buf(areq_ctx);
	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	unsigned int swap_index = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (total_in_len < block_size) {
		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents =
			cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size*/
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		*next_buff_cnt, update_data_len);

	/* Copy the new residue to next buffer */
	if (*next_buff_cnt) {
		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
			next_buff, (update_data_len - *curr_buff_cnt),
			*next_buff_cnt);
		cc_copy_sg_portion(dev, next_buff, src,
				   (update_data_len - *curr_buff_cnt),
				   nbytes, CC_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt) {
		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				    &sg_data)) {
			return -ENOMEM;
		}
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
			      DMA_TO_DEVICE, &areq_ctx->in_nents,
			      LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
			      &mapped_nents)) {
			goto unmap_curr_buff;
		}
		if (mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
				(update_data_len - *curr_buff_cnt), 0, true,
				&areq_ctx->mlli_nents);
		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
			goto fail_unmap_din;
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}
void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = cc_next_buf_cnt(areq_ctx);

	/*In case a pool was set, a table was
	 *allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if (src && areq_ctx->in_nents) {
		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len) {
		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
			sg_virt(areq_ctx->buff_sg),
			&sg_dma_address(areq_ctx->buff_sg),
			sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}
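
/* Allocate the buffer manager handle and the DMA pool used for MLLI tables. */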
int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
	if (!buff_mgr_handle)
		return -ENOMEM;

	drvdata->buff_mgr_handle = buff_mgr_handle;

	buff_mgr_handle->mlli_buffs_pool =
		dma_pool_create("dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (!buff_mgr_handle->mlli_buffs_pool)
		goto error;

	return 0;

error:
	cc_buffer_mgr_fini(drvdata);
	return -ENOMEM;
}
int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;

	if (buff_mgr_handle) {
		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
		kfree(drvdata->buff_mgr_handle);
		drvdata->buff_mgr_handle = NULL;
	}

	return 0;
}