// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
#include "cc_aead.h"
union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};
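/*
 * Illustrative sketch (not part of the driver): a typical caller zeroes
 * num_of_buffers, queues one or more SG lists with cc_add_sg_entry() below,
 * and then renders the whole array into a single MLLI table with
 * cc_generate_mlli(). The identifiers dev, drvdata, src, nbytes, mlli_params
 * and in_mlli_nents are hypothetical stand-ins for a request being mapped.
 *
 *	struct buffer_array sg_data;
 *	u32 in_mlli_nents = 0;
 *	int rc;
 *
 *	sg_data.num_of_buffers = 0;
 *	cc_add_sg_entry(dev, &sg_data, sg_nents(src), src, nbytes,
 *			0, true, &in_mlli_nents);
 *	mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 *	rc = cc_generate_mlli(dev, &sg_data, mlli_params, GFP_KERNEL);
 */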
static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
	switch (type) {
	case CC_DMA_BUF_NULL:
		return "BUF_NULL";
	case CC_DMA_BUF_DLLI:
		return "BUF_DLLI";
	case CC_DMA_BUF_MLLI:
		return "BUF_MLLI";
	default:
		return "BUF_INVALID";
	}
}
/**
 * cc_copy_mac() - Copy MAC to temporary location
 *
 * @dev: device object
 * @req: aead request object
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
	u32 skip = req->assoclen + req->cryptlen;

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}
/**
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @dev: Device object
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 *
 * Return:
 * Number of entries in the scatterlist
 */
static unsigned int cc_get_sgl_nents(struct device *dev,
				     struct scatterlist *sg_list,
				     unsigned int nbytes, u32 *lbytes)
{
	unsigned int nents = 0;

	*lbytes = 0;

	while (nbytes && sg_list) {
		nents++;
		/* get the number of bytes in the last entry */
		*lbytes = nbytes;
		nbytes -= (sg_list->length > nbytes) ?
				nbytes : sg_list->length;
		sg_list = sg_next(sg_list);
	}

	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}
/**
 * cc_copy_sg_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa
 *
 * @dev: Device object
 * @dest: Buffer to copy to/from
 * @sg: SG list
 * @to_skip: Number of bytes to skip before copying
 * @end: Offset of last byte to copy
 * @direct: Transfer direction (true == from SG list to buffer, false == from
 *          buffer to SG list)
 */
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents;

	nents = sg_nents_for_len(sg, end);
	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}
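/*
 * Example (illustrative): cc_copy_mac() above uses this helper to stash the
 * trailing MAC of an AEAD request. With assoclen 16, cryptlen 64 and
 * req_authsize 16, skip = 80, so the call copies the tail of req->src
 * starting at offset 64 into areq_ctx->backup_mac:
 *
 *	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
 *			   80 - 16, 80, CC_SG_TO_BUF);
 */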
static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
				  u32 buff_size, u32 *curr_nents,
				  u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
		dev_err(dev, "Too many mlli entries. current %d max %d\n",
			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
		return -ENOMEM;
	}

	/* handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/* last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}
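/*
 * Worked example (illustrative): with the 64 kbyte per-entry limit referred
 * to above, a single 150 KB DMA-contiguous buffer is rendered as three LLI
 * entries: two of CC_MAX_MLLI_ENTRY_SIZE each plus a final entry of
 * 150 KB - 128 KB = 22 KB, so *curr_nents grows by 3 and mlli_entry_p
 * advances by 6 words.
 */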
static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
				u32 sgl_data_len, u32 sgl_offset,
				u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	int rc = 0;

	for ( ; (curr_sgl && sgl_data_len);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
		if (rc)
			return rc;

		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}
static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
			    struct mlli_params *mlli_params, gfp_t flags)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, flags,
			       &mlli_params->mlli_dma_addr);
	if (!mlli_params->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = mlli_params->mlli_virt_addr;
	/* go over all SG's and link it to one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		union buffer_array_entry *entry = &sg_data->entry[i];
		u32 tot_len = sg_data->total_data_len[i];
		u32 offset = sg_data->offset[i];

		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
					  &total_nents, &mlli_p);
		if (rc)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
			*sg_data->mlli_nents[i] +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
		mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}
static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
			    unsigned int nents, struct scatterlist *sgl,
			    unsigned int data_len, unsigned int data_offset,
			    bool is_last_table, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	int ret = 0;

	if (!nbytes) {
		*mapped_nents = 0;
		*lbytes = 0;
		*nents = 0;
		return 0;
	}

	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
	if (*nents > max_sg_nents) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			*nents, max_sg_nents);
		*nents = 0;
		return -ENOMEM;
	}

	ret = dma_map_sg(dev, sg, *nents, direction);
	if (!ret) {
		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
		*nents = 0;
		return -ENOMEM;
	}

	*mapped_nents = ret;

	return 0;
}
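/*
 * Illustrative pairing (not part of the driver): a successful cc_map_sg()
 * must be undone with dma_unmap_sg() using the same nents value that was
 * handed to dma_map_sg(), not the mapped_nents it returned. Sketch, with
 * hypothetical locals:
 *
 *	u32 nents, lbytes, mapped_nents;
 *
 *	if (!cc_map_sg(dev, sg, nbytes, DMA_TO_DEVICE, &nents,
 *		       LLI_MAX_NUM_OF_DATA_ENTRIES, &lbytes, &mapped_nents)) {
 *		... use the mapping ...
 *		dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	}
 */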
static int
cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
		     u8 *config_data, struct buffer_array *sg_data,
		     unsigned int assoclen)
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(&areq_ctx->ccm_adata_sg),
		sg_page(&areq_ctx->ccm_adata_sg),
		sg_virt(&areq_ctx->ccm_adata_sg),
		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
	}
	return 0;
}
static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
			   u8 *curr_buff, u32 curr_buff_cnt,
			   struct buffer_array *sg_data)
{
	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
		areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
			false, NULL);
	return 0;
}
void cc_unmap_cipher_request(struct device *dev, void *ctx,
			     unsigned int ivsize, struct scatterlist *src,
			     struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_BIDIRECTIONAL);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	if (src != dst) {
		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
	} else {
		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
	}
}
int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
			  unsigned int ivsize, unsigned int nbytes,
			  void *info, struct scatterlist *src,
			  struct scatterlist *dst, gfp_t flags)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;
	int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (ivsize) {
		dump_byte_array("iv", info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc)
		goto cipher_exit;
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

	if (src == dst) {
		/* Handle inplace operation */
		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			req_ctx->out_nents = 0;
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto cipher_exit;
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto cipher_exit;
	}

	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
		cc_dma_buf_type(req_ctx->dma_buf_type));

	return 0;

cipher_exit:
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}
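/*
 * Typical call flow (sketch, with hypothetical request-context fields): the
 * cipher path maps before building HW descriptors and unmaps on completion,
 * e.g. from a crypto request handler:
 *
 *	rc = cc_map_cipher_request(drvdata, req_ctx, ivsize, nbytes, req->iv,
 *				   req->src, req->dst, flags);
 *	if (rc)
 *		return rc;
 *	... queue HW descriptors, then on completion ...
 *	cc_unmap_cipher_request(dev, req_ctx, ivsize, req->src, req->dst);
 */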
void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
	int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
		kfree_sensitive(areq_ctx->gen_ctx.iv);
	}

	/* Release pool */
	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
	    (areq_ctx->mlli_params.mlli_virt_addr)) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		areq_ctx->assoclen, req->cryptlen);

	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
			     DMA_FROM_DEVICE);
	}
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst) {
		/* copy back mac from temporary location to deal with possible
		 * data memory overriding that caused by cache coherence
		 * problem.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
	}
}
static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
			   u32 last_entry_data_size)
{
	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}
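/*
 * Example (illustrative): with authsize 16 and a source SGL whose last
 * mapped entry carries only 10 data bytes, cc_is_icv_frag(3, 16, 10)
 * returns true, so the callers below fall back to mac_buf/backup_mac
 * instead of pointing the ICV at a single SG entry.
 */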
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	gfp_t flags = cc_gfp_flags(&req->base);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		areq_ctx->gen_ctx.iv = NULL;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
	if (!areq_ctx->gen_ctx.iv)
		return -ENOMEM;

	areq_ctx->gen_ctx.iv_dma_addr =
		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		kfree_sensitive(areq_ctx->gen_ctx.iv);
		areq_ctx->gen_ctx.iv = NULL;
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);

chain_iv_exit:
	return rc;
}
static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
	int rc = 0;
	int mapped_nents = 0;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (areq_ctx->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
	if (mapped_nents < 0)
		return mapped_nents;

	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
				(areq_ctx->assoc.nents + 1),
				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;

	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				areq_ctx->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}
static void cc_prepare_aead_data_dlli(struct aead_request *req,
				      u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct scatterlist *sg;
	unsigned int offset;

	areq_ctx->is_icv_fragmented = false;

	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		sg = areq_ctx->src_sgl;
		offset = *src_last_bytes - authsize;
	} else {
		sg = areq_ctx->dst_sgl;
		offset = *dst_last_bytes - authsize;
	}

	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}
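/*
 * Example (illustrative): for an in-place request with authsize 16 whose
 * single data entry ends with *src_last_bytes = 100, offset = 100 - 16 = 84,
 * so the ICV is addressed directly inside that entry and no MLLI table is
 * needed.
 */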
static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				      struct aead_request *req,
				      struct buffer_array *sg_data,
				      u32 *src_last_bytes, u32 *dst_last_bytes,
				      bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct device *dev = drvdata_to_dev(drvdata);
	struct scatterlist *sg;

	if (req->src == req->dst) {
		/* INPLACE */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);

		if (areq_ctx->is_icv_fragmented) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* In coherent platforms (e.g. ACP)
				 * already copying ICV for any
				 * INPLACE-DECRYPT operation, hence
				 * we must neglect this code.
				 */
				if (!drvdata->coherent)
					cc_copy_mac(dev, req, CC_SG_TO_BUF);

				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle if the sg is not contig. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);
		/* Backup happens only when ICV is fragmented, ICV
		 * verification is made by CPU compare in order to simplify
		 * MAC verification upon request completion
		 */
		if (areq_ctx->is_icv_fragmented) {
			cc_copy_mac(dev, req, CC_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;

		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle if the sg is not contig. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else {
		/* NON-INPLACE and ENCRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
				       *dst_last_bytes);

		if (!areq_ctx->is_icv_fragmented) {
			/* Contig. ICV */
			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}
}
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
	struct device *dev = drvdata_to_dev(drvdata);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	/* non-inplace mode */
	unsigned int size_for_map = req->assoclen + req->cryptlen;
	u32 sg_index = 0;
	u32 size_to_skip = req->assoclen;
	struct scatterlist *sgl;

	offset = size_to_skip;

	if (!sg_data)
		return -EINVAL;

	areq_ctx->src_sgl = req->src;
	areq_ctx->dst_sgl = req->dst;

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes);
	sg_index = areq_ctx->src_sgl->length;
	// check where the data starts
	while (src_mapped_nents && (sg_index <= size_to_skip)) {
		src_mapped_nents--;
		offset -= areq_ctx->src_sgl->length;
		sgl = sg_next(areq_ctx->src_sgl);
		if (!sgl)
			break;
		areq_ctx->src_sgl = sgl;
		sg_index += areq_ctx->src_sgl->length;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;

		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
			size_for_map += authsize;
		else
			size_for_map -= authsize;

		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
			       &areq_ctx->dst.mapped_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
		if (rc)
			goto chain_data_exit;
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	// check where the data starts
	while (dst_mapped_nents && sg_index <= size_to_skip) {
		dst_mapped_nents--;
		offset -= areq_ctx->dst_sgl->length;
		sgl = sg_next(areq_ctx->dst_sgl);
		if (!sgl)
			break;
		areq_ctx->dst_sgl = sgl;
		sg_index += areq_ctx->dst_sgl->length;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dst_offset = offset;
	if (src_mapped_nents > 1 ||
	    dst_mapped_nents > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					  &src_last_bytes, &dst_last_bytes,
					  is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}
static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
				      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
				 LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		/* Inplace case dst nents equal to src nents */
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
						  curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
			    DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->dst.sram_addr =
					areq_ctx->src.sram_addr +
					areq_ctx->src.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->src.sram_addr =
					areq_ctx->dst.sram_addr +
					areq_ctx->dst.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}
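/*
 * SRAM layout example (illustrative): for a non-single-pass, in-place
 * request with assoc.mlli_nents = 2 and src.mlli_nents = 4, the assoc table
 * sits at mlli_sram_addr, the shared src/dst table follows at
 * mlli_sram_addr + 2 * LLI_ENTRY_BYTE_SIZE, and assoc.mlli_nents is bumped
 * to 6 so the first descriptor covers the chained tables.
 */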
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0;
	dma_addr_t dma_addr;
	u32 mapped_nents = 0;
	u32 dummy = 0; /* used for the assoc data fragments */
	u32 size_to_map;
	gfp_t flags = cc_gfp_flags(&req->base);

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* copy mac to a temporary location to deal with possible
	 * data memory overriding that caused by cache coherence problem.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* calculate the size for cipher, remove ICV in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
			      DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			     req->cryptlen :
			     (req->cryptlen - authsize);

	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
			MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}
	areq_ctx->mac_buf_dma_addr = dma_addr;

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;

		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, addr);
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, areq_ctx->assoclen);
		if (rc)
			goto aead_map_failure;
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->hkey_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_block_len_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + req->assoclen;
	/* If we do in-place encryption, we also need the auth tag */
	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
	    (req->src == req->dst)) {
		size_to_map += authsize;
	}

	rc = cc_map_sg(dev, req->src, size_to_map,
		       (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
		       &areq_ctx->src.mapped_nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
	if (rc)
		goto aead_map_failure;

	if (areq_ctx->is_single_pass) {
		/*
		 * Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst SGLs
		 *   Note: IV is contg. buffer (not an SGL)
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (rc)
			goto aead_map_failure;
	}

	/* MLLI support - start building the MLLI according to the above
	 * results
	 */
	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto aead_map_failure;

		cc_update_aead_mlli_nents(drvdata, req);
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}

	return 0;

aead_map_failure:
	cc_unmap_aead_request(dev, req);
	return rc;
}
int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (nbytes == 0 && *curr_buff_cnt == 0) {
		/* nothing to do */
		return 0;
	}

	/* map the previous buffer */
	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
	}

	if (src && nbytes > 0 && do_update) {
		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (src && mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	/* build the MLLI table if needed */
	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
				0, true, &areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	u8 *next_buff = cc_next_buf(areq_ctx);
	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	unsigned int swap_index = 0;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (total_in_len < block_size) {
		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size */
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		*next_buff_cnt, update_data_len);

	/* Copy the new residue to next buffer */
	if (*next_buff_cnt) {
		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
			next_buff, (update_data_len - *curr_buff_cnt),
			*next_buff_cnt);
		cc_copy_sg_portion(dev, next_buff, src,
				   (update_data_len - *curr_buff_cnt),
				   nbytes, CC_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
			       DMA_TO_DEVICE, &areq_ctx->in_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
			       &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
				(update_data_len - *curr_buff_cnt), 0, true,
				&areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}
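/*
 * Residue math example (illustrative): with block_size 64, *curr_buff_cnt 20
 * and nbytes 100, total_in_len = 120, *next_buff_cnt = 120 & 63 = 56 and
 * update_data_len = 64, so 64 bytes (20 buffered + 44 from src) go to the
 * engine and the remaining 56 bytes of src are copied into next_buff for the
 * following update.
 */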
void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = cc_next_buf_cnt(areq_ctx);

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if (src && areq_ctx->in_nents) {
		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len) {
		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
			sg_virt(areq_ctx->buff_sg),
			&sg_dma_address(areq_ctx->buff_sg),
			sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}
int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
	struct device *dev = drvdata_to_dev(drvdata);

	drvdata->mlli_buffs_pool =
		dma_pool_create("dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (!drvdata->mlli_buffs_pool)
		return -ENOMEM;

	return 0;
}

int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
	dma_pool_destroy(drvdata->mlli_buffs_pool);
	return 0;
}
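/*
 * Lifetime sketch (illustrative, names hypothetical): the DMA pool created
 * above is set up once per device instance on the probe path and released on
 * remove:
 *
 *	rc = cc_buffer_mgr_init(drvdata);
 *	if (rc)
 *		return rc;
 *	...
 *	cc_buffer_mgr_fini(drvdata);
 */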